Dataset columns (one fill-in-the-middle example per row):

	column     type          stats
	file_name  large_string  lengths 4 – 140
	prefix     large_string  lengths 0 – 39k
	suffix     large_string  lengths 0 – 36.1k
	middle     large_string  lengths 0 – 29.4k
	fim_type   large_string  4 classes
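Each row stores one fill-in-the-middle (FIM) example: a span was cut out of a source file, prefix and suffix are the text around the cut, middle is the masked span, and fim_type records which of the 4 span classes was masked (the row below masks an identifier_name span). Below is a minimal sketch of how a consumer might rebuild the original file from a row; the Row struct and the sample values are illustrative, only the field names come from the schema above.

package main

import "fmt"

// Row mirrors the dataset columns listed above.
type Row struct {
	FileName string
	Prefix   string
	Suffix   string
	Middle   string
	FimType  string // e.g. "identifier_name"
}

// Reassemble restores the file: the masked span goes back between
// prefix and suffix, so prefix+middle+suffix is the original text.
func (r Row) Reassemble() string {
	return r.Prefix + r.Middle + r.Suffix
}

func main() {
	r := Row{
		FileName: "example.go",
		Prefix:   "func ",
		Middle:   "add",
		Suffix:   "(a, b int) int { return a + b }",
		FimType:  "identifier_name",
	}
	fmt.Printf("%s => %q\n", r.FileName, r.Reassemble())
}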
file_name: storage_service.go
prefix:

/*
 * @Description:
 * @version:
 * @Company: iwhalecloud
 * @Author: ddh
 * @Date: 2021-02-07 16:32:07
 * @LastEditors: ddh
 * @LastEditTime: 2021-02-07 16:32:07
 */
package service

import (
	"encoding/json"
	"errors"
	"fmt"
	"strconv"
	"strings"
	"time"

	"DBaas/models"
	"DBaas/utils"
	"DBaas/x/response"

	"github.com/go-xorm/xorm"
	"github.com/kataras/iris/v12"
	appsv1 "k8s.io/api/apps/v1"
	core1 "k8s.io/api/core/v1"
	storage1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	meta1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

type StorageService interface {
	UserAssign(id int, userIdStr string) error
	List(page int, pageSize int, key string, userId int, userTag string, isFilter bool) ([]models.ReturnSc, int64)
	Update(id int, remake string, nodeNum int) error
	Delete(id int) error
	Add(scName string, reclaimPolicy string, remark string, orgTag string, userTag string, userId int, scType string, nodeNum int, userIdStr string) (models.Sc, error)
	PvAdd(storageId int, pvName string, mountPoint string, iqn string, lun int, size string, userTag string, orgTag string, namespace string) (bool, models.PersistentVolume, string)
	PvDelete(id int) (err error)
	PVList(page int, pageSize int, key string, userTag string) ([]models.PersistentVolume, int64, error)
	CreateMysqlByPV(pvId int, storageMap map[string]interface{}, remark string, userId int, mysqlName string, qos *models.Qos) error
	SelectOneScByName(name string) (models.Sc, bool)
	SelectOnePvByName(name string) (models.PersistentVolume, bool)
	UserRegister(userId int, scList []map[string]interface{}) error
	DeletescUserbyUser(userId int) (bool, string)
	AddscUserbyuser(userId int, scList []map[string]interface{}) (bool, string)
}

type storageService struct {
	Engine *xorm.Engine
	cs     CommonService
}

func NewStorageService(engine *xorm.Engine, cs CommonService) StorageService {
	return &storageService{
		Engine: engine,
		cs:     cs,
	}
}

func (ss *storageService) PVList(page int, pageSize int, key string, userTag string) (list []models.PersistentVolume, count int64, err error) {
	list = make([]models.PersistentVolume, 0)
	where := "name like ?"
	args := []interface{}{"%" + key + "%"}
	// The "AAAA" tag marks the root user, who may see every PV.
	if userTag != "AAAA" {
		where += " AND user_tag = ?"
		args = append(args, userTag)
	}
	err = ss.Engine.Where(where, args...).Limit(pageSize, pageSize*(page-1)).Desc("id").Find(&list)
	if err != nil {
		return
	}
	count, _ = ss.Engine.Where(where, args...).Count(&models.PersistentVolume{})
	userCache := map[string]models.User{}
	for i := range list {
		tag := list[i].UserTag
		user, ok := userCache[tag]
		if !ok {
			user = models.User{UserTag: tag}
			_, _ = ss.Engine.Get(&user)
			userCache[tag] = user
		}
		list[i].UserId = user.Id
		list[i].Tenant = user.UserName
		list[i].CpuTotal = user.CpuAll
		list[i].MemTotal = int(user.MemAll)
		pod := models.Instance{Id: list[i].PodId}
		_, _ = ss.Engine.Cols("name").Get(&pod)
		list[i].PodName = pod.Name
		sc := models.Sc{Id: list[i].ScId}
		_, _ = ss.Engine.Cols("name").Get(&sc)
		list[i].SCName = sc.Name
	}
	return
}
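// Usage sketch (illustrative, not part of the original file): PVList pages
// with xorm's Limit(limit, offset), so page is 1-based and the offset is
// pageSize*(page-1); any tag other than the root tag "AAAA" filters the
// result to that tenant's PVs:
//
//	pvs, total, err := svc.PVList(2, 10, "mysql", "BBBB") // PVs 11-20 whose name contains "mysql"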
func (ss *storageService) CreateMysqlByPV(pvId int, storageMap map[string]interface{}, remark string, userId int, mysqlName string, qos *models.Qos) (err error) {
	nameExist, _ := ss.Engine.Where("is_deploy = true").Exist(&models.ClusterInstance{Name: mysqlName})
	if nameExist {
		return errors.New("this name already exists")
	}
	exist, _ := ss.Engine.Exist(&models.ClusterInstance{PvId: pvId})
	if exist {
		return errors.New("a MySQL instance created from this PV already exists")
	}
	dbPV := models.PersistentVolume{Id: pvId}
	_, err = ss.Engine.Get(&dbPV)
	if err != nil {
		return
	}
	// Check that the PV exists in Kubernetes.
	err, pvSource := ss.cs.GetResources("pv", dbPV.Name, ss.cs.GetNameSpace(), meta1.GetOptions{})
	if err != nil {
		return fmt.Errorf("pv does not exist in k8s, err: %v", err)
	}
	// Look up the (soft-deleted) pod that previously used this PV.
	pod := models.Instance{Id: dbPV.PodId}
	existPod, err := ss.Engine.Unscoped().Cols("cluster_id").Get(&pod)
	if !existPod {
		return fmt.Errorf("failed to find pod: %v", err)
	}
	cluster := models.ClusterInstance{Id: pod.ClusterId}
	existCluster, err := ss.Engine.Unscoped().Cols("user_id", "image_id", "storage", "user_tag", "org_tag", "secret").Get(&cluster)
	if !existCluster {
		return fmt.Errorf("failed to find cluster: %v", err)
	}
	limitMem, limitCpu := int(storageMap["mem"].(float64)), int(storageMap["cpu"].(float64))
	enough, msg, _ := getUserResource(userId, limitCpu, limitMem, cluster.Storage, ss.Engine)
	if !enough {
		return errors.New(msg)
	}
	pvcName := mysqlName + "-pvc"
	scName := mysqlName + "-sc"
	svcName := mysqlName + "-svc"
	mysqlImage := models.Images{Id: cluster.ImageId}
	hasImage, err := ss.Engine.Get(&mysqlImage)
	if !hasImage {
		return fmt.Errorf("failed to find mysql image: %v", err)
	}
	var imageURL string
	if mysqlImage.Status == "Invalid" {
		imageURL = fmt.Sprintf("%v:%v", mysqlImage.ImageName, mysqlImage.Version)
	} else {
		imageURL = fmt.Sprintf("%v/%v:%v", getImageAddress(ss.Engine), mysqlImage.ImageName, mysqlImage.Version)
	}
	// Rebind the PV: clear the old claim and point it at the new StorageClass.
	pv := (*pvSource).(*core1.PersistentVolume)
	pv.Spec.ClaimRef = nil
	pv.Spec.StorageClassName = scName
	_, err = ss.cs.GetClientSet().CoreV1().PersistentVolumes().Update(*ss.cs.GetCtx(), pv, meta1.UpdateOptions{})
	if err != nil {
		return
	}
	pvcConfig := core1.PersistentVolumeClaim{
		TypeMeta:   meta1.TypeMeta{Kind: "PersistentVolumeClaim", APIVersion: "v1"},
		ObjectMeta: meta1.ObjectMeta{Name: pvcName},
		Spec: core1.PersistentVolumeClaimSpec{
			StorageClassName: &scName,
			AccessModes:      []core1.PersistentVolumeAccessMode{core1.ReadWriteOnce},
			Resources: core1.ResourceRequirements{
				Requests: map[core1.ResourceName]resource.Quantity{
					"storage": resource.MustParse(dbPV.Capacity),
				},
			},
		},
	}
	err = ss.cs.CreateOption("pvc", ss.cs.GetNameSpace(), &pvcConfig, meta1.CreateOptions{})
	if err != nil {
		return
	}
	svcConfig := core1.Service{
		TypeMeta:   meta1.TypeMeta{Kind: "Service", APIVersion: "v1"},
		ObjectMeta: meta1.ObjectMeta{Name: svcName},
		Spec: core1.ServiceSpec{
			Type: core1.ServiceTypeNodePort,
			Ports: []core1.ServicePort{
				{Name: "mysqlport", Port: 3306, TargetPort: intstr.FromInt(3306)},
				{Name: "sidecar-ttyd", Port: 7681, TargetPort: intstr.FromInt(7681)},
			},
			Selector: map[string]string{"app": "mysql"},
		},
	}
	err = ss.cs.CreateOption("service", ss.cs.GetNameSpace(), &svcConfig, meta1.CreateOptions{})
	if err != nil {
		return
	}
	secretMap := map[string]string{}
	err = json.Unmarshal([]byte(cluster.Secret), &secretMap)
	if err != nil {
		return
	}
	mysqlConfig := appsv1.Deployment{
		TypeMeta:   meta1.TypeMeta{Kind: "Deployment", APIVersion: "apps/v1"},
		ObjectMeta: meta1.ObjectMeta{Name: mysqlName},
		Spec: appsv1.DeploymentSpec{
			Selector: &meta1.LabelSelector{MatchLabels: map[string]string{"app": "mysql"}},
			Strategy: appsv1.DeploymentStrategy{Type: appsv1.RecreateDeploymentStrategyType},
			Template: core1.PodTemplateSpec{
				ObjectMeta: meta1.ObjectMeta{Labels: map[string]string{"app": "mysql"}},
				Spec: core1.PodSpec{
					Containers: []core1.Container{
						{
							Name:  "mysql",
							Image: imageURL,
							Resources: core1.ResourceRequirements{
								Limits: core1.ResourceList{
									"memory": resource.MustParse(fmt.Sprintf("%vGi", limitMem)),
									"cpu":    resource.MustParse(strconv.Itoa(limitCpu)),
								},
							},
							Env:          []core1.EnvVar{{Name: "MYSQL_ROOT_PASSWORD", Value: secretMap["ROOT_PASSWORD"]}},
							Ports:        []core1.ContainerPort{{Name: "mysql", ContainerPort: 3306}},
							VolumeMounts: []core1.VolumeMount{{Name: "mysql-persistent-storage", MountPath: "/var/lib/mysql"}},
						},
						{
							Name:  "sidecar",
							Image: "10.45.10.107:8099/k8s/mysql-sidecar:ttyd",
							// Container port names must be unique within a pod, so the
							// ttyd port gets its own name rather than reusing "mysql".
							Ports: []core1.ContainerPort{
								{Name: "ttyd", ContainerPort: 7681},
							},
						},
					},
					Volumes: []core1.Volume{
						{Name: "mysql-persistent-storage", VolumeSource: core1.VolumeSource{
							PersistentVolumeClaim: &core1.PersistentVolumeClaimVolumeSource{ClaimName: pvcName},
						}},
					},
				},
			},
		},
	}
	_, err = ss.cs.GetClientSet().AppsV1().Deployments(ss.cs.GetNameSpace()).Create(*ss.cs.GetCtx(), &mysqlConfig, meta1.CreateOptions{})
	if err != nil {
		return err
	}
	// Find the name of the pod spawned by the deployment.
	var podName string
	for i := 0; i < 10; i++ {
		err, podListSource := ss.cs.GetResources("pod", "", ss.cs.GetNameSpace(), meta1.ListOptions{LabelSelector: "app=mysql"})
		if err != nil {
			utils.LoggerError(err)
			<-time.After(time.Second)
			continue
		}
		podList := (*podListSource).(*core1.PodList)
		for _, pod := range podList.Items {
			for _, volume := range pod.Spec.Volumes {
				if volume.PersistentVolumeClaim == nil {
					continue
				}
				if volume.PersistentVolumeClaim.ClaimName == pvcName {
					podName = pod.Name
					goto FindPodNameEnd
				}
			}
		}
		<-time.After(time.Second)
	}
FindPodNameEnd:
	clusterIns := models.ClusterInstance{
		Name:     mysqlName,
		K8sName:  podName,
		Status:   models.ClusterStatusCreating,
		Storage:  cluster.Storage,
		UserId:   cluster.UserId,
		ImageId:  cluster.ImageId,
		ScName:   scName,
		Replicas: "1",
		LimitCpu: limitCpu,
		LimitMem: limitMem,
		Remark:   remark,
		UserTag:  cluster.UserTag,
		OrgTag:   cluster.OrgTag,
		IsDeploy: true,
		Secret:   cluster.Secret,
		PvId:     pvId,
	}
	_, err = ss.Engine.Insert(&clusterIns)
	if err == nil {
		if qos != nil {
			qos.ClusterId = clusterIns.Id
			_, _ = ss.Engine.Insert(qos)
			go ss.cs.SetQosConfig(dbPV.Name, clusterIns.Id)
		}
		go ss.cs.CreatStatusTimeout(clusterIns.Id)
		go ss.cs.ScanClusterPod(clusterIns.Id, clusterIns.K8sName, 1, true)
		go ss.cs.PollingPVStatus([]models.PersistentVolume{dbPV}, core1.VolumeBound)
		dbPV.PvcName = pvcName
		_, _ = ss.Engine.ID(dbPV.Id).Cols("pvc_name").Update(&dbPV)
	}
	return err
}
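// Usage sketch (illustrative): CreateMysqlByPV reads the resource limits from
// storageMap["mem"] and storageMap["cpu"] as float64 values (matching the
// type assertions above), and qos may be nil when no QoS row should be created:
//
//	err := svc.CreateMysqlByPV(42, map[string]interface{}{"mem": 4.0, "cpu": 2.0}, "restored from pv", 7, "mysql-restore", nil)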
// UserAssign assigns a storage class to tenants. A userIdStr of "-1" means
// ALL tenants, an empty string means no tenant, and otherwise it is a
// comma-separated list of user ids.
func (ss *storageService) UserAssign(id int, userIdStr string) error {
	if id <= 0 {
		return errors.New("storage id must > 0")
	}
	sc := models.Sc{Id: id}
	exist, err := ss.Engine.Cols("name", "sc_type").Get(&sc)
	if !exist {
		return fmt.Errorf("not found sc %v, error: %v", id, err)
	}
	assignAll := userIdStr == "-1"
	if assignAll {
		if sc.ScType == models.ScTypeUnique {
			return errors.New("the unique storage cannot be set to ALL")
		}
		_, _ = ss.Engine.Where("sc_id = ?", id).Delete(new(models.ScUser))
	} else {
		dbUsers := make([]models.ScUser, 0)
		err = ss.Engine.Where("sc_id = ?", id).Find(&dbUsers)
		if err != nil {
			return err
		}
		userStrList := strings.Split(userIdStr, ",")
		if sc.ScType == models.ScTypeUnique {
			// Handle unique (exclusive) storage.
			var clusterCount int64
			clusterCount, err = ss.Engine.Where("sc_name = ?", sc.Name).Count(new(models.ClusterInstance))
			if err != nil {
				return err
			}
			if clusterCount > 0 {
				return response.NewMsg("This storage is already in use and cannot be modified", "此存储已被使用,无法修改")
			}
			if len(userStrList) == 0 {
				_, err = ss.Engine.Where("sc_id = ?", id).Delete(new(models.ScUser))
				return err
			}
			if len(userStrList) != 1 {
				return errors.New("unique-storage only assign one user")
			}
			var userId int
			userId, err = strconv.Atoi(userStrList[0])
			if err != nil {
				return err
			}
			scUser := models.ScUser{UserId: userId, ScId: id}
			if len(dbUsers) == 0 {
				_, err = ss.Engine.Insert(&scUser)
			} else {
				_, err = ss.Engine.Where("sc_id = ?", id).Update(&scUser)
			}
		} else {
			// dbUsersM maps userId -> index into dbUsers.
			dbUsersM := map[int]int{}
			for i := range dbUsers {
				dbUsersM[dbUsers[i].UserId] = i
			}
			insertList := make([]models.ScUser, 0)
			for i := range userStrList {
				if len(userStrList[i]) == 0 {
					continue
				}
				var userId, err = strconv.Atoi(userStrList[i])
				if err != nil {
					utils.LoggerError(err)
					continue
				}
				if _, ok := dbUsersM[userId]; ok {
					// Already in the database: skip it and mark the entry with -1.
					dbUsersM[userId] = -1
					continue
				}
				insertList = append(insertList, models.ScUser{ScId: id, UserId: userId})
			}
			for _, v := range dbUsersM {
				if v != -1 {
					clusterCount, err := ss.Engine.Where("sc_name = ?", sc.Name).And("user_id = ?", dbUsers[v].UserId).Count(new(models.ClusterInstance))
					if err != nil {
						return err
					}
					if clusterCount > 0 {
						u := models.User{}
						_, _ = ss.Engine.ID(dbUsers[v].UserId).Cols("user_name").Get(&u)
						return response.NewMsg(fmt.Sprintf("%v have already used this storage, this user cannot be deleted", u.UserName), fmt.Sprintf("%v已使用此存储,不能删除此用户", u.UserName))
					}
				}
			}
			if len(insertList) > 0 {
				_, err := ss.Engine.Insert(&insertList)
				if err != nil {
					return err
				}
			}
			for _, v := range dbUsersM {
				if v != -1 {
					_, _ = ss.Engine.ID(dbUsers[v].Id).Delete(new(models.ScUser))
				}
			}
		}
	}
	_, err = ss.Engine.ID(id).Cols("assign_all").Update(&models.Sc{AssignAll: assignAll})
	return err
}
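// Usage sketch (illustrative) of the userIdStr convention documented above:
//
//	_ = svc.UserAssign(3, "-1")   // every tenant (shared storage only)
//	_ = svc.UserAssign(3, "7,12") // exactly users 7 and 12
//	_ = svc.UserAssign(3, "")     // no tenant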
", "%"+key+"%", "%"+key+"%").Get(&sc) utils.LoggerError(err) if len(sc.Name) > 0 { scPv = append(scPv, sc) } } assignAllSc := make([]models.Sc, 0) err = ss.Engine.Where("assign_all = true").Find(&assignAllSc) utils.LoggerError(err) if len(assignAllSc) > 0 { scPv = append(scPv, assignAllSc...) } count = int64(len(scPv)) if utils.MustInt(page, pageSize) { min := pageSize * (page - 1) max := min + pageSize scPv = scPv[min:utils.Min(max, len(scPv))] } } else { err := ss.Engine.Where("name like ? or describe like ? ", "%"+key+"%", "%"+key+"%").Limit(pageSize, pageSize*(page-1)).Desc("id").Find(&scPv) utils.LoggerError(err) count, _ = ss.Engine.Where("name like ? or describe like ? ", "%"+key+"%", "%"+key+"%").Count(&models.Sc{}) } scReturn := make([]models.ReturnSc, len(scPv)) for i, sc := range scPv { pv := make([]models.PersistentVolume, 0) if sc.ScType == models.ScTypeUnique { err := ss.Engine.Where(" sc_id = ?", sc.Id).OrderBy("id").Find(&pv) utils.LoggerError(err) sc.NodeNum = len(pv) } cluster := make([]models.ClusterInstance, 0) err := ss.Engine.Where("sc_name = ?", sc.Name).Omit("yaml_text").Find(&cluster) utils.LoggerError(err) var scUserRaw json.RawMessage if sc.AssignAll { scUserRaw = []byte("-1") } else { scUser := make([]models.ScUser, 0) err = ss.Engine.Where("sc_id = ?", sc.Id).Find(&scUser) utils.LoggerError(err) for i, user := range scUser { u := models.User{Id: user.UserId} _, err = ss.Engine.Get(&u) utils.LoggerError(err) scUser[i].UserName = u.UserName } scUserRaw, _ = json.Marshal(scUser) } scReturn[i] = models.ReturnSc{Sc: sc, Children: pv, Cluster: cluster, ScUser: scUserRaw} } if isFilter { for i, sc := range scReturn { if sc.ScType == models.ScTypeUnique && len(sc.Cluster) > 0 { scReturn = append(scReturn[0:i], scReturn[i+1:]...) 
func (ss *storageService) Add(scName string, reclaimPolicy string, remark string, orgTag string, userTag string, userId int, scType string, nodeNum int, userIdStr string) (models.Sc, error) {
	sc := models.Sc{
		Name:          scName,
		ScType:        scType,
		NodeNum:       nodeNum,
		ReclaimPolicy: reclaimPolicy,
		Describe:      remark,
		OrgTag:        orgTag,
		UserTag:       userTag,
		AssignAll:     userIdStr == "-1",
	}
	namespace := ss.cs.GetNameSpace()
	// Unique storage: create a new StorageClass in k8s.
	if scType == "unique-storage" {
		reclaimPolicyCore := core1.PersistentVolumeReclaimPolicy(reclaimPolicy)
		scConfig := storage1.StorageClass{
			TypeMeta: meta1.TypeMeta{
				Kind:       "StorageClass",
				APIVersion: "storage.k8s.io/v1",
			},
			ObjectMeta: meta1.ObjectMeta{
				Name: scName,
			},
			Provisioner:   "kubernetes.io/no-provisioner",
			ReclaimPolicy: &reclaimPolicyCore,
		}
		err := ss.cs.CreateOption("sc", namespace, &scConfig, meta1.CreateOptions{})
		if err != nil {
			return sc, err
		}
	} else {
		err, scAddr := ss.cs.GetResources("sc", scName, namespace, meta1.GetOptions{})
		if err != nil {
			return sc, err
		}
		if value, ok := (*scAddr).(*storage1.StorageClass); ok {
			sc.ReclaimPolicy = string(*value.ReclaimPolicy)
		}
	}
	_, err := ss.Engine.Insert(&sc)
	if err != nil || sc.AssignAll {
		return sc, err
	}
	userIds := strings.Split(userIdStr, ",")
	su := make([]models.ScUser, 0)
	for i := range userIds {
		if len(userIds[i]) == 0 {
			continue
		}
		id, err := strconv.Atoi(userIds[i])
		if err != nil {
			utils.LoggerError(err)
			continue
		}
		su = append(su, models.ScUser{UserId: id, ScId: sc.Id})
	}
	if userId > 0 {
		su = append(su, models.ScUser{UserId: userId, ScId: sc.Id})
	}
	if len(su) > 0 {
		_, _ = ss.Engine.Insert(&su)
	}
	return sc, nil
}

func (ss *storageService) Update(id int, remake string, nodeNum int) error {
	sc := models.Sc{
		Id:       id,
		Describe: remake,
		NodeNum:  nodeNum,
	}
	_, err := ss.Engine.ID(sc.Id).Update(&sc)
	return err
}

func (ss *storageService) Delete(id int) error {
	if id <= 0 {
		return errors.New("storage id must > 0")
	}
	sc := models.Sc{
		Id: id,
	}
	exist, err := ss.Engine.Get(&sc)
	if err != nil {
		return err
	}
	if !exist {
		return nil
	}
	clusterCount, err := ss.Engine.Where("sc_name = ?", sc.Name).Count(new(models.ClusterInstance))
	if err != nil {
		return err
	}
	if clusterCount > 0 {
		return response.NewMsg("This storage is occupied by the cluster and cannot be deleted", "此存储被集群占用,无法删除!")
	}
	if sc.ScType == models.ScTypeUnique {
		pvCount, err := ss.Engine.Where("sc_id = ?", id).Count(new(models.PersistentVolume))
		if err != nil {
			return err
		}
		if pvCount > 0 {
			return response.NewMsg("This store has pv and cannot be deleted", "请先删除此存储下的pv!")
		}
		err = ss.cs.DeleteOption("sc", sc.Name, ss.cs.GetNameSpace(), meta1.DeleteOptions{})
		if err != nil {
			return err
		}
	}
	_, err = ss.Engine.ID(sc.Id).Delete(&sc)
	_, _ = ss.Engine.Where("sc_id = ?", sc.Id).Delete(new(models.ScUser))
	return err
}
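// Usage sketch (illustrative): PvAdd expects mountPoint as "ip:port" (it is
// split on ":"), size as a bare GiB count, and reports failures through the
// returned message string rather than an error:
//
//	ok, pv, msg := svc.PvAdd(3, "pv-demo", "10.0.0.5:3260", "iqn.2021-02.com.example:disk1", 0, "100", "BBBB", "org1", "default")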
func (ss *storageService) PvAdd(storageId int, pvName string, mountPoint string, iqn string, lun int, size string, userTag string, orgTag string, namespace string) (bool, models.PersistentVolume, string) {
	ipAddr := ""
	port := ""
	if strings.Contains(mountPoint, ":") {
		ipAddr = strings.Split(mountPoint, ":")[0]
		port = strings.Split(mountPoint, ":")[1]
	} else {
		return false, models.PersistentVolume{}, "mountPoint format error"
	}
	pv := models.PersistentVolume{
		Name:     pvName,
		ScId:     storageId,
		Lun:      lun,
		Capacity: size,
		Iqn:      iqn,
		IpAddr:   ipAddr,
		Port:     port,
		UserTag:  userTag,
		OrgTag:   orgTag,
	}
	sc := models.Sc{Id: storageId}
	success, err := ss.Engine.Get(&sc)
	utils.LoggerError(err)
	if !success {
		if err != nil {
			return success, pv, err.Error()
		}
		return success, pv, ""
	}
	scConfig := core1.PersistentVolume{
		TypeMeta: meta1.TypeMeta{
			Kind:       "PersistentVolume",
			APIVersion: "v1",
		},
		ObjectMeta: meta1.ObjectMeta{
			Name: pvName,
		},
		Spec: core1.PersistentVolumeSpec{
			Capacity: core1.ResourceList{
				core1.ResourceName("storage"): resource.MustParse(fmt.Sprintf("%sGi", size)),
			},
			AccessModes:      []core1.PersistentVolumeAccessMode{core1.PersistentVolumeAccessMode("ReadWriteOnce")},
			StorageClassName: sc.Name,
			PersistentVolumeSource: core1.PersistentVolumeSource{
				ISCSI: &core1.ISCSIPersistentVolumeSource{
					TargetPortal:   mountPoint,
					IQN:            iqn,
					ISCSIInterface: "iser",
					Lun:            int32(lun),
					FSType:         "xfs",
					ReadOnly:       false,
				},
			},
		},
	}
	err = ss.cs.CreateOption("pv", namespace, &scConfig, meta1.CreateOptions{})
	if err != nil {
		return false, pv, err.Error()
	}
	_, err = ss.Engine.Insert(&pv)
	if err != nil {
		return false, pv, err.Error()
	}
	return true, pv, ""
}

func (ss *storageService) PvDelete(id int) (err error) {
	pv := models.PersistentVolume{Id: id}
	hasPV, err := ss.Engine.Get(&pv)
	if !hasPV {
		return fmt.Errorf("pv %v not found, error: %v", id, err)
	}
	if pv.Status == string(core1.VolumeBound) {
		return errors.New("a bound PV cannot be deleted")
	}
	sc := models.Sc{Id: pv.ScId}
	hasSc, err := ss.Engine.Get(&sc)
	if !hasSc {
		return fmt.Errorf("sc %v not found, error: %v", pv.ScId, err)
	}
	if sc.ScType != "shared-storage" {
		// Make sure no cluster still occupies this storage.
		existCluster, err := ss.Engine.Exist(&models.ClusterInstance{ScName: sc.Name})
		if err != nil {
			return err
		}
		if existCluster {
			return errors.New("clusters still use this PV")
		}
	}
	err = ss.cs.DeleteOption("pv", pv.Name, "", meta1.DeleteOptions{})
	if err != nil && !utils.ErrorContains(err, "not found") {
		return
	}
	_, err = ss.Engine.ID(pv.Id).Delete(&pv)
	if err != nil {
		return
	}
	return nil
}

func (ss *storageService) SelectOneScByName(name string) (models.Sc, bool) {
	var sc models.Sc
	_, err := ss.Engine.Where(" name = ? ", name).Get(&sc)
	utils.LoggerError(err)
	return sc, err == nil
}

func (ss *storageService) SelectOnePvByName(name string) (models.PersistentVolume, bool) {
	var pv models.PersistentVolume
	_, err := ss.Engine.Where(" name = ? ", name).Get(&pv)
	utils.LoggerError(err)
	return pv, err == nil
}

func (ss *storageService) UserRegister(userId int, scList []map[string]interface{}) e
suffix:

= ss.Engine.NewSession()
	defer session.Close()
	if err := session.Begin(); err != nil {
		return err
	}
	scUser := models.ScUser{UserId: userId}
	_, err := session.Delete(&scUser)
	if err != nil {
		return err
	}
	if len(scList) > 0 {
		for _, scInfo := range scList {
			if scInfo["type"] == "ready" {
				scId := int(scInfo["id"].(float64))
				sc := models.Sc{Id: scId}
				success, err := session.Cols("sc_type").Get(&sc)
				if !success {
					_ = session.Rollback()
					return fmt.Errorf("not found sc %v, error: %v", scId, err)
				}
				count, err := session.Where(" sc_id = ? ", scId).Count(new(models.ScUser))
				if err != nil {
					_ = session.Rollback()
					return err
				}
				if sc.ScType == "unique-storage" && count > 1 {
					return errors.New("unique-storage only assign one user")
				}
				scUser := models.ScUser{UserId: userId, ScId: sc.Id}
				_, err = session.Insert(&scUser)
				if err != nil {
					_ = session.Rollback()
					return err
				}
			}
		}
	}
	err = session.Commit()
	return err
}

func (ss *storageService) DeletescUserbyUser(userId int) (bool, string) {
	scUser := models.ScUser{UserId: userId}
	_, err := ss.Engine.Delete(&scUser)
	if err != nil {
		utils.LoggerError(err)
		return true, err.Error()
	}
	return true, ""
}

func (ss *storageService) AddscUserbyuser(userId int, scList []map[string]interface{}) (bool, string) {
	session := ss.Engine.NewSession()
	defer session.Close()
	if err := session.Begin(); err != nil {
		iris.New().Logger().Info(err.Error())
	}
	if len(scList) > 0 {
		for _, scInfo := range scList {
			if scInfo["type"] == "ready" {
				scId := int(scInfo["id"].(float64))
				sc := models.Sc{Id: scId}
				success, err := session.Get(&sc)
				if !success {
					_ = session.Rollback()
					if err != nil {
						return false, err.Error()
					}
					return false, ""
				}
				scUserList := make([]models.ScUser, 0)
				err = session.Where(" sc_id = ? ", scId).Find(&scUserList)
				if err != nil {
					_ = session.Rollback()
					return false, err.Error()
				}
				if sc.ScType == "unique-storage" {
					if len(scUserList) > 1 {
						return false, "unique-storage only assign one user"
					}
				}
				scUser := models.ScUser{UserId: userId, ScId: sc.Id}
				_, err = session.Insert(&scUser)
				if err != nil {
					_ = session.Rollback()
					utils.LoggerError(err)
					return false, err.Error()
				}
			}
		}
	}
	_ = session.Commit()
	return true, ""
}
middle: "rror { session :"
fim_type: identifier_name
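For orientation, a minimal wiring sketch for the service this row's code defines. It is illustrative, not from the dataset: the DSN, the MySQL driver import, and the way a concrete CommonService is obtained are all assumptions (its implementation lives elsewhere in the DBaas codebase).

package main

import (
	"log"

	"DBaas/service" // assumed import path for the package above

	_ "github.com/go-sql-driver/mysql" // assumed MySQL driver
	"github.com/go-xorm/xorm"
)

func main() {
	// xorm.NewEngine(driver, dsn); the DSN here is a placeholder.
	engine, err := xorm.NewEngine("mysql", "user:pass@tcp(127.0.0.1:3306)/dbaas?charset=utf8")
	if err != nil {
		log.Fatal(err)
	}
	var cs service.CommonService // obtain a concrete implementation elsewhere
	svc := service.NewStorageService(engine, cs)

	// List the first page of PVs visible to tenant tag "BBBB" (hypothetical tag).
	pvs, total, err := svc.PVList(1, 20, "", "BBBB")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("showing %d of %d PVs", len(pvs), total)
}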
and cannot be deleted", "此存储被集群占用,无法删除!") } if sc.ScType == models.ScTypeUnique { pvCount, err := ss.Engine.Where("sc_id = ?", id).Count(new(models.PersistentVolume)) if err != nil { return err } if pvCount > 0 { return response.NewMsg("This store has pv and cannot be deleted", "请先删除此存储下的pv!") } err = ss.cs.DeleteOption("sc", sc.Name, ss.cs.GetNameSpace(), meta1.DeleteOptions{}) if err != nil { return err } } _, err = ss.Engine.ID(sc.Id).Delete(&sc) _, _ = ss.Engine.Where("sc_id = ?", sc.Id).Delete(new(models.ScUser)) return err } func (ss *storageService) PvAdd(storageId int, pvName string, mountPoint string, iqn string, lun int, size string, userTag string, orgTag string, namespace string) (bool, models.PersistentVolume, string) { ipAddr := "" port := "" if strings.Contains(mountPoint, ":") { ipAddr = strings.Split(mountPoint, ":")[0] port = strings.Split(mountPoint, ":")[1] } else { return false, models.PersistentVolume{}, "mountPoint format error" } pv := models.PersistentVolume{ Name: pvName, ScId: storageId, Lun: lun, Capacity: size, Iqn: iqn, IpAddr: ipAddr, Port: port, UserTag: userTag, OrgTag: orgTag, } sc := models.Sc{Id: storageId} success, err := ss.Engine.Get(&sc) utils.LoggerError(err) if !success { if err != nil { return success, pv, err.Error() } else { return success, pv, "" } } scConfig := core1.PersistentVolume{ TypeMeta: meta1.TypeMeta{ Kind: "PersistentVolume", APIVersion: "v1", }, ObjectMeta: meta1.ObjectMeta{ Name: pvName, }, Spec: core1.PersistentVolumeSpec{ Capacity: core1.ResourceList{ core1.ResourceName("storage"): resource.MustParse(fmt.Sprintf("%sGi", size)), }, AccessModes: []core1.PersistentVolumeAccessMode{core1.PersistentVolumeAccessMode("ReadWriteOnce")}, StorageClassName: sc.Name, PersistentVolumeSource: core1.PersistentVolumeSource{ ISCSI: &core1.ISCSIPersistentVolumeSource{ TargetPortal: mountPoint, IQN: iqn, ISCSIInterface: "iser", Lun: int32(lun), FSType: "xfs", ReadOnly: false, }, }, }, } err = ss.cs.CreateOption("pv", namespace, &scConfig, meta1.CreateOptions{}) if err != nil { return false, pv, err.Error() } _, err = ss.Engine.Insert(&pv) if err != nil { return false, pv, err.Error() } return true, pv, "" } func (ss *storageService) PvDelete(id int) (err error) { pv := models.PersistentVolume{Id: id} hasPV, err := ss.Engine.Get(&pv) if !hasPV { return fmt.Errorf("Not found pv %v error: %s ", id, err) } if pv.Status == string(core1.VolumeBound) { return errors.New("Bound status cannot be deleted ") } sc := models.Sc{Id: pv.ScId} hasSc, err := ss.Engine.Get(&sc) if !hasSc { return fmt.Errorf("Not found sc %v error: %s ", pv.ScId, err) } if sc.ScType != "shared-storage" { // 判断没有没cluster集群占用 existCluster, err := ss.Engine.Exist(&models.ClusterInstance{ScName: sc.Name}) if err != nil { return err } if existCluster { return errors.New("There are clusters in this PV ") } } err = ss.cs.DeleteOption("pv", pv.Name, "", meta1.DeleteOptions{}) if err != nil && !utils.ErrorContains(err, "not found") { return } _, err = ss.Engine.ID(pv.Id).Delete(&pv) if err != nil { return } return nil } func (ss *storageService) SelectOneScByName(name string) (models.Sc, bool) { var sc models.Sc _, err := ss.Engine.Where(" name = ? ", name).Get(&sc) utils.LoggerError(err) return sc, err == nil } func (ss *storageService) SelectOnePvByName(name string) (models.PersistentVolume, bool) { var pv models.PersistentVolume _, err := ss.Engine.Where(" name = ? 
", name).Get(&pv) utils.LoggerError(err) return pv, err == nil } func (ss *storageService) UserRegister(userId int, scList []map[string]interface{}) error { session := ss.Engine.NewSession() defer session.Close() if err := session.Begin(); err != nil { return err } scUser := models.ScUser{UserId: userId} _, err := session.Delete(&scUser) if err != nil { return err } if len(scList) > 0 { for _, scInfo := range scList { if scInfo["type"] == "ready" { scId := int(scInfo["id"].(float64)) sc := models.Sc{Id: scId} success, err := session.Cols("sc_type").Get(&sc) if !success { _ = session.Rollback() return fmt.Errorf("not found sc %v, error: %v", scId, err) } count, err := session.Where(" sc_id = ? ", scId).Count(new(models.ScUser)) if err != nil { _ = session.Rollback() return err } if sc.ScType == "unique-storage" && count > 1 { return errors.New("unique-storage only assign one user") } scUser := models.ScUser{UserId: userId, ScId: sc.Id} _, err = session.Insert(&scUser) if err != nil { _ = session.Rollback() return err } } } } err = session.Commit() return err } func (ss *storageService) DeletescUserbyUser(userId int) (bool, string) { scUser := models.ScUser{UserId: userId} _, err := ss.Engine.Delete(&scUser) if err != nil { utils.LoggerError(err) return true, err.Error() } return true, "" } func (ss *storageService) AddscUserbyuser(userId int, scList []map[string]interface{}) (bool, string) { session := ss.Engine.NewSession() defer session.Close() if err := session.Begin(); err != nil { iris.New().Logger().Info(err.Error()) } if len(scList) > 0 { for _, scInfo := range scList { if scInfo["type"] == "ready" { scId := int(scInfo["id"].(float64)) sc := models.Sc{Id: scId} success, err := session.Get(&sc) if !success { if err != nil { session.Rollback() return false, err.Error() } else { session.Rollback() return false, "" } } scUserList := make([]models.ScUser, 0) err = session.Where(" sc_id = ? ", scId).Find(&scUserList) if err != nil { session.Rollback() return false, err.Error() } if sc.ScType == "unique-storage" { if len(scUserList) > 1 { return false, "unique-storage only assign one user" } } scUser := models.ScUser{UserId: userId, ScId: sc.Id} _, err = session.Insert(&scUser) if err != nil { session.Rollback() utils.LoggerError(err) return false, err.Error() } } } } session.Commit() return true, "" }
d by the cluster
conditional_block
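The pattern behind UserAssign's tenant reconciliation in this sample is worth seeing in isolation: existing assignments that reappear in the request are marked as kept, unmarked ones are deleted, and new IDs are inserted. Below is a minimal, self-contained Go sketch of that diff; diffUsers is a hypothetical helper written for illustration, not part of the service code above.

package main

import "fmt"

// diffUsers mirrors the marker-based set diff in UserAssign: existing IDs
// that reappear in the incoming list are kept, the rest are deleted, and
// incoming IDs with no existing row are inserted.
func diffUsers(existing, incoming []int) (toInsert, toDelete []int) {
	seen := map[int]bool{}
	for _, id := range existing {
		seen[id] = false // not yet confirmed by the incoming list
	}
	for _, id := range incoming {
		if _, ok := seen[id]; ok {
			seen[id] = true // kept; plays the role of the -1 marker
		} else {
			toInsert = append(toInsert, id)
		}
	}
	for id, kept := range seen {
		if !kept {
			toDelete = append(toDelete, id)
		}
	}
	return
}

func main() {
	ins, del := diffUsers([]int{1, 2, 3}, []int{2, 3, 4})
	fmt.Println(ins, del) // [4] [1]
}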
lib.rs
//! # Lattice Client //! //! This library provides a client that communicates with a waSCC lattice using //! the lattice protocol over the NATS message broker. All waSCC hosts compiled //! in lattice mode have the ability to automatically form self-healing, self-managing //! infrastructure-agnostic clusters called [lattices](https://wascc.dev/docs/lattice/overview/) extern crate log; #[macro_use] extern crate serde; use std::{collections::HashMap, path::PathBuf, time::Duration}; use crossbeam::Sender; use wascap::prelude::*; use controlplane::{ LaunchAck, LaunchAuctionRequest, LaunchAuctionResponse, LaunchCommand, TerminateCommand, }; pub use events::{BusEvent, CloudEvent}; use crate::controlplane::{ LaunchProviderCommand, ProviderAuctionRequest, ProviderAuctionResponse, ProviderLaunchAck, }; pub mod controlplane; mod events; pub const INVENTORY_ACTORS: &str = "inventory.actors"; pub const INVENTORY_HOSTS: &str = "inventory.hosts"; pub const INVENTORY_BINDINGS: &str = "inventory.bindings"; pub const INVENTORY_CAPABILITIES: &str = "inventory.capabilities"; pub const EVENTS: &str = "events"; const AUCTION_TIMEOUT_SECONDS: u64 = 5; /// A response to a lattice probe for inventory. Note that these responses are returned /// through regular (non-queue) subscriptions via a scatter-gather like pattern, so the /// client is responsible for aggregating many of these replies. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub enum InventoryResponse { /// A single host probe response Host(HostProfile), /// A list of all registered actors within a host Actors { host: String, actors: Vec<Claims<Actor>>, }, /// A list of configuration bindings of actors originating from the given host Bindings { host: String, bindings: Vec<Binding>, }, /// A list of capability providers currently running within the given host Capabilities { host: String, capabilities: Vec<HostedCapability>, }, } /// An overview of host information #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] pub struct HostProfile { /// The public key (subject) of the host pub id: String, /// The host's labels pub labels: HashMap<String, String>, /// Host uptime in milliseconds pub uptime_ms: u128, } /// Represents an instance of a capability, which is a binding name and /// the capability descriptor #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct HostedCapability { pub binding_name: String, pub descriptor: wascc_codec::capabilities::CapabilityDescriptor, } /// Represents a single configuration binding from an actor to a capability ID and binding /// name, with the specified configuration values. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct Binding { pub actor: String, pub capability_id: String, pub binding_name: String, pub configuration: HashMap<String, String>, } /// A client for interacting with the lattice pub struct Client { nc: nats::Connection, namespace: Option<String>, timeout: Duration, } impl Client { /// Creates a new lattice client, connecting to the NATS server at the /// given host with an optional set of credentials (JWT auth) pub fn new( host: &str, credsfile: Option<PathBuf>, call_timeout: Duration, namespace: Option<String>, ) -> Self { Client { nc: get_connection(host, credsfile), timeout: call_timeout, namespace, } } pub fn with_connection( nc: nats::Connection, call_timeout: Duration, namespace: Option<String>, ) -> Self { Client { nc, timeout: call_timeout, namespace, } } /// Retrieves the list of all hosts running within the lattice. 
If it takes a host longer /// than the call timeout period to reply to the probe, it will not be included in the list /// of hosts. pub fn get_hosts(&self) -> std::result::Result<Vec<HostProfile>, Box<dyn std::error::Error>> { let mut hosts = vec![]; let sub = self .nc .request_multi(self.gen_subject(INVENTORY_HOSTS).as_ref(), &[])?; for msg in sub.timeout_iter(self.timeout) { let ir: InventoryResponse = serde_json::from_slice(&msg.data)?; if let InventoryResponse::Host(h) = ir { hosts.push(h); } } Ok(hosts) } /// Retrieves a list of all bindings from actors to capabilities within the lattice (provided /// the host responds to the probe within the client timeout period) pub fn get_bindings( &self, ) -> std::result::Result<HashMap<String, Vec<Binding>>, Box<dyn std::error::Error>> { let mut host_bindings = HashMap::new(); let sub = self .nc .request_multi(self.gen_subject(INVENTORY_BINDINGS).as_ref(), &[])?; for msg in sub.timeout_iter(self.timeout) { let ir: InventoryResponse = serde_json::from_slice(&msg.data)?; if let InventoryResponse::Bindings { bindings: b, host } = ir { host_bindings .entry(host) .and_modify(|e: &mut Vec<Binding>| e.extend_from_slice(&b)) .or_insert(b.clone()); } } Ok(host_bindings) } /// Retrieves the list of all actors currently running within the lattice (as discovered within /// the client timeout period) pub fn get_actors( &self, ) -> std::result::Result<HashMap<String, Vec<Claims<Actor>>>, Box<dyn std::error::Error>>
/// Retrieves the list of all capabilities within the lattice (discovery limited by the client timeout period) pub fn get_capabilities( &self, ) -> std::result::Result<HashMap<String, Vec<HostedCapability>>, Box<dyn std::error::Error>> { let mut host_caps = HashMap::new(); let sub = self .nc .request_multi(self.gen_subject(INVENTORY_CAPABILITIES).as_ref(), &[])?; for msg in sub.timeout_iter(self.timeout) { let ir: InventoryResponse = serde_json::from_slice(&msg.data)?; if let InventoryResponse::Capabilities { host, capabilities } = ir { host_caps .entry(host) .and_modify(|e: &mut Vec<HostedCapability>| e.extend_from_slice(&capabilities)) .or_insert(capabilities.clone()); } } Ok(host_caps) } /// Watches the lattice for bus events. This will create a subscription in a background thread, so callers /// are responsible for ensuring their process remains alive however long is appropriate. Pass the sender /// half of a channel to receive the events pub fn watch_events(&self, sender: Sender<BusEvent>) -> Result<(), Box<dyn std::error::Error>> { let _sub = self .nc .subscribe(self.gen_subject(EVENTS).as_ref())? .with_handler(move |msg| { let ce: CloudEvent = serde_json::from_slice(&msg.data).unwrap(); let be: BusEvent = serde_json::from_str(&ce.data).unwrap(); let _ = sender.send(be); Ok(()) }); Ok(()) } /// Performs an auction among all hosts on the lattice, requesting that the given actor be launched (loaded+started) /// on a suitable host as described by the set of constraints. Only hosts that believe they can launch the actor /// will reply. In other words, there will be no negative responses in the result vector, only a list of suitable /// hosts. The actor to be launched is identified by an OCI registry reference pub fn perform_actor_launch_auction( &self, actor_id: &str, constraints: HashMap<String, String>, ) -> Result<Vec<LaunchAuctionResponse>, Box<dyn std::error::Error>> { let mut results = vec![]; let req = LaunchAuctionRequest::new(actor_id, constraints); let sub = self.nc.request_multi( self.gen_subject(&format!( "{}.{}", controlplane::CPLANE_PREFIX, controlplane::AUCTION_REQ )) .as_ref(), &serde_json::to_vec(&req)?, )?; for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) { let resp: LaunchAuctionResponse = serde_json::from_slice(&msg.data)?; results.push(resp); } Ok(results) } /// Performs an auction among all hosts on the lattice, requesting that the given capability provider /// (indicated by OCI image reference) be loaded/started. Hosts that believe they can host the /// provider given the constraints will respond to the auction pub fn perform_provider_launch_auction( &self, provider_ref: &str, binding_name: &str, constraints: HashMap<String, String>, ) -> Result<Vec<ProviderAuctionResponse>, Box<dyn std::error::Error>> { let mut results = vec![]; let req = ProviderAuctionRequest::new(provider_ref, binding_name, constraints); let sub = self.nc.request_multi( self.gen_subject(&format!( "{}.{}", controlplane::CPLANE_PREFIX, controlplane::PROVIDER_AUCTION_REQ )) .as_ref(), &serde_json::to_vec(&req)?, )?; for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) { let resp: ProviderAuctionResponse = serde_json::from_slice(&msg.data)?; results.push(resp); } Ok(results) } /// After collecting the results of a provider launch auction, a "winner" from among the hosts /// can be selected and told to launch the given provider. The provider's bytes will be retrieved /// from the OCI registry. 
This function does _not_ confirm successful launch, only receipt /// of the launch request. pub fn launch_provider_on_host( &self, provider_ref: &str, host_id: &str, binding_name: &str, ) -> Result<ProviderLaunchAck, Box<dyn std::error::Error>> { let msg = LaunchProviderCommand { provider_ref: provider_ref.to_string(), binding_name: binding_name.to_string(), }; let ack: ProviderLaunchAck = serde_json::from_slice( &self .nc .request_timeout( &self.gen_launch_provider_subject(host_id), &serde_json::to_vec(&msg)?, self.timeout, )? .data, )?; Ok(ack) } /// After collecting the results of a launch auction, a "winner" from among the hosts can be selected and /// told to launch a given actor. Note that the actor's bytes will be retrieved from the OCI registry. /// This function does _not_ confirm successful launch, only that the target host acknowledged the request /// to launch. pub fn launch_actor_on_host( &self, actor_id: &str, host_id: &str, ) -> Result<LaunchAck, Box<dyn std::error::Error>> { let msg = LaunchCommand { actor_id: actor_id.to_string(), }; let ack: LaunchAck = serde_json::from_slice( &self .nc .request_timeout( &self.gen_launch_actor_subject(host_id), &serde_json::to_vec(&msg)?, self.timeout, )? .data, )?; Ok(ack) } /// Sends a command to the specified host telling it to terminate an actor. The success of this command indicates /// a successful publication, and not necessarily a successful remote actor termination. Monitor the lattice /// events to see if the actor was successfully terminated pub fn stop_actor_on_host( &self, actor_id: &str, host_id: &str, ) -> Result<(), Box<dyn std::error::Error>> { let msg = TerminateCommand { actor_id: actor_id.to_string(), }; self.nc.publish( &self.gen_terminate_actor_subject(host_id), &serde_json::to_vec(&msg)?, )?; let _ = self.nc.flush(); Ok(()) } fn gen_subject(&self, subject: &str) -> String { match self.namespace.as_ref() { Some(s) => format!("{}.wasmbus.{}", s, subject), None => format!("wasmbus.{}", subject), } } } fn get_connection(host: &str, credsfile: Option<PathBuf>) -> nats::Connection { let mut opts = if let Some(creds) = credsfile { nats::Options::with_credentials(creds) } else { nats::Options::new() }; opts = opts.with_name("waSCC Lattice"); opts.connect(host).unwrap() }
{ let mut host_actors = HashMap::new(); let sub = self .nc .request_multi(self.gen_subject(INVENTORY_ACTORS).as_ref(), &[])?; for msg in sub.timeout_iter(self.timeout) { let ir: InventoryResponse = serde_json::from_slice(&msg.data)?; if let InventoryResponse::Actors { host, actors } = ir { host_actors .entry(host) .and_modify(|e: &mut Vec<Claims<Actor>>| e.extend_from_slice(&actors)) .or_insert(actors.clone()); } } Ok(host_actors) }
identifier_body
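All four inventory getters in this sample share one aggregation shape: iterate the timed-out scatter-gather replies and fold them into a per-host map. A minimal sketch of that fold, with plain strings standing in for the NATS subscription and the InventoryResponse variants:

use std::collections::HashMap;

// Fold (host, items) replies into a per-host map; `or_default` supplies an
// empty Vec for first-seen hosts, so every reply reduces to a single extend.
fn aggregate(replies: Vec<(String, Vec<String>)>) -> HashMap<String, Vec<String>> {
    let mut by_host: HashMap<String, Vec<String>> = HashMap::new();
    for (host, items) in replies {
        by_host.entry(host).or_default().extend(items);
    }
    by_host
}

fn main() {
    let merged = aggregate(vec![
        ("host-a".into(), vec!["actor-1".into()]),
        ("host-a".into(), vec!["actor-2".into()]),
        ("host-b".into(), vec!["actor-3".into()]),
    ]);
    assert_eq!(merged["host-a"].len(), 2);
    println!("{:?}", merged);
}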
lib.rs
//! # Lattice Client //! //! This library provides a client that communicates with a waSCC lattice using //! the lattice protocol over the NATS message broker. All waSCC hosts compiled //! in lattice mode have the ability to automatically form self-healing, self-managing //! infrastructure-agnostic clusters called [lattices](https://wascc.dev/docs/lattice/overview/) extern crate log; #[macro_use] extern crate serde; use std::{collections::HashMap, path::PathBuf, time::Duration}; use crossbeam::Sender; use wascap::prelude::*; use controlplane::{ LaunchAck, LaunchAuctionRequest, LaunchAuctionResponse, LaunchCommand, TerminateCommand, }; pub use events::{BusEvent, CloudEvent}; use crate::controlplane::{ LaunchProviderCommand, ProviderAuctionRequest, ProviderAuctionResponse, ProviderLaunchAck, }; pub mod controlplane; mod events; pub const INVENTORY_ACTORS: &str = "inventory.actors"; pub const INVENTORY_HOSTS: &str = "inventory.hosts"; pub const INVENTORY_BINDINGS: &str = "inventory.bindings"; pub const INVENTORY_CAPABILITIES: &str = "inventory.capabilities"; pub const EVENTS: &str = "events"; const AUCTION_TIMEOUT_SECONDS: u64 = 5; /// A response to a lattice probe for inventory. Note that these responses are returned /// through regular (non-queue) subscriptions via a scatter-gather like pattern, so the /// client is responsible for aggregating many of these replies. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub enum InventoryResponse { /// A single host probe response Host(HostProfile), /// A list of all registered actors within a host Actors { host: String, actors: Vec<Claims<Actor>>, }, /// A list of configuration bindings of actors originating from the given host Bindings { host: String, bindings: Vec<Binding>, }, /// A list of capability providers currently running within the given host Capabilities { host: String, capabilities: Vec<HostedCapability>, }, } /// An overview of host information #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] pub struct HostProfile { /// The public key (subject) of the host pub id: String, /// The host's labels pub labels: HashMap<String, String>, /// Host uptime in milliseconds pub uptime_ms: u128, } /// Represents an instance of a capability, which is a binding name and /// the capability descriptor #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct HostedCapability { pub binding_name: String, pub descriptor: wascc_codec::capabilities::CapabilityDescriptor, } /// Represents a single configuration binding from an actor to a capability ID and binding /// name, with the specified configuration values. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct Binding { pub actor: String, pub capability_id: String, pub binding_name: String, pub configuration: HashMap<String, String>, } /// A client for interacting with the lattice pub struct Client { nc: nats::Connection, namespace: Option<String>, timeout: Duration, } impl Client { /// Creates a new lattice client, connecting to the NATS server at the /// given host with an optional set of credentials (JWT auth) pub fn new( host: &str, credsfile: Option<PathBuf>, call_timeout: Duration, namespace: Option<String>, ) -> Self { Client { nc: get_connection(host, credsfile), timeout: call_timeout, namespace, } } pub fn with_connection( nc: nats::Connection, call_timeout: Duration, namespace: Option<String>, ) -> Self { Client { nc, timeout: call_timeout, namespace, } } /// Retrieves the list of all hosts running within the lattice. 
If it takes a host longer /// than the call timeout period to reply to the probe, it will not be included in the list /// of hosts. pub fn get_hosts(&self) -> std::result::Result<Vec<HostProfile>, Box<dyn std::error::Error>> { let mut hosts = vec![]; let sub = self .nc .request_multi(self.gen_subject(INVENTORY_HOSTS).as_ref(), &[])?; for msg in sub.timeout_iter(self.timeout) { let ir: InventoryResponse = serde_json::from_slice(&msg.data)?; if let InventoryResponse::Host(h) = ir { hosts.push(h); } } Ok(hosts) } /// Retrieves a list of all bindings from actors to capabilities within the lattice (provided /// the host responds to the probe within the client timeout period) pub fn get_bindings( &self, ) -> std::result::Result<HashMap<String, Vec<Binding>>, Box<dyn std::error::Error>> { let mut host_bindings = HashMap::new(); let sub = self .nc .request_multi(self.gen_subject(INVENTORY_BINDINGS).as_ref(), &[])?; for msg in sub.timeout_iter(self.timeout) { let ir: InventoryResponse = serde_json::from_slice(&msg.data)?; if let InventoryResponse::Bindings { bindings: b, host } = ir { host_bindings .entry(host) .and_modify(|e: &mut Vec<Binding>| e.extend_from_slice(&b)) .or_insert(b.clone()); } } Ok(host_bindings) } /// Retrieves the list of all actors currently running within the lattice (as discovered within /// the client timeout period) pub fn get_actors( &self, ) -> std::result::Result<HashMap<String, Vec<Claims<Actor>>>, Box<dyn std::error::Error>> { let mut host_actors = HashMap::new(); let sub = self .nc .request_multi(self.gen_subject(INVENTORY_ACTORS).as_ref(), &[])?; for msg in sub.timeout_iter(self.timeout) { let ir: InventoryResponse = serde_json::from_slice(&msg.data)?; if let InventoryResponse::Actors { host, actors } = ir { host_actors .entry(host) .and_modify(|e: &mut Vec<Claims<Actor>>| e.extend_from_slice(&actors)) .or_insert(actors.clone()); } } Ok(host_actors) } /// Retrieves the list of all capabilities within the lattice (discovery limited by the client timeout period) pub fn get_capabilities( &self, ) -> std::result::Result<HashMap<String, Vec<HostedCapability>>, Box<dyn std::error::Error>> { let mut host_caps = HashMap::new(); let sub = self .nc .request_multi(self.gen_subject(INVENTORY_CAPABILITIES).as_ref(), &[])?; for msg in sub.timeout_iter(self.timeout) { let ir: InventoryResponse = serde_json::from_slice(&msg.data)?; if let InventoryResponse::Capabilities { host, capabilities } = ir { host_caps .entry(host) .and_modify(|e: &mut Vec<HostedCapability>| e.extend_from_slice(&capabilities)) .or_insert(capabilities.clone()); } } Ok(host_caps) } /// Watches the lattice for bus events. This will create a subscription in a background thread, so callers /// are responsible for ensuring their process remains alive however long is appropriate. Pass the sender /// half of a channel to receive the events pub fn watch_events(&self, sender: Sender<BusEvent>) -> Result<(), Box<dyn std::error::Error>> { let _sub = self .nc .subscribe(self.gen_subject(EVENTS).as_ref())? .with_handler(move |msg| { let ce: CloudEvent = serde_json::from_slice(&msg.data).unwrap(); let be: BusEvent = serde_json::from_str(&ce.data).unwrap(); let _ = sender.send(be); Ok(()) }); Ok(()) } /// Performs an auction among all hosts on the lattice, requesting that the given actor be launched (loaded+started) /// on a suitable host as described by the set of constraints. Only hosts that believe they can launch the actor /// will reply. 
In other words, there will be no negative responses in the result vector, only a list of suitable /// hosts. The actor to be launched is identified by an OCI registry reference pub fn perform_actor_launch_auction( &self, actor_id: &str, constraints: HashMap<String, String>, ) -> Result<Vec<LaunchAuctionResponse>, Box<dyn std::error::Error>> { let mut results = vec![]; let req = LaunchAuctionRequest::new(actor_id, constraints); let sub = self.nc.request_multi( self.gen_subject(&format!( "{}.{}", controlplane::CPLANE_PREFIX, controlplane::AUCTION_REQ )) .as_ref(), &serde_json::to_vec(&req)?, )?; for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) { let resp: LaunchAuctionResponse = serde_json::from_slice(&msg.data)?; results.push(resp); } Ok(results) } /// Performs an auction among all hosts on the lattice, requesting that the given capability provider /// (indicated by OCI image reference) be loaded/started. Hosts that believe they can host the /// provider given the constraints will respond to the auction pub fn perform_provider_launch_auction( &self, provider_ref: &str, binding_name: &str, constraints: HashMap<String, String>, ) -> Result<Vec<ProviderAuctionResponse>, Box<dyn std::error::Error>> { let mut results = vec![]; let req = ProviderAuctionRequest::new(provider_ref, binding_name, constraints); let sub = self.nc.request_multi( self.gen_subject(&format!( "{}.{}", controlplane::CPLANE_PREFIX, controlplane::PROVIDER_AUCTION_REQ )) .as_ref(), &serde_json::to_vec(&req)?, )?; for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) { let resp: ProviderAuctionResponse = serde_json::from_slice(&msg.data)?; results.push(resp); } Ok(results) } /// After collecting the results of a provider launch auction, a "winner" from among the hosts /// can be selected and told to launch the given provider. The provider's bytes will be retrieved /// from the OCI registry. This function does _not_ confirm successful launch, only receipt /// of the launch request. pub fn launch_provider_on_host( &self, provider_ref: &str, host_id: &str, binding_name: &str, ) -> Result<ProviderLaunchAck, Box<dyn std::error::Error>> { let msg = LaunchProviderCommand { provider_ref: provider_ref.to_string(), binding_name: binding_name.to_string(), }; let ack: ProviderLaunchAck = serde_json::from_slice( &self .nc .request_timeout( &self.gen_launch_provider_subject(host_id), &serde_json::to_vec(&msg)?, self.timeout, )? .data, )?; Ok(ack) } /// After collecting the results of a launch auction, a "winner" from among the hosts can be selected and /// told to launch a given actor. Note that the actor's bytes will be retrieved from the OCI registry. /// This function does _not_ confirm successful launch, only that the target host acknowledged the request /// to launch. pub fn launch_actor_on_host( &self, actor_id: &str, host_id: &str, ) -> Result<LaunchAck, Box<dyn std::error::Error>> { let msg = LaunchCommand { actor_id: actor_id.to_string(), }; let ack: LaunchAck = serde_json::from_slice( &self .nc .request_timeout( &self.gen_launch_actor_subject(host_id), &serde_json::to_vec(&msg)?, self.timeout, )? .data, )?; Ok(ack) } /// Sends a command to the specified host telling it to terminate an actor. The success of this command indicates /// a successful publication, and not necessarily a successful remote actor termination. 
Monitor the lattice /// events to see if the actor was successfully terminated pub fn stop_actor_on_host( &self, actor_id: &str, host_id: &str, ) -> Result<(), Box<dyn std::error::Error>> { let msg = TerminateCommand { actor_id: actor_id.to_string(), }; self.nc.publish( &self.gen_terminate_actor_subject(host_id), &serde_json::to_vec(&msg)?, )?; let _ = self.nc.flush(); Ok(()) } fn
(&self, subject: &str) -> String { match self.namespace.as_ref() { Some(s) => format!("{}.wasmbus.{}", s, subject), None => format!("wasmbus.{}", subject), } } } fn get_connection(host: &str, credsfile: Option<PathBuf>) -> nats::Connection { let mut opts = if let Some(creds) = credsfile { nats::Options::with_credentials(creds) } else { nats::Options::new() }; opts = opts.with_name("waSCC Lattice"); opts.connect(host).unwrap() }
gen_subject
identifier_name
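The held-out span in this sample is gen_subject, which is a pure function of the client's optional namespace. A standalone sketch of the same prefixing rule, easy to unit test outside the client:

// Prefix a lattice subject with the optional namespace ahead of the fixed
// "wasmbus" token, exactly as the method recovered above does.
fn gen_subject(namespace: Option<&str>, subject: &str) -> String {
    match namespace {
        Some(ns) => format!("{}.wasmbus.{}", ns, subject),
        None => format!("wasmbus.{}", subject),
    }
}

fn main() {
    assert_eq!(gen_subject(None, "events"), "wasmbus.events");
    assert_eq!(gen_subject(Some("prod"), "events"), "prod.wasmbus.events");
}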
lib.rs
//! # Lattice Client //! //! This library provides a client that communicates with a waSCC lattice using //! the lattice protocol over the NATS message broker. All waSCC hosts compiled //! in lattice mode have the ability to automatically form self-healing, self-managing //! infrastructure-agnostic clusters called [lattices](https://wascc.dev/docs/lattice/overview/) extern crate log; #[macro_use] extern crate serde; use std::{collections::HashMap, path::PathBuf, time::Duration}; use crossbeam::Sender; use wascap::prelude::*; use controlplane::{ LaunchAck, LaunchAuctionRequest, LaunchAuctionResponse, LaunchCommand, TerminateCommand, }; pub use events::{BusEvent, CloudEvent}; use crate::controlplane::{ LaunchProviderCommand, ProviderAuctionRequest, ProviderAuctionResponse, ProviderLaunchAck, }; pub mod controlplane; mod events;
pub const INVENTORY_CAPABILITIES: &str = "inventory.capabilities"; pub const EVENTS: &str = "events"; const AUCTION_TIMEOUT_SECONDS: u64 = 5; /// A response to a lattice probe for inventory. Note that these responses are returned /// through regular (non-queue) subscriptions via a scatter-gather like pattern, so the /// client is responsible for aggregating many of these replies. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub enum InventoryResponse { /// A single host probe response Host(HostProfile), /// A list of all registered actors within a host Actors { host: String, actors: Vec<Claims<Actor>>, }, /// A list of configuration bindings of actors originating from the given host Bindings { host: String, bindings: Vec<Binding>, }, /// A list of capability providers currently running within the given host Capabilities { host: String, capabilities: Vec<HostedCapability>, }, } /// An overview of host information #[derive(Debug, Deserialize, Serialize, Clone, PartialEq)] pub struct HostProfile { /// The public key (subject) of the host pub id: String, /// The host's labels pub labels: HashMap<String, String>, /// Host uptime in milliseconds pub uptime_ms: u128, } /// Represents an instance of a capability, which is a binding name and /// the capability descriptor #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct HostedCapability { pub binding_name: String, pub descriptor: wascc_codec::capabilities::CapabilityDescriptor, } /// Represents a single configuration binding from an actor to a capability ID and binding /// name, with the specified configuration values. #[derive(Debug, Serialize, Deserialize, Clone, PartialEq)] pub struct Binding { pub actor: String, pub capability_id: String, pub binding_name: String, pub configuration: HashMap<String, String>, } /// A client for interacting with the lattice pub struct Client { nc: nats::Connection, namespace: Option<String>, timeout: Duration, } impl Client { /// Creates a new lattice client, connecting to the NATS server at the /// given host with an optional set of credentials (JWT auth) pub fn new( host: &str, credsfile: Option<PathBuf>, call_timeout: Duration, namespace: Option<String>, ) -> Self { Client { nc: get_connection(host, credsfile), timeout: call_timeout, namespace, } } pub fn with_connection( nc: nats::Connection, call_timeout: Duration, namespace: Option<String>, ) -> Self { Client { nc, timeout: call_timeout, namespace, } } /// Retrieves the list of all hosts running within the lattice. If it takes a host longer /// than the call timeout period to reply to the probe, it will not be included in the list /// of hosts. 
pub fn get_hosts(&self) -> std::result::Result<Vec<HostProfile>, Box<dyn std::error::Error>> { let mut hosts = vec![]; let sub = self .nc .request_multi(self.gen_subject(INVENTORY_HOSTS).as_ref(), &[])?; for msg in sub.timeout_iter(self.timeout) { let ir: InventoryResponse = serde_json::from_slice(&msg.data)?; if let InventoryResponse::Host(h) = ir { hosts.push(h); } } Ok(hosts) } /// Retrieves a list of all bindings from actors to capabilities within the lattice (provided /// the host responds to the probe within the client timeout period) pub fn get_bindings( &self, ) -> std::result::Result<HashMap<String, Vec<Binding>>, Box<dyn std::error::Error>> { let mut host_bindings = HashMap::new(); let sub = self .nc .request_multi(self.gen_subject(INVENTORY_BINDINGS).as_ref(), &[])?; for msg in sub.timeout_iter(self.timeout) { let ir: InventoryResponse = serde_json::from_slice(&msg.data)?; if let InventoryResponse::Bindings { bindings: b, host } = ir { host_bindings .entry(host) .and_modify(|e: &mut Vec<Binding>| e.extend_from_slice(&b)) .or_insert(b.clone()); } } Ok(host_bindings) } /// Retrieves the list of all actors currently running within the lattice (as discovered within /// the client timeout period) pub fn get_actors( &self, ) -> std::result::Result<HashMap<String, Vec<Claims<Actor>>>, Box<dyn std::error::Error>> { let mut host_actors = HashMap::new(); let sub = self .nc .request_multi(self.gen_subject(INVENTORY_ACTORS).as_ref(), &[])?; for msg in sub.timeout_iter(self.timeout) { let ir: InventoryResponse = serde_json::from_slice(&msg.data)?; if let InventoryResponse::Actors { host, actors } = ir { host_actors .entry(host) .and_modify(|e: &mut Vec<Claims<Actor>>| e.extend_from_slice(&actors)) .or_insert(actors.clone()); } } Ok(host_actors) } /// Retrieves the list of all capabilities within the lattice (discovery limited by the client timeout period) pub fn get_capabilities( &self, ) -> std::result::Result<HashMap<String, Vec<HostedCapability>>, Box<dyn std::error::Error>> { let mut host_caps = HashMap::new(); let sub = self .nc .request_multi(self.gen_subject(INVENTORY_CAPABILITIES).as_ref(), &[])?; for msg in sub.timeout_iter(self.timeout) { let ir: InventoryResponse = serde_json::from_slice(&msg.data)?; if let InventoryResponse::Capabilities { host, capabilities } = ir { host_caps .entry(host) .and_modify(|e: &mut Vec<HostedCapability>| e.extend_from_slice(&capabilities)) .or_insert(capabilities.clone()); } } Ok(host_caps) } /// Watches the lattice for bus events. This will create a subscription in a background thread, so callers /// are responsible for ensuring their process remains alive however long is appropriate. Pass the sender /// half of a channel to receive the events pub fn watch_events(&self, sender: Sender<BusEvent>) -> Result<(), Box<dyn std::error::Error>> { let _sub = self .nc .subscribe(self.gen_subject(EVENTS).as_ref())? .with_handler(move |msg| { let ce: CloudEvent = serde_json::from_slice(&msg.data).unwrap(); let be: BusEvent = serde_json::from_str(&ce.data).unwrap(); let _ = sender.send(be); Ok(()) }); Ok(()) } /// Performs an auction among all hosts on the lattice, requesting that the given actor be launched (loaded+started) /// on a suitable host as described by the set of constraints. Only hosts that believe they can launch the actor /// will reply. In other words, there will be no negative responses in the result vector, only a list of suitable /// hosts. 
The actor to be launched is identified by an OCI registry reference pub fn perform_actor_launch_auction( &self, actor_id: &str, constraints: HashMap<String, String>, ) -> Result<Vec<LaunchAuctionResponse>, Box<dyn std::error::Error>> { let mut results = vec![]; let req = LaunchAuctionRequest::new(actor_id, constraints); let sub = self.nc.request_multi( self.gen_subject(&format!( "{}.{}", controlplane::CPLANE_PREFIX, controlplane::AUCTION_REQ )) .as_ref(), &serde_json::to_vec(&req)?, )?; for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) { let resp: LaunchAuctionResponse = serde_json::from_slice(&msg.data)?; results.push(resp); } Ok(results) } /// Performs an auction among all hosts on the lattice, requesting that the given capability provider /// (indicated by OCI image reference) be loaded/started. Hosts that believe they can host the /// provider given the constraints will respond to the auction pub fn perform_provider_launch_auction( &self, provider_ref: &str, binding_name: &str, constraints: HashMap<String, String>, ) -> Result<Vec<ProviderAuctionResponse>, Box<dyn std::error::Error>> { let mut results = vec![]; let req = ProviderAuctionRequest::new(provider_ref, binding_name, constraints); let sub = self.nc.request_multi( self.gen_subject(&format!( "{}.{}", controlplane::CPLANE_PREFIX, controlplane::PROVIDER_AUCTION_REQ )) .as_ref(), &serde_json::to_vec(&req)?, )?; for msg in sub.timeout_iter(Duration::from_secs(AUCTION_TIMEOUT_SECONDS)) { let resp: ProviderAuctionResponse = serde_json::from_slice(&msg.data)?; results.push(resp); } Ok(results) } /// After collecting the results of a provider launch auction, a "winner" from among the hosts /// can be selected and told to launch the given provider. The provider's bytes will be retrieved /// from the OCI registry. This function does _not_ confirm successful launch, only receipt /// of the launch request. pub fn launch_provider_on_host( &self, provider_ref: &str, host_id: &str, binding_name: &str, ) -> Result<ProviderLaunchAck, Box<dyn std::error::Error>> { let msg = LaunchProviderCommand { provider_ref: provider_ref.to_string(), binding_name: binding_name.to_string(), }; let ack: ProviderLaunchAck = serde_json::from_slice( &self .nc .request_timeout( &self.gen_launch_provider_subject(host_id), &serde_json::to_vec(&msg)?, self.timeout, )? .data, )?; Ok(ack) } /// After collecting the results of a launch auction, a "winner" from among the hosts can be selected and /// told to launch a given actor. Note that the actor's bytes will be retrieved from the OCI registry. /// This function does _not_ confirm successful launch, only that the target host acknowledged the request /// to launch. pub fn launch_actor_on_host( &self, actor_id: &str, host_id: &str, ) -> Result<LaunchAck, Box<dyn std::error::Error>> { let msg = LaunchCommand { actor_id: actor_id.to_string(), }; let ack: LaunchAck = serde_json::from_slice( &self .nc .request_timeout( &self.gen_launch_actor_subject(host_id), &serde_json::to_vec(&msg)?, self.timeout, )? .data, )?; Ok(ack) } /// Sends a command to the specified host telling it to terminate an actor. The success of this command indicates /// a successful publication, and not necessarily a successful remote actor termination. 
Monitor the lattice /// events to see if the actor was successfully terminated pub fn stop_actor_on_host( &self, actor_id: &str, host_id: &str, ) -> Result<(), Box<dyn std::error::Error>> { let msg = TerminateCommand { actor_id: actor_id.to_string(), }; self.nc.publish( &self.gen_terminate_actor_subject(host_id), &serde_json::to_vec(&msg)?, )?; let _ = self.nc.flush(); Ok(()) } fn gen_subject(&self, subject: &str) -> String { match self.namespace.as_ref() { Some(s) => format!("{}.wasmbus.{}", s, subject), None => format!("wasmbus.{}", subject), } } } fn get_connection(host: &str, credsfile: Option<PathBuf>) -> nats::Connection { let mut opts = if let Some(creds) = credsfile { nats::Options::with_credentials(creds) } else { nats::Options::new() }; opts = opts.with_name("waSCC Lattice"); opts.connect(host).unwrap() }
pub const INVENTORY_ACTORS: &str = "inventory.actors"; pub const INVENTORY_HOSTS: &str = "inventory.hosts"; pub const INVENTORY_BINDINGS: &str = "inventory.bindings";
random_line_split
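perform_actor_launch_auction returns only affirmative bids, so choosing a winner is left to the caller. The sketch below shows one plausible selection policy; LaunchAuctionResponse here is a stand-in struct with a hypothetical available_memory_mb field, not the real controlplane type:

// Pick the bidder with the most free memory. Any ranking works here, since
// every responder has already declared it can satisfy the constraints.
#[derive(Debug)]
struct LaunchAuctionResponse {
    host_id: String,
    available_memory_mb: u64, // hypothetical scoring field
}

fn pick_winner(mut bids: Vec<LaunchAuctionResponse>) -> Option<LaunchAuctionResponse> {
    bids.sort_by(|a, b| b.available_memory_mb.cmp(&a.available_memory_mb));
    bids.into_iter().next()
}

fn main() {
    let bids = vec![
        LaunchAuctionResponse { host_id: "NHOSTA".into(), available_memory_mb: 512 },
        LaunchAuctionResponse { host_id: "NHOSTB".into(), available_memory_mb: 2048 },
    ];
    let winner = pick_winner(bids).expect("at least one bid");
    assert_eq!(winner.host_id, "NHOSTB");
}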
mod.rs
pub mod cut_detector; pub mod ring; pub mod view; use crate::{ common::{ConfigId, Endpoint, NodeId, Scheduler, SchedulerEvents}, consensus::FastPaxos, error::Result, event::{Event, NodeStatusChange}, monitor::Monitor, transport::{ proto::{ self, Alert, BatchedAlertMessage, EdgeStatus, JoinMessage, JoinResponse, JoinStatus, Metadata, NodeStatus, PreJoinMessage, }, Message, Request, Response, }, }; use cut_detector::CutDetector; use view::View; use futures::FutureExt; use std::{ collections::{HashMap, VecDeque}, time::{Duration, Instant}, }; use tokio::sync::{broadcast, mpsc, oneshot}; use tracing::info; type OutboundResponse = oneshot::Sender<crate::Result<Response>>; #[derive(Debug)] pub struct Membership<M> { host_addr: Endpoint, view: View, cut_detector: CutDetector, monitor: M, alerts: VecDeque<proto::Alert>, last_enqueued_alert: Instant, joiners_to_respond: Vec<Endpoint>, // joiners_to_respond: HashMap<Endpoint, VecDeque<OutboundResponse>>, batch_window: Duration, paxos: FastPaxos, announced_proposal: bool, joiner_data: HashMap<Endpoint, (NodeId, Metadata)>, event_tx: broadcast::Sender<Event>, monitor_cancellers: Vec<oneshot::Sender<()>>, messages: VecDeque<(Endpoint, Message)>, } impl<M: Monitor> Membership<M> { #[allow(dead_code)] pub fn new( host_addr: Endpoint, view: View, cut_detector: CutDetector, monitor: M, event_tx: broadcast::Sender<Event>, ) -> Self { // TODO: setup startup tasks let paxos = FastPaxos::new( host_addr.clone(), view.get_membership_size(), view.get_current_config_id(), ); Self { host_addr, view, cut_detector, monitor, paxos, alerts: VecDeque::default(), last_enqueued_alert: Instant::now(), joiners_to_respond: Vec::new(), batch_window: Duration::new(10, 0), announced_proposal: false, joiner_data: HashMap::default(), monitor_cancellers: vec![], event_tx, messages: VecDeque::new(), } } #[allow(dead_code)] fn send_initial_notification(&self) { self.event_tx .send(Event::ViewChange(self.get_initial_view_changes())) .expect("Unable to send response"); } fn get_initial_view_changes(&self) -> Vec<NodeStatusChange> { let nodes = self.view.get_ring(0); nodes .iter() .map(|_| NodeStatusChange { endpoint: self.host_addr.clone(), status: EdgeStatus::Up, metadata: Metadata::default(), }) .collect() } pub fn view(&self) -> Vec<&Endpoint> { self.view .get_ring(0) .expect("There is always a ring!") .iter() .collect() } pub fn step(&mut self, from: Endpoint, msg: proto::RequestKind) { use proto::RequestKind::*; match msg { PreJoin(msg) => self.handle_pre_join(from, msg), Join(msg) => self.handle_join(from, msg), BatchedAlert(msg) => self.handle_batched_alert_message(msg), Consensus(msg) => { let view = self .view .get_ring(0) .expect("Ring zero should always exist") .iter() .collect(); let msgs = self.paxos.step(msg, view); self.messages.extend(msgs); } _ => todo!("request type not implemented yet"), } } pub fn start_classic_round(&mut self) -> Result<()> { // TODO: make paxos synchronous // self.paxos.start_classic_round() todo!() } pub fn handle_pre_join(&mut self, from: Endpoint, msg: PreJoinMessage) { let PreJoinMessage { sender, node_id, .. 
} = msg; let status = self.view.is_safe_to_join(&sender, &node_id); let config_id = self.view.get_config().config_id(); let endpoints = if status == JoinStatus::SafeToJoin || status == JoinStatus::HostnameAlreadyInRing { self.view.get_expected_observers(&sender) } else { Vec::new() }; let join_res = JoinResponse { sender, status, config_id, endpoints, identifiers: Vec::new(), cluster_metadata: HashMap::new(), }; info!( message = "Join at seed.", seed = %self.host_addr, sender = %join_res.sender, config = %join_res.config_id, size = %self.view.get_membership_size() ); self.messages .push_back((from, proto::ResponseKind::Join(join_res).into())); } pub fn handle_join(&mut self, from: Endpoint, msg: JoinMessage) { if msg.config_id == self.view.get_current_config_id() { let config = self.view.get_config(); // TODO: do we still need to do this? // self.joiners_to_respond // .entry(msg.sender.clone()) // .or_insert_with(VecDeque::new) // .push_back(from); let alert = proto::Alert { src: self.host_addr.clone(), dst: msg.sender.clone(), edge_status: proto::EdgeStatus::Up, config_id: config.config_id(), node_id: Some(msg.node_id.clone()), ring_number: msg.ring_number, metadata: None, }; self.enqueue_alert(alert); } else { // This is the case where the config changed between phase 1 // and phase 2 of the join process. let response = if self.view.is_host_present(&msg.sender) && self.view.is_node_id_present(&msg.node_id) { let config = self.view.get_config(); // Race condition where an observer already crossed H messages for the joiner and // changed the configuration, but the JoinPhase2 message shows up at the observer // after it has already added the joiner. In this case, simply tell the joiner it's // safe to join proto::JoinResponse { sender: self.host_addr.clone(), status: JoinStatus::SafeToJoin, config_id: config.config_id(), endpoints: config.endpoints.clone(), identifiers: config.node_ids.clone(), cluster_metadata: HashMap::new(), } } else { proto::JoinResponse { sender: self.host_addr.clone(), status: JoinStatus::ConfigChanged, config_id: self.view.get_current_config_id(), endpoints: vec![], identifiers: vec![], cluster_metadata: HashMap::new(), } }; self.messages .push_back((from, proto::ResponseKind::Join(response).into())); } } // Invoked by observers of a node for failure detection fn handle_probe_message(&self) -> Response { Response::new_probe(NodeStatus::Up) // TODO: FIXME THIS IS WRONG } // Receives edge update events and delivers them to the cut detector to check if it will // return a valid proposal. // // Edge update messages that do not affect the ongoing proposal need to be dropped. fn handle_batched_alert_message(&mut self, msg_batch: BatchedAlertMessage) { let current_config_id = self.view.get_current_config_id(); let size = self.view.get_membership_size(); let mut proposal: Vec<Endpoint> = msg_batch .alerts .iter() // filter out messages which violate membership invariants // And then run the cut detector to see if there is a new proposal .filter_map(|message| { if !self.filter_alert_messages(&msg_batch, message, size, current_config_id) { return None; } Some(self.cut_detector.aggregate(message)) }) .flatten() .collect(); proposal.extend(self.cut_detector.invalidate_failing_edges(&mut self.view)); if !proposal.is_empty() { self.announced_proposal = true; self.event_tx .send(Event::ViewChangeProposal( self.create_node_status_change_list(proposal.clone()), )) .expect("Unable to send response"); // TODO: make paxos synchronous // self.paxos.propose(proposal, scheduler).await? 
} } fn create_node_status_change_list(&self, proposal: Vec<Endpoint>) -> Vec<NodeStatusChange> { proposal .iter() .map(|node| NodeStatusChange { endpoint: node.to_string(), status: if self.view.is_host_present(node) { EdgeStatus::Down } else
, metadata: Metadata::default(), }) .collect() } // Filter for removing invalid edge update messages. These include messages // that were for a configuration that the current node is not a part of, and messages // that violate the semantics of being a part of a configuration fn filter_alert_messages( &mut self, _message_batch: &BatchedAlertMessage, // Might require this later for logging message: &Alert, _size: usize, config_id: ConfigId, ) -> bool { let dst = &message.dst; if config_id != message.config_id { return false; } // An invariant to maintain is that a node can only go into the membership set once // and leave it once if message.edge_status == EdgeStatus::Down && !self.view.is_host_present(&dst) { return false; } if message.edge_status == EdgeStatus::Up { // Add joiner data after the node is done being added to the set. Store in a // temp location for now. self.joiner_data.insert( dst.clone(), ( message.node_id.clone().take().unwrap(), message.metadata.clone().take().unwrap(), ), ); } true } pub fn create_failure_detectors( &mut self, scheduler: &mut Scheduler, ) -> Result<mpsc::Receiver<(Endpoint, ConfigId)>> { todo!() // let (tx, rx) = mpsc::channel(1000); // for subject in self.view.get_subjects(&self.host_addr)? { // let (mon_tx, mon_rx) = oneshot::channel(); // let fut = self.monitor.monitor( // subject.clone(), // client.clone(), // self.view.get_current_config_id(), // tx.clone(), // mon_rx, // ); // scheduler.push(Box::pin(fut.map(|_| SchedulerEvents::None))); // self.monitor_cancellers.push(mon_tx); // } // Ok(rx) } #[allow(dead_code)] pub fn edge_failure_notification(&mut self, subject: Endpoint, config_id: ConfigId) { if config_id != self.view.get_current_config_id() { // TODO: Figure out why &String does not impl Value // info!( // target: "Failure notification from old config.", // subject = subject, // config = self.view.get_current_config_id(), // old_config = config_id // ); // return; } let alert = proto::Alert { src: self.host_addr.clone(), dst: subject.clone(), edge_status: proto::EdgeStatus::Down, config_id, node_id: None, ring_number: self .view .get_ring_numbers(&self.host_addr, &subject) .expect("Unable to get ring number"), metadata: None, }; self.enqueue_alert(alert); } pub fn get_batch_alerts(&mut self) -> Option<proto::BatchedAlertMessage> { if !self.alerts.is_empty() && (Instant::now() - self.last_enqueued_alert) > self.batch_window { let alerts = self.alerts.drain(..).collect(); Some(proto::BatchedAlertMessage { sender: self.host_addr.clone(), alerts, }) } else { None } } pub fn enqueue_alert(&mut self, alert: proto::Alert) { self.last_enqueued_alert = Instant::now(); self.alerts.push_back(alert); } /// This is invoked when the consensus module decides on a proposal /// /// Any node that is not in the membership list will be added to the cluster, /// and any node that is currently in the membership list, but not in the proposal /// will be removed. 
pub fn on_decide(&mut self, proposal: Vec<Endpoint>) { // TODO: Handle metadata updates // TODO: Handle subscriptions self.cancel_failure_detectors(); for node in &proposal { if self.view.is_host_present(&node) { self.view.ring_delete(&node); } else if let Some((node_id, _metadata)) = self.joiner_data.remove(node) { self.view.ring_add(node.clone(), node_id); } else { panic!("Node not present in pre-join metadata") } } let _current_config_id = self.view.get_current_config_id(); // clear data structures self.cut_detector.clear(); self.announced_proposal = false; if self.view.is_host_present(&self.host_addr) { // TODO: inform edge failure detector about config change } else { // We need to gracefully exit by calling a user handler and invalidating the current // session unimplemented!("How do you manage a callback again?"); } // TODO: Instantiate new consensus instance // self.paxos = FastPaxos::new(self.host_addr, self.view.get_membership_size(), ) self.respond_to_joiners(proposal); } fn cancel_failure_detectors(&mut self) { for signal in self.monitor_cancellers.drain(..) { let _ = signal.send(()); } } fn respond_to_joiners(&mut self, proposal: Vec<Endpoint>) { let configuration = self.view.get_config(); let join_res = JoinResponse { sender: self.host_addr.clone(), status: JoinStatus::SafeToJoin, config_id: configuration.config_id(), endpoints: configuration.endpoints.clone(), identifiers: configuration.node_ids.clone(), cluster_metadata: HashMap::new(), // TODO: metadata manager }; for node in proposal { self.messages .push_back((node, proto::ResponseKind::Join(join_res.clone()).into())); // self.joiners_to_respond.remove(&node).and_then(|joiners| { // joiners.into_iter().for_each(|joiner| { // joiner // .send(Ok(Response::new_join(join_res.clone()))) // .expect("Unable to send response"); // }); // // This is so the compiler can infer the type of the closure to be Option<()> // Some(()) // }); } } pub fn drain_messages(&mut self) -> Vec<(Endpoint, Message)> { let mut msgs = Vec::new(); while let Some(msg) = self.messages.pop_front() { msgs.push(msg); } msgs } }
{ EdgeStatus::Up }
conditional_block
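get_batch_alerts in the sample above flushes the alert queue only once the batch window has elapsed since the last enqueue, so bursts of edge reports coalesce into one BatchedAlertMessage. A minimal sketch of that rule, detached from the membership state:

use std::collections::VecDeque;
use std::time::{Duration, Instant};

// Alerts accumulate in a queue; a drain succeeds only when the window has
// elapsed since the most recent enqueue, mirroring get_batch_alerts.
struct Batcher {
    alerts: VecDeque<String>,
    last_enqueued: Instant,
    window: Duration,
}

impl Batcher {
    fn enqueue(&mut self, alert: String) {
        self.last_enqueued = Instant::now();
        self.alerts.push_back(alert);
    }

    fn drain_if_due(&mut self) -> Option<Vec<String>> {
        if !self.alerts.is_empty() && self.last_enqueued.elapsed() > self.window {
            Some(self.alerts.drain(..).collect())
        } else {
            None
        }
    }
}

fn main() {
    let mut b = Batcher { alerts: VecDeque::new(), last_enqueued: Instant::now(), window: Duration::from_millis(50) };
    b.enqueue("edge down: node-a".to_string());
    assert!(b.drain_if_due().is_none()); // window not yet elapsed
    std::thread::sleep(Duration::from_millis(60));
    assert_eq!(b.drain_if_due().unwrap().len(), 1);
}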
mod.rs
pub mod cut_detector; pub mod ring; pub mod view; use crate::{ common::{ConfigId, Endpoint, NodeId, Scheduler, SchedulerEvents}, consensus::FastPaxos, error::Result, event::{Event, NodeStatusChange}, monitor::Monitor, transport::{ proto::{ self, Alert, BatchedAlertMessage, EdgeStatus, JoinMessage, JoinResponse, JoinStatus, Metadata, NodeStatus, PreJoinMessage, }, Message, Request, Response, }, }; use cut_detector::CutDetector; use view::View; use futures::FutureExt; use std::{ collections::{HashMap, VecDeque}, time::{Duration, Instant}, }; use tokio::sync::{broadcast, mpsc, oneshot}; use tracing::info; type OutboundResponse = oneshot::Sender<crate::Result<Response>>; #[derive(Debug)] pub struct Membership<M> { host_addr: Endpoint, view: View, cut_detector: CutDetector, monitor: M, alerts: VecDeque<proto::Alert>, last_enqueued_alert: Instant, joiners_to_respond: Vec<Endpoint>, // joiners_to_respond: HashMap<Endpoint, VecDeque<OutboundResponse>>, batch_window: Duration, paxos: FastPaxos, announced_proposal: bool, joiner_data: HashMap<Endpoint, (NodeId, Metadata)>, event_tx: broadcast::Sender<Event>, monitor_cancellers: Vec<oneshot::Sender<()>>, messages: VecDeque<(Endpoint, Message)>, } impl<M: Monitor> Membership<M> { #[allow(dead_code)] pub fn new( host_addr: Endpoint, view: View, cut_detector: CutDetector, monitor: M, event_tx: broadcast::Sender<Event>, ) -> Self { // TODO: setup startup tasks let paxos = FastPaxos::new( host_addr.clone(), view.get_membership_size(), view.get_current_config_id(), ); Self { host_addr, view, cut_detector, monitor, paxos, alerts: VecDeque::default(), last_enqueued_alert: Instant::now(), joiners_to_respond: Vec::new(), batch_window: Duration::new(10, 0), announced_proposal: false, joiner_data: HashMap::default(), monitor_cancellers: vec![], event_tx, messages: VecDeque::new(), } } #[allow(dead_code)] fn send_initial_notification(&self) { self.event_tx .send(Event::ViewChange(self.get_initial_view_changes())) .expect("Unable to send response"); } fn get_initial_view_changes(&self) -> Vec<NodeStatusChange> { let nodes = self.view.get_ring(0); nodes .iter() .map(|_| NodeStatusChange { endpoint: self.host_addr.clone(), status: EdgeStatus::Up, metadata: Metadata::default(), }) .collect() } pub fn view(&self) -> Vec<&Endpoint> { self.view .get_ring(0) .expect("There is always a ring!") .iter() .collect() } pub fn step(&mut self, from: Endpoint, msg: proto::RequestKind) { use proto::RequestKind::*; match msg { PreJoin(msg) => self.handle_pre_join(from, msg), Join(msg) => self.handle_join(from, msg), BatchedAlert(msg) => self.handle_batched_alert_message(msg), Consensus(msg) => { let view = self .view .get_ring(0) .expect("Ring zero should always exist") .iter() .collect(); let msgs = self.paxos.step(msg, view); self.messages.extend(msgs); } _ => todo!("request type not implemented yet"), } } pub fn start_classic_round(&mut self) -> Result<()> { // TODO: make paxos synchronous // self.paxos.start_classic_round() todo!() } pub fn handle_pre_join(&mut self, from: Endpoint, msg: PreJoinMessage) { let PreJoinMessage { sender, node_id, .. 
} = msg; let status = self.view.is_safe_to_join(&sender, &node_id); let config_id = self.view.get_config().config_id(); let endpoints = if status == JoinStatus::SafeToJoin || status == JoinStatus::HostnameAlreadyInRing { self.view.get_expected_observers(&sender) } else { Vec::new() }; let join_res = JoinResponse { sender, status, config_id, endpoints, identifiers: Vec::new(), cluster_metadata: HashMap::new(), }; info!( message = "Join at seed.", seed = %self.host_addr, sender = %join_res.sender, config = %join_res.config_id, size = %self.view.get_membership_size() ); self.messages .push_back((from, proto::ResponseKind::Join(join_res).into())); } pub fn handle_join(&mut self, from: Endpoint, msg: JoinMessage) { if msg.config_id == self.view.get_current_config_id() { let config = self.view.get_config(); // TODO: do we still need to do this? // self.joiners_to_respond // .entry(msg.sender.clone()) // .or_insert_with(VecDeque::new) // .push_back(from); let alert = proto::Alert { src: self.host_addr.clone(), dst: msg.sender.clone(), edge_status: proto::EdgeStatus::Up, config_id: config.config_id(), node_id: Some(msg.node_id.clone()), ring_number: msg.ring_number, metadata: None, }; self.enqueue_alert(alert); } else { // This is the case where the config changed between phase 1 // and phase 2 of the join process. let response = if self.view.is_host_present(&msg.sender) && self.view.is_node_id_present(&msg.node_id) { let config = self.view.get_config(); // Race condition where an observer already crossed H messages for the joiner and // changed the configuration, but the JoinPhase2 message shows up at the observer // after it has already added the joiner. In this case, simply tell the joiner it's // safe to join proto::JoinResponse {
cluster_metadata: HashMap::new(), } }; self.messages .push_back((from, proto::ResponseKind::Join(response).into())); } } // Invoked by observers of a node for failure detection fn handle_probe_message(&self) -> Response { Response::new_probe(NodeStatus::Up) // TODO: FIXME THIS IS WRONG } // Receives edge update events and delivers them to the cut detector to check if it will // return a valid proposal. // // Edge update messages that do not affect the ongoing proposal need to be dropped. fn handle_batched_alert_message(&mut self, msg_batch: BatchedAlertMessage) { let current_config_id = self.view.get_current_config_id(); let size = self.view.get_membership_size(); let mut proposal: Vec<Endpoint> = msg_batch .alerts .iter() // filter out messages which violate membership invariants // And then run the cut detector to see if there is a new proposal .filter_map(|message| { if !self.filter_alert_messages(&msg_batch, message, size, current_config_id) { return None; } Some(self.cut_detector.aggregate(message)) }) .flatten() .collect(); proposal.extend(self.cut_detector.invalidate_failing_edges(&mut self.view)); if !proposal.is_empty() { self.announced_proposal = true; self.event_tx .send(Event::ViewChangeProposal( self.create_node_status_change_list(proposal.clone()), )) .expect("Unable to send response"); // TODO: make paxos synchronous // self.paxos.propose(proposal, scheduler).await? } } fn create_node_status_change_list(&self, proposal: Vec<Endpoint>) -> Vec<NodeStatusChange> { proposal .iter() .map(|node| NodeStatusChange { endpoint: node.to_string(), status: if self.view.is_host_present(node) { EdgeStatus::Down } else { EdgeStatus::Up }, metadata: Metadata::default(), }) .collect() } // Filter for removing invalid edge update messages. These include messages // that were for a configuration that the current node is not a part of, and messages // that violate the semantics of being a part of a configuration fn filter_alert_messages( &mut self, _message_batch: &BatchedAlertMessage, // Might require this later for logging message: &Alert, _size: usize, config_id: ConfigId, ) -> bool { let dst = &message.dst; if config_id != message.config_id { return false; } // An invariant to maintain is that a node can only go into the membership set once // and leave it once if message.edge_status == EdgeStatus::Down && !self.view.is_host_present(&dst) { return false; } if message.edge_status == EdgeStatus::Up { // Add joiner data after the node is done being added to the set. Store in a // temp location for now. self.joiner_data.insert( dst.clone(), ( message.node_id.clone().take().unwrap(), message.metadata.clone().take().unwrap(), ), ); } true } pub fn create_failure_detectors( &mut self, scheduler: &mut Scheduler, ) -> Result<mpsc::Receiver<(Endpoint, ConfigId)>> { todo!() // let (tx, rx) = mpsc::channel(1000); // for subject in self.view.get_subjects(&self.host_addr)?
{ // let (mon_tx, mon_rx) = oneshot::channel(); // let fut = self.monitor.monitor( // subject.clone(), // client.clone(), // self.view.get_current_config_id(), // tx.clone(), // mon_rx, // ); // scheduler.push(Box::pin(fut.map(|_| SchedulerEvents::None))); // self.monitor_cancellers.push(mon_tx); // } // Ok(rx) } #[allow(dead_code)] pub fn edge_failure_notification(&mut self, subject: Endpoint, config_id: ConfigId) { if config_id != self.view.get_current_config_id() { // TODO: Figure out why &String does not impl Value // info!( // target: "Failure notification from old config.", // subject = subject, // config = self.view.get_current_config_id(), // old_config = config_id // ); // return; } let alert = proto::Alert { src: self.host_addr.clone(), dst: subject.clone(), edge_status: proto::EdgeStatus::Down, config_id, node_id: None, ring_number: self .view .get_ring_numbers(&self.host_addr, &subject) .expect("Unable to get ring number"), metadata: None, }; self.enqueue_alert(alert); } pub fn get_batch_alerts(&mut self) -> Option<proto::BatchedAlertMessage> { if !self.alerts.is_empty() && (Instant::now() - self.last_enqueued_alert) > self.batch_window { let alerts = self.alerts.drain(..).collect(); Some(proto::BatchedAlertMessage { sender: self.host_addr.clone(), alerts, }) } else { None } } pub fn enqueue_alert(&mut self, alert: proto::Alert) { self.last_enqueued_alert = Instant::now(); self.alerts.push_back(alert); } /// This is invoked when the consensus module decides on a proposal /// /// Any node that is not in the membership list will be added to the cluster, /// and any node that is currently in the membership list, but not in the proposal /// will be removed. pub fn on_decide(&mut self, proposal: Vec<Endpoint>) { // TODO: Handle metadata updates // TODO: Handle subscriptions self.cancel_failure_detectors(); for node in &proposal { if self.view.is_host_present(&node) { self.view.ring_delete(&node); } else if let Some((node_id, _metadata)) = self.joiner_data.remove(node) { self.view.ring_add(node.clone(), node_id); } else { panic!("Node not present in pre-join metadata") } } let _current_config_id = self.view.get_current_config_id(); // clear data structures self.cut_detector.clear(); self.announced_proposal = false; if self.view.is_host_present(&self.host_addr) { // TODO: inform edge failure detector about config change } else { // We need to gracefully exit by calling a user handler and invalidating the current // session unimplemented!("How do you manage a callback again?"); } // TODO: Instantiate new consensus instance // self.paxos = FastPaxos::new(self.host_addr, self.view.get_membership_size(), ) self.respond_to_joiners(proposal); } fn cancel_failure_detectors(&mut self) { for signal in self.monitor_cancellers.drain(..) 
{ let _ = signal.send(()); } } fn respond_to_joiners(&mut self, proposal: Vec<Endpoint>) { let configuration = self.view.get_config(); let join_res = JoinResponse { sender: self.host_addr.clone(), status: JoinStatus::SafeToJoin, config_id: configuration.config_id(), endpoints: configuration.endpoints.clone(), identifiers: configuration.node_ids.clone(), cluster_metadata: HashMap::new(), // TODO: metadata manager }; for node in proposal { self.messages .push_back((node, proto::ResponseKind::Join(join_res.clone()).into())); // self.joiners_to_respond.remove(&node).and_then(|joiners| { // joiners.into_iter().for_each(|joiner| { // joiner // .send(Ok(Response::new_join(join_res.clone()))) // .expect("Unable to send response"); // }); // // This is so the compiler can infer the type of the closure to be Option<()> // Some(()) // }); } } pub fn drain_messages(&mut self) -> Vec<(Endpoint, Message)> { let mut msgs = Vec::new(); while let Some(msg) = self.messages.pop_front() { msgs.push(msg); } msgs } }
sender: self.host_addr.clone(), status: JoinStatus::ConfigChanged, config_id: self.view.get_current_config_id(), endpoints: vec![], identifiers: vec![],
random_line_split
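This record splits handle_join around the ConfigChanged response, the branch taken when the configuration advanced between the two join phases. Below is a hedged sketch of how a joiner might dispatch on the three JoinStatus outcomes seen here; the enum mirrors the proto variants in the records, while the retry policy itself is an assumption rather than the crate's documented behavior.

// JoinStatus mirrors the proto variants exercised in the record above.
#[derive(Debug)]
enum JoinStatus {
    SafeToJoin,
    ConfigChanged,
    HostnameAlreadyInRing,
}

fn next_action(status: JoinStatus) -> &'static str {
    match status {
        // Phase 2 succeeded: install the returned endpoints/identifiers.
        JoinStatus::SafeToJoin => "install configuration and start gossiping",
        // The configuration moved between phase 1 and phase 2: start over
        // from the pre-join step to pick up the new config_id.
        JoinStatus::ConfigChanged => "retry pre-join against the new configuration",
        // The observers already admitted this host; nothing left to do.
        JoinStatus::HostnameAlreadyInRing => "treat as already joined",
    }
}

fn main() {
    println!("{}", next_action(JoinStatus::ConfigChanged));
}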
mod.rs
pub mod cut_detector; pub mod ring; pub mod view; use crate::{ common::{ConfigId, Endpoint, NodeId, Scheduler, SchedulerEvents}, consensus::FastPaxos, error::Result, event::{Event, NodeStatusChange}, monitor::Monitor, transport::{ proto::{ self, Alert, BatchedAlertMessage, EdgeStatus, JoinMessage, JoinResponse, JoinStatus, Metadata, NodeStatus, PreJoinMessage, }, Message, Request, Response, }, }; use cut_detector::CutDetector; use view::View; use futures::FutureExt; use std::{ collections::{HashMap, VecDeque}, time::{Duration, Instant}, }; use tokio::sync::{broadcast, mpsc, oneshot}; use tracing::info; type OutboundResponse = oneshot::Sender<crate::Result<Response>>; #[derive(Debug)] pub struct Membership<M> { host_addr: Endpoint, view: View, cut_detector: CutDetector, monitor: M, alerts: VecDeque<proto::Alert>, last_enqueued_alert: Instant, joiners_to_respond: Vec<Endpoint>, // joiners_to_respond: HashMap<Endpoint, VecDeque<OutboundResponse>>, batch_window: Duration, paxos: FastPaxos, announced_proposal: bool, joiner_data: HashMap<Endpoint, (NodeId, Metadata)>, event_tx: broadcast::Sender<Event>, monitor_cancellers: Vec<oneshot::Sender<()>>, messages: VecDeque<(Endpoint, Message)>, } impl<M: Monitor> Membership<M> { #[allow(dead_code)] pub fn new( host_addr: Endpoint, view: View, cut_detector: CutDetector, monitor: M, event_tx: broadcast::Sender<Event>, ) -> Self { // TODO: setup startup tasks let paxos = FastPaxos::new( host_addr.clone(), view.get_membership_size(), view.get_current_config_id(), ); Self { host_addr, view, cut_detector, monitor, paxos, alerts: VecDeque::default(), last_enqueued_alert: Instant::now(), joiners_to_respond: Vec::new(), batch_window: Duration::new(10, 0), announced_proposal: false, joiner_data: HashMap::default(), monitor_cancellers: vec![], event_tx, messages: VecDeque::new(), } } #[allow(dead_code)] fn send_initial_notification(&self) { self.event_tx .send(Event::ViewChange(self.get_inititial_view_changes())) .expect("Unable to send response"); } fn get_inititial_view_changes(&self) -> Vec<NodeStatusChange> { let nodes = self.view.get_ring(0); nodes .iter() .map(|_| NodeStatusChange { endpoint: self.host_addr.clone(), status: EdgeStatus::Up, metadata: Metadata::default(), }) .collect() } pub fn view(&self) -> Vec<&Endpoint> { self.view .get_ring(0) .expect("There is always a ring!") .iter() .collect() } pub fn step(&mut self, from: Endpoint, msg: proto::RequestKind) { use proto::RequestKind::*; match msg { PreJoin(msg) => self.handle_pre_join(from, msg), Join(msg) => self.handle_join(from, msg), BatchedAlert(msg) => self.handle_batched_alert_message(msg), Consensus(msg) => { let view = self .view .get_ring(0) .expect("Ring zero should always exist") .iter() .collect(); let msgs = self.paxos.step(msg, view); self.messages.extend(msgs); } _ => todo!("request type not implemented yet"), } } pub fn start_classic_round(&mut self) -> Result<()> { // TODO: make paxos synchronous // self.paxos.start_classic_round() todo!() } pub fn handle_pre_join(&mut self, from: Endpoint, msg: PreJoinMessage) { let PreJoinMessage { sender, node_id, ..
} = msg; let status = self.view.is_safe_to_join(&sender, &node_id); let config_id = self.view.get_config().config_id(); let endpoints = if status == JoinStatus::SafeToJoin || status == JoinStatus::HostnameAlreadyInRing { self.view.get_expected_observers(&sender) } else { Vec::new() }; let join_res = JoinResponse { sender, status, config_id, endpoints, identifiers: Vec::new(), cluster_metadata: HashMap::new(), }; info!( message = "Join at seed.", seed = %self.host_addr, sender = %join_res.sender, config = %join_res.config_id, size = %self.view.get_membership_size() ); self.messages .push_back((from, proto::ResponseKind::Join(join_res).into())); } pub fn handle_join(&mut self, from: Endpoint, msg: JoinMessage) { if msg.config_id == self.view.get_current_config_id() { let config = self.view.get_config(); // TODO: do we still need to do this? // self.joiners_to_respond // .entry(msg.sender.clone()) // .or_insert_with(VecDeque::new) // .push_back(from); let alert = proto::Alert { src: self.host_addr.clone(), dst: msg.sender.clone(), edge_status: proto::EdgeStatus::Up, config_id: config.config_id(), node_id: Some(msg.node_id.clone()), ring_number: msg.ring_number, metadata: None, }; self.enqueue_alert(alert); } else { // This is the case where the config changed between phase 1 // and phase 2 of the join process. let response = if self.view.is_host_present(&msg.sender) && self.view.is_node_id_present(&msg.node_id) { let config = self.view.get_config(); // Race condition where an observer already crossed H messages for the joiner and // changed the configuration, but the JoinPhase2 message shows up at the observer // after it has already added the joiner. In this case, simply tell the joiner it's // safe to join proto::JoinResponse { sender: self.host_addr.clone(), status: JoinStatus::SafeToJoin, config_id: config.config_id(), endpoints: config.endpoints.clone(), identifiers: config.node_ids.clone(), cluster_metadata: HashMap::new(), } } else { proto::JoinResponse { sender: self.host_addr.clone(), status: JoinStatus::ConfigChanged, config_id: self.view.get_current_config_id(), endpoints: vec![], identifiers: vec![], cluster_metadata: HashMap::new(), } }; self.messages .push_back((from, proto::ResponseKind::Join(response).into())); } } // Invoked by observers of a node for failure detection fn handle_probe_message(&self) -> Response { Response::new_probe(NodeStatus::Up) // TODO: FIXME THIS IS WRONG } // Receives edge update events and delivers them to the cut detector to check if it will // return a valid proposal. // // Edge update messages that do not affect the ongoing proposal need to be dropped. fn handle_batched_alert_message(&mut self, msg_batch: BatchedAlertMessage) { let current_config_id = self.view.get_current_config_id(); let size = self.view.get_membership_size(); let mut proposal: Vec<Endpoint> = msg_batch .alerts .iter() // filter out messages which violate membership invariants // And then run the cut detector to see if there is a new proposal .filter_map(|message| { if !self.filter_alert_messages(&msg_batch, message, size, current_config_id) { return None; } Some(self.cut_detector.aggregate(message)) }) .flatten() .collect(); proposal.extend(self.cut_detector.invalidate_failing_edges(&mut self.view)); if !proposal.is_empty() { self.announced_proposal = true; self.event_tx .send(Event::ViewChangeProposal( self.create_node_status_change_list(proposal.clone()), )) .expect("Unable to send response"); // TODO: make paxos synchronous // self.paxos.propose(proposal, scheduler).await?
} } fn create_node_status_change_list(&self, proposal: Vec<Endpoint>) -> Vec<NodeStatusChange> { proposal .iter() .map(|node| NodeStatusChange { endpoint: node.to_string(), status: if self.view.is_host_present(node) { EdgeStatus::Down } else { EdgeStatus::Up }, metadata: Metadata::default(), }) .collect() } // Filter for removing invalid edge update messages. These include messages // that were for a configuration that the current node is not a part of, and messages // that violate the semantics of being a part of a configuration fn filter_alert_messages( &mut self, _message_batch: &BatchedAlertMessage, // Might require this later for logging message: &Alert, _size: usize, config_id: ConfigId, ) -> bool { let dst = &message.dst; if config_id != message.config_id { return false; } // An invariant to maintain is that a node can only go into the membership set once // and leave it once if message.edge_status == EdgeStatus::Down && !self.view.is_host_present(&dst) { return false; } if message.edge_status == EdgeStatus::Up { // Add joiner data after the node is done being added to the set. Store in a // temp location for now. self.joiner_data.insert( dst.clone(), ( message.node_id.clone().take().unwrap(), message.metadata.clone().take().unwrap(), ), ); } true } pub fn create_failure_detectors( &mut self, scheduler: &mut Scheduler, ) -> Result<mpsc::Receiver<(Endpoint, ConfigId)>> { todo!() // let (tx, rx) = mpsc::channel(1000); // for subject in self.view.get_subjects(&self.host_addr)? { // let (mon_tx, mon_rx) = oneshot::channel(); // let fut = self.monitor.monitor( // subject.clone(), // client.clone(), // self.view.get_current_config_id(), // tx.clone(), // mon_rx, // ); // scheduler.push(Box::pin(fut.map(|_| SchedulerEvents::None))); // self.monitor_cancellers.push(mon_tx); // } // Ok(rx) } #[allow(dead_code)] pub fn edge_failure_notification(&mut self, subject: Endpoint, config_id: ConfigId) { if config_id != self.view.get_current_config_id() { // TODO: Figure out why &String does not impl Value // info!( // target: "Failure notification from old config.", // subject = subject, // config = self.view.get_current_config_id(), // old_config = config_id // ); // return; } let alert = proto::Alert { src: self.host_addr.clone(), dst: subject.clone(), edge_status: proto::EdgeStatus::Down, config_id, node_id: None, ring_number: self .view .get_ring_numbers(&self.host_addr, &subject) .expect("Unable to get ring number"), metadata: None, }; self.enqueue_alert(alert); } pub fn get_batch_alerts(&mut self) -> Option<proto::BatchedAlertMessage> { if !self.alerts.is_empty() && (Instant::now() - self.last_enqueued_alert) > self.batch_window { let alerts = self.alerts.drain(..).collect(); Some(proto::BatchedAlertMessage { sender: self.host_addr.clone(), alerts, }) } else { None } } pub fn enqueue_alert(&mut self, alert: proto::Alert) { self.last_enqueued_alert = Instant::now(); self.alerts.push_back(alert); } /// This is invoked when the consensus module decides on a proposal /// /// Any node that is not in the membership list will be added to the cluster, /// and any node that is currently in the membership list, but not in the proposal /// will be removed.
pub fn on_decide(&mut self, proposal: Vec<Endpoint>) { // TODO: Handle metadata updates // TODO: Handle subscriptions self.cancel_failure_detectors(); for node in &proposal { if self.view.is_host_present(&node) { self.view.ring_delete(&node); } else if let Some((node_id, _metadata)) = self.joiner_data.remove(node) { self.view.ring_add(node.clone(), node_id); } else { panic!("Node not present in pre-join metadata") } } let _current_config_id = self.view.get_current_config_id(); // clear data structures self.cut_detector.clear(); self.announced_proposal = false; if self.view.is_host_present(&self.host_addr) { // TODO: inform edge failure detector about config change } else { // We need to gracefully exit by calling a user handler and invalidating the current // session unimplemented!("How do you manage a callback again?"); } // TODO: Instantiate new consensus instance // self.paxos = FastPaxos::new(self.host_addr, self.view.get_membership_size(), ) self.respond_to_joiners(proposal); } fn
(&mut self) { for signal in self.monitor_cancellers.drain(..) { let _ = signal.send(()); } } fn respond_to_joiners(&mut self, proposal: Vec<Endpoint>) { let configuration = self.view.get_config(); let join_res = JoinResponse { sender: self.host_addr.clone(), status: JoinStatus::SafeToJoin, config_id: configuration.config_id(), endpoints: configuration.endpoints.clone(), identifiers: configuration.node_ids.clone(), cluster_metadata: HashMap::new(), // TODO: metadata manager }; for node in proposal { self.messages .push_back((node, proto::ResponseKind::Join(join_res.clone()).into())); // self.joiners_to_respond.remove(&node).and_then(|joiners| { // joiners.into_iter().for_each(|joiner| { // joiner // .send(Ok(Response::new_join(join_res.clone()))) // .expect("Unable to send response"); // }); // // This is so the compiler can infer the type of the closure to be Option<()> // Some(()) // }); } } pub fn drain_messages(&mut self) -> Vec<(Endpoint, Message)> { let mut msgs = Vec::new(); while let Some(msg) = self.messages.pop_front() { msgs.push(msg); } msgs } }
cancel_failure_detectors
identifier_name
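The identifier completed in this record, cancel_failure_detectors, drains monitor_cancellers and fires each oneshot sender. The following is a standalone sketch of that cancellation pattern, assuming the tokio crate with its macros and time features enabled; the endpoint strings and timings are placeholders.

use tokio::sync::oneshot;
use tokio::time::{sleep, Duration};

// Each spawned failure detector holds the receiving half of a oneshot
// channel and exits as soon as the sender fires (or is dropped).
async fn monitor(subject: String, mut cancel: oneshot::Receiver<()>) {
    loop {
        tokio::select! {
            // Both an explicit send(()) and a dropped sender resolve this arm.
            _ = &mut cancel => {
                println!("monitor for {} cancelled", subject);
                return;
            }
            _ = sleep(Duration::from_millis(100)) => {
                println!("probing {}", subject);
            }
        }
    }
}

#[tokio::main]
async fn main() {
    let mut cancellers = Vec::new();
    for subject in ["10.0.0.2:7000", "10.0.0.3:7000"] {
        let (tx, rx) = oneshot::channel();
        cancellers.push(tx);
        tokio::spawn(monitor(subject.to_string(), rx));
    }
    sleep(Duration::from_millis(250)).await;
    // Same move as cancel_failure_detectors: drain the senders and fire
    // each one, ignoring errors from monitors that already exited.
    for tx in cancellers.drain(..) {
        let _ = tx.send(());
    }
    sleep(Duration::from_millis(50)).await;
}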
mod.rs
pub mod cut_detector; pub mod ring; pub mod view; use crate::{ common::{ConfigId, Endpoint, NodeId, Scheduler, SchedulerEvents}, consensus::FastPaxos, error::Result, event::{Event, NodeStatusChange}, monitor::Monitor, transport::{ proto::{ self, Alert, BatchedAlertMessage, EdgeStatus, JoinMessage, JoinResponse, JoinStatus, Metadata, NodeStatus, PreJoinMessage, }, Message, Request, Response, }, }; use cut_detector::CutDetector; use view::View; use futures::FutureExt; use std::{ collections::{HashMap, VecDeque}, time::{Duration, Instant}, }; use tokio::sync::{broadcast, mpsc, oneshot}; use tracing::info; type OutboundResponse = oneshot::Sender<crate::Result<Response>>; #[derive(Debug)] pub struct Membership<M> { host_addr: Endpoint, view: View, cut_detector: CutDetector, monitor: M, alerts: VecDeque<proto::Alert>, last_enqueued_alert: Instant, joiners_to_respond: Vec<Endpoint>, // joiners_to_respond: HashMap<Endpoint, VecDeque<OutboundResponse>>, batch_window: Duration, paxos: FastPaxos, announced_proposal: bool, joiner_data: HashMap<Endpoint, (NodeId, Metadata)>, event_tx: broadcast::Sender<Event>, monitor_cancellers: Vec<oneshot::Sender<()>>, messages: VecDeque<(Endpoint, Message)>, } impl<M: Monitor> Membership<M> { #[allow(dead_code)] pub fn new( host_addr: Endpoint, view: View, cut_detector: CutDetector, monitor: M, event_tx: broadcast::Sender<Event>, ) -> Self { // TODO: setup startup tasks let paxos = FastPaxos::new( host_addr.clone(), view.get_membership_size(), view.get_current_config_id(), ); Self { host_addr, view, cut_detector, monitor, paxos, alerts: VecDeque::default(), last_enqueued_alert: Instant::now(), joiners_to_respond: Vec::new(), batch_window: Duration::new(10, 0), announced_proposal: false, joiner_data: HashMap::default(), monitor_cancellers: vec![], event_tx, messages: VecDeque::new(), } } #[allow(dead_code)] fn send_initial_notification(&self) { self.event_tx .send(Event::ViewChange(self.get_inititial_view_changes())) .expect("Unable to send response"); } fn get_inititial_view_changes(&self) -> Vec<NodeStatusChange>
pub fn view(&self) -> Vec<&Endpoint> { self.view .get_ring(0) .expect("There is always a ring!") .iter() .collect() } pub fn step(&mut self, from: Endpoint, msg: proto::RequestKind) { use proto::RequestKind::*; match msg { PreJoin(msg) => self.handle_pre_join(from, msg), Join(msg) => self.handle_join(from, msg), BatchedAlert(msg) => self.handle_batched_alert_message(msg), Consensus(msg) => { let view = self .view .get_ring(0) .expect("Ring zero should always exist") .iter() .collect(); let msgs = self.paxos.step(msg, view); self.messages.extend(msgs); } _ => todo!("request type not implemented yet"), } } pub fn start_classic_round(&mut self) -> Result<()> { // TODO: make paxos synchronous // self.paxos.start_classic_round() todo!() } pub fn handle_pre_join(&mut self, from: Endpoint, msg: PreJoinMessage) { let PreJoinMessage { sender, node_id, .. } = msg; let status = self.view.is_safe_to_join(&sender, &node_id); let config_id = self.view.get_config().config_id(); let endpoints = if status == JoinStatus::SafeToJoin || status == JoinStatus::HostnameAlreadyInRing { self.view.get_expected_observers(&sender) } else { Vec::new() }; let join_res = JoinResponse { sender, status, config_id, endpoints, identifiers: Vec::new(), cluster_metadata: HashMap::new(), }; info!( message = "Join at seed.", seed = %self.host_addr, sender = %join_res.sender, config = %join_res.config_id, size = %self.view.get_membership_size() ); self.messages .push_back((from, proto::ResponseKind::Join(join_res).into())); } pub fn handle_join(&mut self, from: Endpoint, msg: JoinMessage) { if msg.config_id == self.view.get_current_config_id() { let config = self.view.get_config(); // TODO: do we still need to do this? // self.joiners_to_respond // .entry(msg.sender.clone()) // .or_insert_with(VecDeque::new) // .push_back(from); let alert = proto::Alert { src: self.host_addr.clone(), dst: msg.sender.clone(), edge_status: proto::EdgeStatus::Up, config_id: config.config_id(), node_id: Some(msg.node_id.clone()), ring_number: msg.ring_number, metadata: None, }; self.enqueue_alert(alert); } else { // This is the case where the config changed between phase 1 // and phase 2 of the join process. let response = if self.view.is_host_present(&msg.sender) && self.view.is_node_id_present(&msg.node_id) { let config = self.view.get_config(); // Race condition where an observer already crossed H messages for the joiner and // changed the configuration, but the JoinPhase2 message shows up at the observer // after it has already added the joiner. In this case, simply tell the joiner it's // safe to join proto::JoinResponse { sender: self.host_addr.clone(), status: JoinStatus::SafeToJoin, config_id: config.config_id(), endpoints: config.endpoints.clone(), identifiers: config.node_ids.clone(), cluster_metadata: HashMap::new(), } } else { proto::JoinResponse { sender: self.host_addr.clone(), status: JoinStatus::ConfigChanged, config_id: self.view.get_current_config_id(), endpoints: vec![], identifiers: vec![], cluster_metadata: HashMap::new(), } }; self.messages .push_back((from, proto::ResponseKind::Join(response).into())); } } // Invoked by observers of a node for failure detection fn handle_probe_message(&self) -> Response { Response::new_probe(NodeStatus::Up) // TODO: FIXME THIS IS WRONG } // Receives edge update events and delivers them to the cut detector to check if it will // return a valid proposal. // // Edge update messages that do not affect the ongoing proposal need to be dropped.
fn handle_batched_alert_message(&mut self, msg_batch: BatchedAlertMessage) { let current_config_id = self.view.get_current_config_id(); let size = self.view.get_membership_size(); let mut proposal: Vec<Endpoint> = msg_batch .alerts .iter() // filter out messages which violate membership invariants // And then run the cut detector to see if there is a new proposal .filter_map(|message| { if !self.filter_alert_messages(&msg_batch, message, size, current_config_id) { return None; } Some(self.cut_detector.aggregate(message)) }) .flatten() .collect(); proposal.extend(self.cut_detector.invalidate_failing_edges(&mut self.view)); if !proposal.is_empty() { self.announced_proposal = true; self.event_tx .send(Event::ViewChangeProposal( self.create_node_status_change_list(proposal.clone()), )) .expect("Unable to send response"); // TODO: make paxos synchronous // self.paxos.propose(proposal, scheduler).await? } } fn create_node_status_change_list(&self, proposal: Vec<Endpoint>) -> Vec<NodeStatusChange> { proposal .iter() .map(|node| NodeStatusChange { endpoint: node.to_string(), status: if self.view.is_host_present(node) { EdgeStatus::Down } else { EdgeStatus::Up }, metadata: Metadata::default(), }) .collect() } // Filter for removing invalid edge update messages. These include messages // that were for a configuration that the current node is not a part of, and messages // that violate the semantics of being a part of a configuration fn filter_alert_messages( &mut self, _message_batch: &BatchedAlertMessage, // Might require this later for logging message: &Alert, _size: usize, config_id: ConfigId, ) -> bool { let dst = &message.dst; if config_id != message.config_id { return false; } // An invariant to maintain is that a node can only go into the membership set once // and leave it once if message.edge_status == EdgeStatus::Down && !self.view.is_host_present(&dst) { return false; } if message.edge_status == EdgeStatus::Up { // Add joiner data after the node is done being added to the set. Store in a // temp location for now. self.joiner_data.insert( dst.clone(), ( message.node_id.clone().take().unwrap(), message.metadata.clone().take().unwrap(), ), ); } true } pub fn create_failure_detectors( &mut self, scheduler: &mut Scheduler, ) -> Result<mpsc::Receiver<(Endpoint, ConfigId)>> { todo!() // let (tx, rx) = mpsc::channel(1000); // for subject in self.view.get_subjects(&self.host_addr)?
{ // let (mon_tx, mon_rx) = oneshot::channel(); // let fut = self.monitor.monitor( // subject.clone(), // client.clone(), // self.view.get_current_config_id(), // tx.clone(), // mon_rx, // ); // scheduler.push(Box::pin(fut.map(|_| SchedulerEvents::None))); // self.monitor_cancellers.push(mon_tx); // } // Ok(rx) } #[allow(dead_code)] pub fn edge_failure_notification(&mut self, subject: Endpoint, config_id: ConfigId) { if config_id != self.view.get_current_config_id() { // TODO: Figure out why &String does not impl Value // info!( // target: "Failure notification from old config.", // subject = subject, // config = self.view.get_current_config_id(), // old_config = config_id // ); // return; } let alert = proto::Alert { src: self.host_addr.clone(), dst: subject.clone(), edge_status: proto::EdgeStatus::Down, config_id, node_id: None, ring_number: self .view .get_ring_numbers(&self.host_addr, &subject) .expect("Unable to get ring number"), metadata: None, }; self.enqueue_alert(alert); } pub fn get_batch_alerts(&mut self) -> Option<proto::BatchedAlertMessage> { if !self.alerts.is_empty() && (Instant::now() - self.last_enqueued_alert) > self.batch_window { let alerts = self.alerts.drain(..).collect(); Some(proto::BatchedAlertMessage { sender: self.host_addr.clone(), alerts, }) } else { None } } pub fn enqueue_alert(&mut self, alert: proto::Alert) { self.last_enqueued_alert = Instant::now(); self.alerts.push_back(alert); } /// This is invoked when the consensus module decides on a proposal /// /// Any node that is not in the membership list will be added to the cluster, /// and any node that is currently in the membership list, but not in the proposal /// will be removed. pub fn on_decide(&mut self, proposal: Vec<Endpoint>) { // TODO: Handle metadata updates // TODO: Handle subscriptions self.cancel_failure_detectors(); for node in &proposal { if self.view.is_host_present(&node) { self.view.ring_delete(&node); } else if let Some((node_id, _metadata)) = self.joiner_data.remove(node) { self.view.ring_add(node.clone(), node_id); } else { panic!("Node not present in pre-join metadata") } } let _current_config_id = self.view.get_current_config_id(); // clear data structures self.cut_detector.clear(); self.announced_proposal = false; if self.view.is_host_present(&self.host_addr) { // TODO: inform edge failure detector about config change } else { // We need to gracefully exit by calling a user handler and invalidating the current // session unimplemented!("How do you manage a callback again?"); } // TODO: Instantiate new consensus instance // self.paxos = FastPaxos::new(self.host_addr, self.view.get_membership_size(), ) self.respond_to_joiners(proposal); } fn cancel_failure_detectors(&mut self) { for signal in self.monitor_cancellers.drain(..) 
{ let _ = signal.send(()); } } fn respond_to_joiners(&mut self, proposal: Vec<Endpoint>) { let configuration = self.view.get_config(); let join_res = JoinResponse { sender: self.host_addr.clone(), status: JoinStatus::SafeToJoin, config_id: configuration.config_id(), endpoints: configuration.endpoints.clone(), identifiers: configuration.node_ids.clone(), cluster_metadata: HashMap::new(), // TODO: metadata manager }; for node in proposal { self.messages .push_back((node, proto::ResponseKind::Join(join_res.clone()).into())); // self.joiners_to_respond.remove(&node).and_then(|joiners| { // joiners.into_iter().for_each(|joiner| { // joiner // .send(Ok(Response::new_join(join_res.clone()))) // .expect("Unable to send response"); // }); // // This is so the compiler can infer the type of the closure to be Option<()> // Some(()) // }); } } pub fn drain_messages(&mut self) -> Vec<(Endpoint, Message)> { let mut msgs = Vec::new(); while let Some(msg) = self.messages.pop_front() { msgs.push(msg); } msgs } }
{ let nodes = self.view.get_ring(0); nodes .iter() .map(|_| NodeStatusChange { endpoint: self.host_addr.clone(), status: EdgeStatus::Up, metadata: Metadata::default(), }) .collect() }
identifier_body
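Alongside the view-change plumbing, these records include enqueue_alert/get_batch_alerts, which coalesce edge alerts: a batch is flushed only once the queue is non-empty and the newest alert is older than batch_window. Below is a self-contained sketch of exactly that rule, with String alerts standing in for proto::Alert.

use std::collections::VecDeque;
use std::time::{Duration, Instant};

struct Batcher {
    alerts: VecDeque<String>,
    last_enqueued: Instant,
    batch_window: Duration,
}

impl Batcher {
    fn enqueue(&mut self, alert: String) {
        // Each enqueue restarts the quiet period, just as enqueue_alert
        // resets last_enqueued_alert.
        self.last_enqueued = Instant::now();
        self.alerts.push_back(alert);
    }

    // Mirrors get_batch_alerts: flush only after a full quiet window, so a
    // burst of edge events coalesces into a single batched message.
    fn flush(&mut self) -> Option<Vec<String>> {
        if !self.alerts.is_empty() && self.last_enqueued.elapsed() > self.batch_window {
            Some(self.alerts.drain(..).collect())
        } else {
            None
        }
    }
}

fn main() {
    let mut b = Batcher {
        alerts: VecDeque::new(),
        last_enqueued: Instant::now(),
        batch_window: Duration::from_millis(10),
    };
    b.enqueue("edge down: 10.0.0.9:7000".into());
    assert!(b.flush().is_none()); // still inside the quiet window
    std::thread::sleep(Duration::from_millis(20));
    assert_eq!(b.flush().map(|v| v.len()), Some(1));
}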
font.rs
use prelude::*; use core::{self, Layer, Context, Color, Point2, Rect}; use core::builder::*; use rusttype; use backends::backend; use font_loader::system_fonts; static FONT_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT; /// A font used for writing on a [`Layer`](struct.Layer.html). /// /// Use [`Font::builder()`](#method.builder) to create a new font from a registered system font or /// a local file. [`Font::from_file()`](#method.from_file) is a shortcut to achieve the latter. /// /// In addition to the usual properties of a font, radiant also assigns a fixed size /// to each font object. Instead of modifying this value, you can clone a new font /// with a different size using [`Font::clone_with_size()`](struct.Font.html#method.clone_with_size). #[derive(Clone)] pub struct Font { data : Vec<u8>, font_id : usize, size : f32, context : Context, } impl Debug for Font { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Font") .field("data_len", &self.data.len()) .field("font_id", &self.font_id) .field("size", &self.size) .finish() } } impl Font { /// Returns a [font builder](support/struct.FontBuilder.html) for font construction. /// /// # Examples /// /// ```rust /// # use radiant_rs::*; /// # let display = Display::builder().hidden().build().unwrap(); /// # let renderer = Renderer::new(&display).unwrap(); /// # let context = display.context(); /// let my_font = Font::builder(&context).family("Arial").size(16.0).build().unwrap(); /// ``` pub fn builder(context: &Context) -> FontBuilder { FontBuilder::new(context) } /// Creates a font instance from a file. pub fn from_file(context: &Context, file: &str) -> core::Result<Font> { use std::io::Read; let mut f = File::open(Path::new(file))?; let mut font_data = Vec::new(); f.read_to_end(&mut font_data)?; Ok(Self::create(context, font_data, 12.0)) } /// Returns the names of all available system fonts. pub fn query_all() -> Vec<String> { system_fonts::query_all() } /// Returns a query builder to retrieve the names of all available system fonts with the given properties (e.g. monospace). /// /// # Examples /// /// ```rust /// # use radiant_rs::*; /// let monospace_fonts = Font::query().monospace().italic().fetch(); /// ``` pub fn query() -> FontQueryBuilder { FontQueryBuilder::new() } /// Returns a new font instance with given size. pub fn clone_with_size(self: &Self, size: f32) -> Font { let mut font = (*self).clone(); font.size = size; font } /// Write to given layer. pub fn write<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color) -> &Font where Point2<f32>: From<T> { let position = Point2::from(position); self.write_paragraph(layer, text, position.0, position.1, 0.0, color, 0.0, 1.0, 1.0); self } /// Write to given layer. Breaks lines after max_width pixels. pub fn write_wrapped<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32) -> &Font where Point2<f32>: From<T> { let position = Point2::from(position); self.write_paragraph(layer, text, position.0, position.1, max_width, color, 0.0, 1.0, 1.0); self } /// Write to given layer. Breaks lines after max_width pixels and applies given rotation and scaling.
pub fn write_transformed<T, U>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32, rotation: f32, scale: U) -> &Font where Point2<f32>: From<T>+From<U> { let position = Point2::from(position); let scale = Point2::from(scale); self.write_paragraph(layer, text, position.0, position.1, max_width, color, rotation, scale.0, scale.1); self } /// Returns the font wrapped in an std::Arc. pub fn arc(self: Self) -> Arc<Self> { Arc::new(self) } /// Returns the names of all available system fonts with the given properties (e.g. monospace). pub(crate) fn query_specific(info: FontInfo) -> Vec<String> { system_fonts::query_specific(&mut Self::build_property(&info)) } /// Creates a new font instance from given FontInfo struct. pub(crate) fn from_info(context: &Context, info: FontInfo) -> core::Result<Font> { if let Some((font_data, _)) = system_fonts::get(&Self::build_property(&info)) { Ok(Self::create(context, font_data, info.size)) } else { Err(core::Error::FontError("Failed to get system font".to_string())) } } /// Creates a new unique font fn
(context: &Context, font_data: Vec<u8>, size: f32) -> Font { Font { data : font_data, font_id : FONT_COUNTER.fetch_add(1, Ordering::Relaxed), size : size, context : context.clone(), } } /// Write text to given layer using given font fn write_paragraph(self: &Self, layer: &Layer, text: &str, x: f32, y: f32, max_width: f32, color: Color, rotation: f32, scale_x: f32, scale_y: f32) { // !todo probably expensive, but rusttype is completely opaque. would be nice to be able to store Font::info outside of a "may or may not own" container let rt_font = rusttype::FontCollection::from_bytes(&self.data[..]).unwrap().into_font().unwrap(); let bucket_id = 0; let glyphs = Self::layout_paragraph(&rt_font, rusttype::Scale::uniform(self.size), max_width, &text); let context = self.context.lock(); context.font_cache.queue(self.font_id, &glyphs); let anchor = (0., 0.); let scale = (scale_x, scale_y); let cos_rot = rotation.cos(); let sin_rot = rotation.sin(); for glyph in &glyphs { if let Some((uv, pos, dim)) = context.font_cache.rect_for(self.font_id, glyph) { let dist_x = pos.0 * scale_x; let dist_y = pos.1 * scale_y; let offset_x = x + dist_x * cos_rot - dist_y * sin_rot; let offset_y = y + dist_x * sin_rot + dist_y * cos_rot; layer.add_rect(None, bucket_id, 0, 1, uv, (offset_x, offset_y), anchor, dim, color, rotation, scale); } } } /// Layout a paragraph of glyphs fn layout_paragraph<'a>(font: &'a rusttype::Font, scale: rusttype::Scale, width: f32, text: &str) -> Vec<rusttype::PositionedGlyph<'a>> { use unicode_normalization::UnicodeNormalization; let mut result = Vec::new(); let v_metrics = font.v_metrics(scale); let advance_height = v_metrics.ascent - v_metrics.descent + v_metrics.line_gap; let mut caret = rusttype::point(0.0, v_metrics.ascent); let mut last_glyph_id = None; for c in text.nfc() { if c.is_control() { match c { '\n' => { caret = rusttype::point(0.0, caret.y + advance_height); }, _ => {} } continue; } let base_glyph = font.glyph(c); if let Some(id) = last_glyph_id.take() { caret.x += font.pair_kerning(scale, id, base_glyph.id()); } last_glyph_id = Some(base_glyph.id()); let mut glyph = base_glyph.scaled(scale).positioned(caret); if let Some(bb) = glyph.pixel_bounding_box() { if width > 0.0 && bb.max.x > width as i32 { caret = rusttype::point(0.0, caret.y + advance_height); glyph = glyph.into_unpositioned().positioned(caret); last_glyph_id = None; } } caret.x += glyph.unpositioned().h_metrics().advance_width; result.push(glyph); } result } /// Builds a FontProperty for the underlying system_fonts library fn build_property(info: &FontInfo) -> system_fonts::FontProperty { let mut property = system_fonts::FontPropertyBuilder::new(); if info.family != "" { property = property.family(&info.family); } if info.italic { property = property.italic(); } if info.oblique { property = property.oblique(); } if info.bold { property = property.bold(); } if info.monospace { property = property.monospace(); } property.build() } } /// A wrapper around rusttype's font cache. pub struct FontCache { cache : Mutex<rusttype::gpu_cache::Cache<'static>>, queue : Mutex<Vec<(Rect<u32>, Vec<u8>)>>, dirty : AtomicBool, } impl FontCache { /// Creates a new fontcache instance.
pub fn new(width: u32, height: u32, scale_tolerance: f32, position_tolerance: f32) -> FontCache { let cache = rusttype::gpu_cache::CacheBuilder { width, height, scale_tolerance, position_tolerance, pad_glyphs: true, }.build(); FontCache { cache: Mutex::new(cache), queue: Mutex::new(Vec::new()), dirty: AtomicBool::new(false), } } /// Queues a glyph for caching. pub fn queue(self: &Self, font_id: usize, glyphs: &[rusttype::PositionedGlyph]) { let mut cache = self.cache.lock().unwrap(); let mut queue = self.queue.lock().unwrap(); let mut dirties = false; for glyph in glyphs { cache.queue_glyph(font_id, glyph.standalone()); } cache.cache_queued(|rect, data| { queue.push( ( ((rect.min.x, rect.min.y), (rect.max.x, rect.max.y)), data.to_vec() ) ); dirties = true; }).unwrap(); if dirties { self.dirty.store(dirties, Ordering::Relaxed); } } /// Updates the font cache texture. pub fn update(self: &Self, texture: &backend::Texture2d) { if self.dirty.load(Ordering::Relaxed) { let mut queue = self.queue.lock().unwrap(); for &(ref rect, ref data) in queue.deref() { texture.write(rect, data); } queue.clear(); self.dirty.store(false, Ordering::Relaxed); } } /// Returns a rectangle of uv coordinates for the given glyph as well as its offset and dimensions. pub fn rect_for(self: &Self, font_id: usize, glyph: &rusttype::PositionedGlyph) -> Option<(Rect, Point2, Point2)> { let cache = self.cache.lock().unwrap(); if let Ok(Some((uv_rect, screen_rect))) = cache.rect_for(font_id, glyph) { let uv = ((uv_rect.min.x, uv_rect.min.y), (uv_rect.max.x, uv_rect.max.y)); let pos = (screen_rect.min.x as f32, screen_rect.min.y as f32); let dim = ((screen_rect.max.x - screen_rect.min.x) as f32, (screen_rect.max.y - screen_rect.min.y) as f32); Some((uv, pos, dim)) } else { None } } } /// A struct used to filter the result of [`Font::query_specific()`](struct.Font.html#method.query_specific) /// or to describe a [`Font`](struct.Font.html) to be created from a system font /// via [`Font::from_info()`](struct.Font.html#method.from_info). #[derive(Clone)] pub struct FontInfo { pub italic : bool, pub oblique : bool, pub bold : bool, pub monospace : bool, pub family : String, pub size : f32, } impl Default for FontInfo { fn default() -> FontInfo { FontInfo { italic : false, oblique : false, bold : false, monospace : false, family : "".to_string(), size : 10.0, } } }
create
identifier_name
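The public surface visible in the font.rs records is the builder, clone_with_size, and the three write variants. Below is a hedged usage sketch built only from those signatures; it assumes radiant_rs re-exports these types as in the doc examples, that (f32, f32) converts into Point2<f32>, and it takes the Layer and Color as parameters since their construction is not shown in these records.

use radiant_rs::*;

// Layer and Color are supplied by the caller; their constructors are not
// part of these records, so none are invented here.
fn draw_labels(context: &Context, layer: &Layer, color: Color) {
    // Builder call taken verbatim from the doc example above.
    let font = Font::builder(context).family("Arial").size(16.0).build().unwrap();
    // clone_with_size() clones the font object with a different fixed size.
    let heading = font.clone_with_size(24.0);

    heading.write(layer, "Title", (10.0, 10.0), color);
    // Wraps lines once they would exceed 300 px.
    font.write_wrapped(layer, "Body text that wraps onto new lines.", (10.0, 40.0), color, 300.0);
    // Additionally applies a rotation (radians) and a per-axis scale.
    font.write_transformed(layer, "Angled", (160.0, 120.0), color, 0.0, 0.3, (1.0, 1.0));
}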
font.rs
use prelude::*; use core::{self, Layer, Context, Color, Point2, Rect}; use core::builder::*; use rusttype; use backends::backend; use font_loader::system_fonts; static FONT_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT; /// A font used for writing on a [`Layer`](struct.Layer.html). /// /// Use [`Font::builder()`](#method.builder) to create a new font from a registered system font or /// a local file. [`Font::from_file()`](#method.from_file) is a shortcut to achieve the latter. /// /// In addition to the usual properties of a font, radiant also assigns a fixed size /// to each font object. Instead of modifying this value, you can clone a new font /// with a different size using [`Font::clone_with_size()`](struct.Font.html#method.clone_with_size). #[derive(Clone)] pub struct Font { data : Vec<u8>, font_id : usize, size : f32, context : Context, } impl Debug for Font { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Font") .field("data_len", &self.data.len()) .field("font_id", &self.font_id) .field("size", &self.size) .finish() } } impl Font { /// Returns a [font builder](support/struct.FontBuilder.html) for font construction. /// /// # Examples /// /// ```rust /// # use radiant_rs::*; /// # let display = Display::builder().hidden().build().unwrap(); /// # let renderer = Renderer::new(&display).unwrap(); /// # let context = display.context(); /// let my_font = Font::builder(&context).family("Arial").size(16.0).build().unwrap(); /// ``` pub fn builder(context: &Context) -> FontBuilder { FontBuilder::new(context) } /// Creates a font instance from a file. pub fn from_file(context: &Context, file: &str) -> core::Result<Font> { use std::io::Read; let mut f = File::open(Path::new(file))?; let mut font_data = Vec::new(); f.read_to_end(&mut font_data)?; Ok(Self::create(context, font_data, 12.0)) } /// Returns the names of all available system fonts. pub fn query_all() -> Vec<String> { system_fonts::query_all() } /// Returns a query builder to retrieve the names of all available system fonts with the given properties (e.g. monospace). /// /// # Examples /// /// ```rust /// # use radiant_rs::*; /// let monospace_fonts = Font::query().monospace().italic().fetch(); /// ``` pub fn query() -> FontQueryBuilder { FontQueryBuilder::new() } /// Returns a new font instance with given size. pub fn clone_with_size(self: &Self, size: f32) -> Font { let mut font = (*self).clone(); font.size = size; font } /// Write to given layer. pub fn write<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color) -> &Font where Point2<f32>: From<T> { let position = Point2::from(position); self.write_paragraph(layer, text, position.0, position.1, 0.0, color, 0.0, 1.0, 1.0); self } /// Write to given layer. Breaks lines after max_width pixels. pub fn write_wrapped<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32) -> &Font where Point2<f32>: From<T> { let position = Point2::from(position); self.write_paragraph(layer, text, position.0, position.1, max_width, color, 0.0, 1.0, 1.0); self } /// Write to given layer. Breaks lines after max_width pixels and applies given rotation and scaling.
pub fn write_transformed<T, U>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32, rotation: f32, scale: U) -> &Font where Point2<f32>: From<T>+From<U> { let position = Point2::from(position); let scale = Point2::from(scale); self.write_paragraph(layer, text, position.0, position.1, max_width, color, rotation, scale.0, scale.1); self } /// Returns the font wrapped in an std::Arc. pub fn arc(self: Self) -> Arc<Self> { Arc::new(self) } /// Returns the names of all available system fonts with the given properties (e.g. monospace). pub(crate) fn query_specific(info: FontInfo) -> Vec<String> { system_fonts::query_specific(&mut Self::build_property(&info)) } /// Creates a new font instance from given FontInfo struct. pub(crate) fn from_info(context: &Context, info: FontInfo) -> core::Result<Font>
/// Creates a new unique font fn create(context: &Context, font_data: Vec<u8>, size: f32) -> Font { Font { data : font_data, font_id : FONT_COUNTER.fetch_add(1, Ordering::Relaxed), size : size, context : context.clone(), } } /// Write text to given layer using given font fn write_paragraph(self: &Self, layer: &Layer, text: &str, x: f32, y: f32, max_width: f32, color: Color, rotation: f32, scale_x: f32, scale_y: f32) { // !todo probably expensive, but rusttype is completely opaque. would be nice to be able to store Font::info outside of a "may or may not own" container let rt_font = rusttype::FontCollection::from_bytes(&self.data[..]).unwrap().into_font().unwrap(); let bucket_id = 0; let glyphs = Self::layout_paragraph(&rt_font, rusttype::Scale::uniform(self.size), max_width, &text); let context = self.context.lock(); context.font_cache.queue(self.font_id, &glyphs); let anchor = (0., 0.); let scale = (scale_x, scale_y); let cos_rot = rotation.cos(); let sin_rot = rotation.sin(); for glyph in &glyphs { if let Some((uv, pos, dim)) = context.font_cache.rect_for(self.font_id, glyph) { let dist_x = pos.0 * scale_x; let dist_y = pos.1 * scale_y; let offset_x = x + dist_x * cos_rot - dist_y * sin_rot; let offset_y = y + dist_x * sin_rot + dist_y * cos_rot; layer.add_rect(None, bucket_id, 0, 1, uv, (offset_x, offset_y), anchor, dim, color, rotation, scale); } } } /// Layout a paragraph of glyphs fn layout_paragraph<'a>(font: &'a rusttype::Font, scale: rusttype::Scale, width: f32, text: &str) -> Vec<rusttype::PositionedGlyph<'a>> { use unicode_normalization::UnicodeNormalization; let mut result = Vec::new(); let v_metrics = font.v_metrics(scale); let advance_height = v_metrics.ascent - v_metrics.descent + v_metrics.line_gap; let mut caret = rusttype::point(0.0, v_metrics.ascent); let mut last_glyph_id = None; for c in text.nfc() { if c.is_control() { match c { '\n' => { caret = rusttype::point(0.0, caret.y + advance_height); }, _ => {} } continue; } let base_glyph = font.glyph(c); if let Some(id) = last_glyph_id.take() { caret.x += font.pair_kerning(scale, id, base_glyph.id()); } last_glyph_id = Some(base_glyph.id()); let mut glyph = base_glyph.scaled(scale).positioned(caret); if let Some(bb) = glyph.pixel_bounding_box() { if width > 0.0 && bb.max.x > width as i32 { caret = rusttype::point(0.0, caret.y + advance_height); glyph = glyph.into_unpositioned().positioned(caret); last_glyph_id = None; } } caret.x += glyph.unpositioned().h_metrics().advance_width; result.push(glyph); } result } /// Builds a FontProperty for the underlying system_fonts library fn build_property(info: &FontInfo) -> system_fonts::FontProperty { let mut property = system_fonts::FontPropertyBuilder::new(); if info.family != "" { property = property.family(&info.family); } if info.italic { property = property.italic(); } if info.oblique { property = property.oblique(); } if info.bold { property = property.bold(); } if info.monospace { property = property.monospace(); } property.build() } } /// A wrapper around rusttype's font cache. pub struct FontCache { cache : Mutex<rusttype::gpu_cache::Cache<'static>>, queue : Mutex<Vec<(Rect<u32>, Vec<u8>)>>, dirty : AtomicBool, } impl FontCache { /// Creates a new fontcache instance.
pub fn new(width: u32, height: u32, scale_tolerance: f32, position_tolerance: f32) -> FontCache { let cache = rusttype::gpu_cache::CacheBuilder { width, height, scale_tolerance, position_tolerance, pad_glyphs: true, }.build(); FontCache { cache: Mutex::new(cache), queue: Mutex::new(Vec::new()), dirty: AtomicBool::new(false), } } /// Queues a glyph for caching. pub fn queue(self: &Self, font_id: usize, glyphs: &[rusttype::PositionedGlyph]) { let mut cache = self.cache.lock().unwrap(); let mut queue = self.queue.lock().unwrap(); let mut dirties = false; for glyph in glyphs { cache.queue_glyph(font_id, glyph.standalone()); } cache.cache_queued(|rect, data| { queue.push( ( ((rect.min.x, rect.min.y), (rect.max.x, rect.max.y)), data.to_vec() ) ); dirties = true; }).unwrap(); if dirties { self.dirty.store(dirties, Ordering::Relaxed); } } /// Updates the font cache texture. pub fn update(self: &Self, texture: &backend::Texture2d) { if self.dirty.load(Ordering::Relaxed) { let mut queue = self.queue.lock().unwrap(); for &(ref rect, ref data) in queue.deref() { texture.write(rect, data); } queue.clear(); self.dirty.store(false, Ordering::Relaxed); } } /// Returns a rectangle of uv coordinates for the given glyph as well as its offset and dimensions. pub fn rect_for(self: &Self, font_id: usize, glyph: &rusttype::PositionedGlyph) -> Option<(Rect, Point2, Point2)> { let cache = self.cache.lock().unwrap(); if let Ok(Some((uv_rect, screen_rect))) = cache.rect_for(font_id, glyph) { let uv = ((uv_rect.min.x, uv_rect.min.y), (uv_rect.max.x, uv_rect.max.y)); let pos = (screen_rect.min.x as f32, screen_rect.min.y as f32); let dim = ((screen_rect.max.x - screen_rect.min.x) as f32, (screen_rect.max.y - screen_rect.min.y) as f32); Some((uv, pos, dim)) } else { None } } } /// A struct used to filter the result of [`Font::query_specific()`](struct.Font.html#method.query_specific) /// or to describe a [`Font`](struct.Font.html) to be created from a system font /// via [`Font::from_info()`](struct.Font.html#method.from_info). #[derive(Clone)] pub struct FontInfo { pub italic : bool, pub oblique : bool, pub bold : bool, pub monospace : bool, pub family : String, pub size : f32, } impl Default for FontInfo { fn default() -> FontInfo { FontInfo { italic : false, oblique : false, bold : false, monospace : false, family : "".to_string(), size : 10.0, } } }
{ if let Some((font_data, _)) = system_fonts::get(&Self::build_property(&info)) { Ok(Self::create(context, font_data, info.size)) } else { Err(core::Error::FontError("Failed to get system font".to_string())) } }
identifier_body
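The body completed in this record, from_info, is a lookup-or-domain-error pattern: the system-font query either yields raw font bytes or the miss is mapped to core::Error::FontError. Below is a self-contained sketch of the same shape; the types are stand-ins, and the real system_fonts API differs.

#[derive(Debug)]
enum Error {
    FontError(String),
}

// Stand-in for the FontInfo struct in the records (fields abridged).
struct FontInfo {
    family: String,
    monospace: bool,
    size: f32,
}

impl Default for FontInfo {
    fn default() -> FontInfo {
        FontInfo { family: String::new(), monospace: false, size: 10.0 }
    }
}

// Pretend system-font lookup; returns raw font bytes when a match exists.
fn get_system_font(info: &FontInfo) -> Option<Vec<u8>> {
    if info.monospace || !info.family.is_empty() { Some(vec![0u8; 4]) } else { None }
}

// Same shape as from_info: either construct the value or map the miss to a
// domain error carrying a message.
fn from_info(info: FontInfo) -> Result<(Vec<u8>, f32), Error> {
    if let Some(font_data) = get_system_font(&info) {
        Ok((font_data, info.size))
    } else {
        Err(Error::FontError("Failed to get system font".to_string()))
    }
}

fn main() {
    // Struct-update syntax keeps the remaining fields at their defaults.
    let info = FontInfo { monospace: true, size: 12.0, ..Default::default() };
    assert!(from_info(info).is_ok());
}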
font.rs
use prelude::*; use core::{self, Layer, Context, Color, Point2, Rect}; use core::builder::*; use rusttype; use backends::backend; use font_loader::system_fonts; static FONT_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT; /// A font used for writing on a [`Layer`](struct.Layer.html). /// /// Use [`Font::builder()`](#method.builder) to create a new font from a registered system font or /// a local file. [`Font::from_file()`](#method.from_file) is a shortcut to achieve the latter. /// /// In addition to the usual properties of a font, radiant also assigns a fixed size /// to each font object. Instead of modifying this value, you can clone a new font /// with a different size using [`Font::clone_with_size()`](struct.Font.html#method.clone_with_size). #[derive(Clone)] pub struct Font { data : Vec<u8>, font_id : usize, size : f32, context : Context, } impl Debug for Font { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Font") .field("data_len", &self.data.len()) .field("font_id", &self.font_id)
.finish() } } impl Font { /// Returns a [font builder](support/struct.FontBuilder.html) for font construction. /// /// # Examples /// /// ```rust /// # use radiant_rs::*; /// # let display = Display::builder().hidden().build().unwrap(); /// # let renderer = Renderer::new(&display).unwrap(); /// # let context = display.context(); /// let my_font = Font::builder(&context).family("Arial").size(16.0).build().unwrap(); /// ``` pub fn builder(context: &Context) -> FontBuilder { FontBuilder::new(context) } /// Creates a font instance from a file. pub fn from_file(context: &Context, file: &str) -> core::Result<Font> { use std::io::Read; let mut f = File::open(Path::new(file))?; let mut font_data = Vec::new(); f.read_to_end(&mut font_data)?; Ok(Self::create(context, font_data, 12.0)) } /// Returns the names of all available system fonts. pub fn query_all() -> Vec<String> { system_fonts::query_all() } /// Returns a query builder to retrieve the names of all available system fonts with the given properties (e.g. monospace). /// /// # Examples /// /// ```rust /// # use radiant_rs::*; /// let monospace_fonts = Font::query().monospace().italic().fetch(); /// ``` pub fn query() -> FontQueryBuilder { FontQueryBuilder::new() } /// Returns a new font instance with given size. pub fn clone_with_size(self: &Self, size: f32) -> Font { let mut font = (*self).clone(); font.size = size; font } /// Write to given layer. pub fn write<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color) -> &Font where Point2<f32>: From<T> { let position = Point2::from(position); self.write_paragraph(layer, text, position.0, position.1, 0.0, color, 0.0, 1.0, 1.0); self } /// Write to given layer. Breaks lines after max_width pixels. pub fn write_wrapped<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32) -> &Font where Point2<f32>: From<T> { let position = Point2::from(position); self.write_paragraph(layer, text, position.0, position.1, max_width, color, 0.0, 1.0, 1.0); self } /// Write to given layer. Breaks lines after max_width pixels and applies given rotation and scaling. pub fn write_transformed<T, U>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32, rotation: f32, scale: U) -> &Font where Point2<f32>: From<T>+From<U> { let position = Point2::from(position); let scale = Point2::from(scale); self.write_paragraph(layer, text, position.0, position.1, max_width, color, rotation, scale.0, scale.1); self } /// Returns the font wrapped in an std::Arc. pub fn arc(self: Self) -> Arc<Self> { Arc::new(self) } /// Returns the names of all available system fonts with the given properties (e.g. monospace). pub(crate) fn query_specific(info: FontInfo) -> Vec<String> { system_fonts::query_specific(&mut Self::build_property(&info)) } /// Creates a new font instance from given FontInfo struct. 
pub(crate) fn from_info(context: &Context, info: FontInfo) -> core::Result<Font> { if let Some((font_data, _)) = system_fonts::get(&Self::build_property(&info)) { Ok(Self::create(context, font_data, info.size)) } else { Err(core::Error::FontError("Failed to get system font".to_string())) } } /// Creates a new unique font fn create(context: &Context, font_data: Vec<u8>, size: f32) -> Font { Font { data : font_data, font_id : FONT_COUNTER.fetch_add(1, Ordering::Relaxed), size : size, context : context.clone(), } } /// Write text to given layer using given font fn write_paragraph(self: &Self, layer: &Layer, text: &str, x: f32, y: f32, max_width: f32, color: Color, rotation: f32, scale_x: f32, scale_y: f32) { // !todo probably expensive, but rusttype is completely opaque. would be nice to be able to store Font::info outside of a "may or may not own" container let rt_font = rusttype::FontCollection::from_bytes(&self.data[..]).unwrap().into_font().unwrap(); let bucket_id = 0; let glyphs = Self::layout_paragraph(&rt_font, rusttype::Scale::uniform(self.size), max_width, &text); let context = self.context.lock(); context.font_cache.queue(self.font_id, &glyphs); let anchor = (0., 0.); let scale = (scale_x, scale_y); let cos_rot = rotation.cos(); let sin_rot = rotation.sin(); for glyph in &glyphs { if let Some((uv, pos, dim)) = context.font_cache.rect_for(self.font_id, glyph) { let dist_x = pos.0 * scale_x; let dist_y = pos.1 * scale_y; let offset_x = x + dist_x * cos_rot - dist_y * sin_rot; let offset_y = y + dist_x * sin_rot + dist_y * cos_rot; layer.add_rect(None, bucket_id, 0, 1, uv, (offset_x, offset_y), anchor, dim, color, rotation, scale); } } } /// Layout a paragraph of glyphs fn layout_paragraph<'a>(font: &'a rusttype::Font, scale: rusttype::Scale, width: f32, text: &str) -> Vec<rusttype::PositionedGlyph<'a>> { use unicode_normalization::UnicodeNormalization; let mut result = Vec::new(); let v_metrics = font.v_metrics(scale); let advance_height = v_metrics.ascent - v_metrics.descent + v_metrics.line_gap; let mut caret = rusttype::point(0.0, v_metrics.ascent); let mut last_glyph_id = None; for c in text.nfc() { if c.is_control() { match c { '\n' => { caret = rusttype::point(0.0, caret.y + advance_height); }, _ => {} } continue; } let base_glyph = font.glyph(c); if let Some(id) = last_glyph_id.take() { caret.x += font.pair_kerning(scale, id, base_glyph.id()); } last_glyph_id = Some(base_glyph.id()); let mut glyph = base_glyph.scaled(scale).positioned(caret); if let Some(bb) = glyph.pixel_bounding_box() { if width > 0.0 && bb.max.x > width as i32 { caret = rusttype::point(0.0, caret.y + advance_height); glyph = glyph.into_unpositioned().positioned(caret); last_glyph_id = None; } } caret.x += glyph.unpositioned().h_metrics().advance_width; result.push(glyph); } result } /// Builds a FontProperty for the underlying system_fonts library fn build_property(info: &FontInfo) -> system_fonts::FontProperty { let mut property = system_fonts::FontPropertyBuilder::new(); if info.family != "" { property = property.family(&info.family); } if info.italic { property = property.italic(); } if info.oblique { property = property.oblique(); } if info.bold { property = property.bold(); } if info.monospace { property = property.monospace(); } property.build() } } /// A wrapper around rusttype's font cache. pub struct FontCache { cache : Mutex<rusttype::gpu_cache::Cache<'static>>, queue : Mutex<Vec<(Rect<u32>, Vec<u8>)>>, dirty : AtomicBool, } impl FontCache { /// Creates a new fontcache instance.
pub fn new(width: u32, height: u32, scale_tolerance: f32, position_tolerance: f32) -> FontCache { let cache = rusttype::gpu_cache::CacheBuilder { width, height, scale_tolerance, position_tolerance, pad_glyphs: true, }.build(); FontCache { cache: Mutex::new(cache), queue: Mutex::new(Vec::new()), dirty: AtomicBool::new(false), } } /// Queues a glyph for caching. pub fn queue(self: &Self, font_id: usize, glyphs: &[rusttype::PositionedGlyph]) { let mut cache = self.cache.lock().unwrap(); let mut queue = self.queue.lock().unwrap(); let mut dirties = false; for glyph in glyphs { cache.queue_glyph(font_id, glyph.standalone()); } cache.cache_queued(|rect, data| { queue.push( ( ((rect.min.x, rect.min.y), (rect.max.x, rect.max.y)), data.to_vec() ) ); dirties = true; }).unwrap(); if dirties { self.dirty.store(dirties, Ordering::Relaxed); } } /// Updates the font cache texture. pub fn update(self: &Self, texture: &backend::Texture2d) { if self.dirty.load(Ordering::Relaxed) { let mut queue = self.queue.lock().unwrap(); for &(ref rect, ref data) in queue.deref() { texture.write(rect, data); } queue.clear(); self.dirty.store(false, Ordering::Relaxed); } } /// Returns a rectangle of uv coordinates for the given glyph as well as its offset and dimensions. pub fn rect_for(self: &Self, font_id: usize, glyph: &rusttype::PositionedGlyph) -> Option<(Rect, Point2, Point2)> { let cache = self.cache.lock().unwrap(); if let Ok(Some((uv_rect, screen_rect))) = cache.rect_for(font_id, glyph) { let uv = ((uv_rect.min.x, uv_rect.min.y), (uv_rect.max.x, uv_rect.max.y)); let pos = (screen_rect.min.x as f32, screen_rect.min.y as f32); let dim = ((screen_rect.max.x - screen_rect.min.x) as f32, (screen_rect.max.y - screen_rect.min.y) as f32); Some((uv, pos, dim)) } else { None } } } /// A struct used to filter the result of [`Font::query_specific()`](struct.Font.html#method.query_specific) /// or to describe a [`Font`](struct.Font.html) to be created from a system font /// via [`Font::from_info()`](struct.Font.html#method.from_info). #[derive(Clone)] pub struct FontInfo { pub italic : bool, pub oblique : bool, pub bold : bool, pub monospace : bool, pub family : String, pub size : f32, } impl Default for FontInfo { fn default() -> FontInfo { FontInfo { italic : false, oblique : false, bold : false, monospace : false, family : "".to_string(), size : 10.0, } } }
.field("size", &self.size)
random_line_split
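The `layout_paragraph` helper above advances a caret glyph by glyph, applies pair kerning, and wraps to a new line once a glyph's bounding box crosses `max_width`. A minimal standalone sketch of the same caret-advance and wrap control flow, using a hypothetical fixed advance width in place of rusttype's per-glyph metrics:

// Greedy wrap-and-advance layout, decoupled from rusttype.
// `advance` is a fixed stand-in for per-glyph horizontal metrics.
#[derive(Debug, Clone, Copy)]
struct Positioned {
    ch: char,
    x: f32,
    y: f32,
}

fn layout(text: &str, advance: f32, line_height: f32, max_width: f32) -> Vec<Positioned> {
    let mut out = Vec::new();
    let (mut x, mut y) = (0.0, line_height); // baseline of the first line
    for ch in text.chars() {
        if ch == '\n' {
            x = 0.0;
            y += line_height;
            continue;
        }
        // Wrap once the glyph's right edge would pass max_width (0.0 disables wrapping).
        if max_width > 0.0 && x + advance > max_width {
            x = 0.0;
            y += line_height;
        }
        out.push(Positioned { ch, x, y });
        x += advance;
    }
    out
}

fn main() {
    for g in layout("hello wrap", 8.0, 12.0, 40.0) {
        println!("{:?}", g);
    }
}

With real metrics the wrap test is per glyph and kerning shifts the caret before positioning, as in `layout_paragraph`, but the control flow is identical.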
font.rs
use prelude::*; use core::{self, Layer, Context, Color, Point2, Rect}; use core::builder::*; use rusttype; use backends::backend; use font_loader::system_fonts; static FONT_COUNTER: AtomicUsize = ATOMIC_USIZE_INIT; /// A font used for writing on a [`Layer`](struct.Layer.html). /// /// Use [`Font::builder()`](#method.builder) to create a new font from a registered system font or /// a local file. [`Font::from_file()`](#method.from_file) is a shortcut to achieve the latter. /// /// In addition to the usual properties of a font, radiant also assigns a fixed size /// to each font object. Instead of modifying this value, you can clone a new font /// with a different size using [`Font::clone_with_size()`](struct.Font.html#method.clone_with_size). #[derive(Clone)] pub struct Font { data : Vec<u8>, font_id : usize, size : f32, context : Context, } impl Debug for Font { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { f.debug_struct("Font") .field("data_len", &self.data.len()) .field("font_id", &self.font_id) .field("size", &self.size) .finish() } } impl Font { /// Returns a [font builder](support/struct.FontBuilder.html) for font construction. /// /// # Examples /// /// ```rust /// # use radiant_rs::*; /// # let display = Display::builder().hidden().build().unwrap(); /// # let renderer = Renderer::new(&display).unwrap(); /// # let context = display.context(); /// let my_font = Font::builder(&context).family("Arial").size(16.0).build().unwrap(); /// ``` pub fn builder(context: &Context) -> FontBuilder { FontBuilder::new(context) } /// Creates a font instance from a file. pub fn from_file(context: &Context, file: &str) -> core::Result<Font> { use std::io::Read; let mut f = File::open(Path::new(file))?; let mut font_data = Vec::new(); f.read_to_end(&mut font_data)?; Ok(Self::create(context, font_data, 12.0)) } /// Returns the names of all available system fonts. pub fn query_all() -> Vec<String> { system_fonts::query_all() } /// Returns a query builder to retrieve the names of all available system fonts with the given properties (e.g. monospace). /// /// # Examples /// /// ```rust /// # use radiant_rs::*; /// let monospace_fonts = Font::query().monospace().italic().fetch(); /// ``` pub fn query() -> FontQueryBuilder { FontQueryBuilder::new() } /// Returns a new font instance with given size. pub fn clone_with_size(self: &Self, size: f32) -> Font { let mut font = (*self).clone(); font.size = size; font } /// Write to given layer. pub fn write<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color) -> &Font where Point2<f32>: From<T> { let position = Point2::from(position); self.write_paragraph(layer, text, position.0, position.1, 0.0, color, 0.0, 1.0, 1.0); self } /// Write to given layer. Breaks lines after max_width pixels. pub fn write_wrapped<T>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32) -> &Font where Point2<f32>: From<T> { let position = Point2::from(position); self.write_paragraph(layer, text, position.0, position.1, max_width, color, 0.0, 1.0, 1.0); self } /// Write to given layer. Breaks lines after max_width pixels and applies given rotation and scaling. 
pub fn write_transformed<T, U>(self: &Self, layer: &Layer, text: &str, position: T, color: Color, max_width: f32, rotation: f32, scale: U) -> &Font where Point2<f32>: From<T>+From<U> { let position = Point2::from(position); let scale = Point2::from(scale); self.write_paragraph(layer, text, position.0, position.1, max_width, color, rotation, scale.0, scale.1); self } /// Returns the font wrapped in an std::Arc. pub fn arc(self: Self) -> Arc<Self> { Arc::new(self) } /// Returns the names of all available system fonts with the given properties (e.g. monospace). pub(crate) fn query_specific(info: FontInfo) -> Vec<String> { system_fonts::query_specific(&mut Self::build_property(&info)) } /// Creates a new font instance from given FontInfo struct. pub(crate) fn from_info(context: &Context, info: FontInfo) -> core::Result<Font> { if let Some((font_data, _)) = system_fonts::get(&Self::build_property(&info)) { Ok(Self::create(context, font_data, info.size)) } else { Err(core::Error::FontError("Failed to get system font".to_string())) } } /// Creates a new unique font fn create(context: &Context, font_data: Vec<u8>, size: f32) -> Font { Font { data : font_data, font_id : FONT_COUNTER.fetch_add(1, Ordering::Relaxed), size : size, context : context.clone(), } } /// Write text to given layer using given font fn write_paragraph(self: &Self, layer: &Layer, text: &str, x: f32, y: f32, max_width: f32, color: Color, rotation: f32, scale_x: f32, scale_y: f32) { // !todo probably expensive, but rusttype is completely opaque. would be nice to be able to store Font::info outside of a "may or may not own" container let rt_font = rusttype::FontCollection::from_bytes(&self.data[..]).unwrap().into_font().unwrap(); let bucket_id = 0; let glyphs = Self::layout_paragraph(&rt_font, rusttype::Scale::uniform(self.size), max_width, &text); let context = self.context.lock(); context.font_cache.queue(self.font_id, &glyphs); let anchor = (0., 0.); let scale = (scale_x, scale_y); let cos_rot = rotation.cos(); let sin_rot = rotation.sin(); for glyph in &glyphs { if let Some((uv, pos, dim)) = context.font_cache.rect_for(self.font_id, glyph) { let dist_x = pos.0 * scale_x; let dist_y = pos.1 * scale_y; let offset_x = x + dist_x * cos_rot - dist_y * sin_rot; let offset_y = y + dist_x * sin_rot + dist_y * cos_rot; layer.add_rect(None, bucket_id, 0, 1, uv, (offset_x, offset_y), anchor, dim, color, rotation, scale); } } } /// Layout a paragraph of glyphs fn layout_paragraph<'a>(font: &'a rusttype::Font, scale: rusttype::Scale, width: f32, text: &str) -> Vec<rusttype::PositionedGlyph<'a>> { use unicode_normalization::UnicodeNormalization; let mut result = Vec::new(); let v_metrics = font.v_metrics(scale); let advance_height = v_metrics.ascent - v_metrics.descent + v_metrics.line_gap; let mut caret = rusttype::point(0.0, v_metrics.ascent); let mut last_glyph_id = None; for c in text.nfc() { if c.is_control() { match c { '\n' => { caret = rusttype::point(0.0, caret.y + advance_height); }, _ => {} } continue; } let base_glyph = font.glyph(c); if let Some(id) = last_glyph_id.take() { caret.x += font.pair_kerning(scale, id, base_glyph.id()); } last_glyph_id = Some(base_glyph.id()); let mut glyph = base_glyph.scaled(scale).positioned(caret); if let Some(bb) = glyph.pixel_bounding_box() { if width > 0.0 && bb.max.x > width as i32 { caret = rusttype::point(0.0, caret.y + advance_height); glyph = glyph.into_unpositioned().positioned(caret); last_glyph_id = None; } } caret.x += glyph.unpositioned().h_metrics().advance_width; 
result.push(glyph); } result } /// Builds a FontProperty for the underlying system_fonts library fn build_property(info: &FontInfo) -> system_fonts::FontProperty { let mut property = system_fonts::FontPropertyBuilder::new(); if info.family != "" { property = property.family(&info.family); } if info.italic { property = property.italic(); } if info.oblique { property = property.oblique(); } if info.bold
if info.monospace { property = property.monospace(); } property.build() } } /// A wrapper around rusttype's font cache. pub struct FontCache { cache : Mutex<rusttype::gpu_cache::Cache<'static>>, queue : Mutex<Vec<(Rect<u32>, Vec<u8>)>>, dirty : AtomicBool, } impl FontCache { /// Creates a new fontcache instance. pub fn new(width: u32, height: u32, scale_tolerance: f32, position_tolerance: f32) -> FontCache { let cache = rusttype::gpu_cache::CacheBuilder { width, height, scale_tolerance, position_tolerance, pad_glyphs: true, }.build(); FontCache { cache: Mutex::new(cache), queue: Mutex::new(Vec::new()), dirty: AtomicBool::new(false), } } /// Queues a glyph for caching. pub fn queue(self: &Self, font_id: usize, glyphs: &[rusttype::PositionedGlyph]) { let mut cache = self.cache.lock().unwrap(); let mut queue = self.queue.lock().unwrap(); let mut dirties = false; for glyph in glyphs { cache.queue_glyph(font_id, glyph.standalone()); } cache.cache_queued(|rect, data| { queue.push( ( ((rect.min.x, rect.min.y), (rect.max.x, rect.max.y)), data.to_vec() ) ); dirties = true; }).unwrap(); if dirties { self.dirty.store(dirties, Ordering::Relaxed); } } /// Updates the font cache texture. pub fn update(self: &Self, texture: &backend::Texture2d) { if self.dirty.load(Ordering::Relaxed) { let mut queue = self.queue.lock().unwrap(); for &(ref rect, ref data) in queue.deref() { texture.write(rect, data); } queue.clear(); self.dirty.store(false, Ordering::Relaxed); } } /// Returns a rectangle of uv coordinates for the given glyph as well as its offset and dimensions. pub fn rect_for(self: &Self, font_id: usize, glyph: &rusttype::PositionedGlyph) -> Option<(Rect, Point2, Point2)> { let cache = self.cache.lock().unwrap(); if let Ok(Some((uv_rect, screen_rect))) = cache.rect_for(font_id, glyph) { let uv = ((uv_rect.min.x, uv_rect.min.y), (uv_rect.max.x, uv_rect.max.y)); let pos = (screen_rect.min.x as f32, screen_rect.min.y as f32); let dim = ((screen_rect.max.x - screen_rect.min.x) as f32, (screen_rect.max.y - screen_rect.min.y) as f32); Some((uv, pos, dim)) } else { None } } } /// A struct used to filter the result of [`Font::query_specific()`](struct.Font.html#method.query_specific) /// or to describe a [`Font`](struct.Font.html) to be created from a system font /// via [`Font::from_info()`](struct.Font.html#method.from_info). #[derive(Clone)] pub struct FontInfo { pub italic : bool, pub oblique : bool, pub bold : bool, pub monospace : bool, pub family : String, pub size : f32, } impl Default for FontInfo { fn default() -> FontInfo { FontInfo { italic : false, oblique : false, bold : false, monospace : false, family : "".to_string(), size : 10.0, } } }
{ property = property.bold(); }
conditional_block
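`FontCache` above batches rasterized glyph rectangles in a `Mutex`-guarded queue and flips an `AtomicBool` so the render path only rewrites the texture when something changed. A reduced sketch of that queue-plus-dirty-flag pattern; `UploadQueue` and the `write` callback are illustrative stand-ins, and `swap(false, ...)` folds the load and the reset into a single atomic step:

use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Mutex;

// Pending texture writes: destination rectangle (x, y, w, h) plus raw bytes.
struct UploadQueue {
    pending: Mutex<Vec<((u32, u32, u32, u32), Vec<u8>)>>,
    dirty: AtomicBool,
}

impl UploadQueue {
    // Producer side: stash the bytes and mark the cache dirty.
    fn push(&self, rect: (u32, u32, u32, u32), data: Vec<u8>) {
        self.pending.lock().unwrap().push((rect, data));
        self.dirty.store(true, Ordering::Relaxed);
    }

    // Render side: drain the queue only when something changed.
    fn flush(&self, mut write: impl FnMut((u32, u32, u32, u32), &[u8])) {
        if self.dirty.swap(false, Ordering::Relaxed) {
            for (rect, data) in self.pending.lock().unwrap().drain(..) {
                write(rect, &data);
            }
        }
    }
}

fn main() {
    let q = UploadQueue { pending: Mutex::new(Vec::new()), dirty: AtomicBool::new(false) };
    q.push((0, 0, 2, 2), vec![0u8; 4]);
    q.flush(|rect, data| println!("upload {:?} ({} bytes)", rect, data.len()));
}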
app.js
console.log('sanity check, app.js is connected') //Declare global variables here var map, template, $reviewsList, allReviews = [], classes, batwichSmack = [ 'Wanna know my secret identity?', 'Stick it in your food hole!', 'For whom the BLT tolls.', 'A hotdog is no sandwich.', 'Who wants a knuckle sandwich!?', 'Who you callin turkey!?', 'Swear to me!', "I'm Batwich", 'My parents were eaten when I was young. I took it poorly.', "I'm a cipher, wrapped in an enigma, smothered in secret sauce.", "I don't trust noodles. They're all impastas.", 'My partner is a small fry.' ], heroSmack = [ 'Eat me!', 'Silence of the hams.', 'The po-boy only rings twice.', "I'm pretty sure a hot dog is a sandwich.", 'I hAvE cHaT BuBbLeS!!', 'Whoa, no one called anyone a JT.', 'Stick it in your food hole!', 'I never get soggy.', 'My super power is flavor!', 'Please, do it for the sliders.', 'Potato chips do not belong in a sandwich.', 'Lettuce celebrate!' ]; if(!activeUser){ var activeUser = {} } var giphyApi = "https://api.giphy.com/v1/gifs/search"; if(!(activeUser.reviews)){ activeUser.reviews = [] } // these things only happen once the document is ready $(document).ready(function(){ console.log('The DOM body is ready') console.log('Body parser parsing that body!'); $('.batwich-chat').hide(); $('.hero-chat').hide(); //***************** //***************** //Gif Handlebars templates var sourceOne = $('#selectableGif-template2').html(), templateGif = Handlebars.compile(sourceOne), sourceThree = $('#gif-choice').html(), templateGifChoice = Handlebars.compile(sourceThree), // Review Handlebars template $reviewsList = ('#review-form'), sourceTwo = $("#review-template").html(), templateReview = Handlebars.compile(sourceTwo); //Restaurant Handlebars templates sourceRestaurant = $('#restaurant-template').html(), templateRestaurant = Handlebars.compile(sourceRestaurant), sourceTwoButtons = $('#review-template-buttons').html(), templateReviewButtons = Handlebars.compile(sourceTwoButtons); // this is what submits the form to add a review in $('.new-review').on('submit', function(event) { console.log('submit clicked'); event.preventDefault(); $.ajax({ method: 'POST', url: '/api/reviews', data: $(this).serializeArray(), success: newReviewSuccess, error: newReviewError }) }) // this is what searches giphy for images $('.form-gif').on('submit', function(event){ console.log('gif submit clicked'); event.preventDefault(); $.ajax({ method: 'GET', url: giphyApi, data: $(this).serializeArray(), success: newGifSearchSuccess, error: newGifSearchError }) }) // this is what handles clicking on a gif $('.gifSelectionField2').on('click', '.gifBox', function(event){ $('.gifSelectionField2').empty(); var pickedGifHtml = templateGifChoice({ userChosenGif: this.src}); $('.selected-gif').empty(); $('.selected-gif').append(pickedGifHtml); }) // this is what populates selectable gifs function newGifSearchSuccess(json){ console.log('ajax call for gif successful. Gif: ', json); // empty space to prevent gifs from multiple searches showing at the same time $('.gifSelectionField2').empty(); json.data.forEach(function(gif){ var giphyHtml = templateGif({ insertGifHere: gif.images.fixed_height_small.url}); $(".gifSelectionField2").append(giphyHtml); }); } // when pages loads this will trigger and runs the append review function. 
gets reviews from review all endpoint $.ajax({ method: 'GET', url: '/api/reviews', success: appendReviews, error: noAppend }) //when page loads, save active user to a variable $.ajax({ method: 'GET', url: '/api/user/active', success: saveUser, // error: noAppend }) // this is the area that deals with the map //hide map area when page loads $('#hero-map').hide(); // listener for find hero button. hides button to search again until map is moved. $('.map-section').on('click', '#map-button', function(){ console.log('map button pressed'); $('#hero-map').show(); $('.find-hero-button').hide(); // set default location as Hell, MI var defaultLocation = { location: { lat: 42.4347, lng: -83.9850 } } // create the map using the default location createMap(defaultLocation); }) // creates a google map using location info function createMap(data){ console.log('location found - lat: ', data.location.lat, 'lng: ', data.location.lng); $('.change-location').hide(); if (document.getElementById('mapPlacement')){ map = new google.maps.Map(document.getElementById('mapPlacement'), { center: {lat: data.location.lat, lng: data.location.lng}, zoom: 15 }) $.ajax({ method: 'POST', url: '/api/locations', data: data, success: showRestaurants, error: noRestaurants }) } else { activeUser.location = data.location; console.log(activeUser); $.ajax({ method: 'POST', url: '/api/locations', data: activeUser, success: appendRestaurants, error: noRestaurants }) } } function noLocation(data){ console.log('could not find location ', data) } // looks at each restaurant sent from yelp function showRestaurants(data){ console.log('you found restaurants! ', data); data.forEach(function(restaurant){ var location = { lat: restaurant.coordinates.latitude, lng: restaurant.coordinates.longitude } // this is the content that goes on the card associated with each restaurant in the map var content = '<h6>' + restaurant.name + '</h6>' + '<p>' + restaurant.location.address1 + '</p>' addMarker(location, content) }) } // places a marker on the map for each restaurant function addMarker(position, content){ var myLatlng, marker, infowindow, contentString; // places each marker marker = new google.maps.Marker({ position: position, map: map }); // fills in data for the card that appears when clicking on any marker contentString = content; infowindow = new google.maps.InfoWindow({ content: contentString }); // listen for click to open the window when a marker is clicked on marker.addListener('click', function() { // open the restaurant info when marker clicked on infowindow.open(map, marker); }); } function noRestaurants(data){ console.log('you found no restaurants :( NO SOUP FOR YOU ... wait ... sandwich ... 
NO SANDWICH FOR YOU!!', data); } //Detects clicking and dragging on the map, shows the button to search $('.hero-map').mousedown(function(){ if ($('.hero-map').mousemove(function(){ })){ $('.change-location').show(600); } }) // Listener for searching where the user currently is $('.current-location').on('click', '#current-location', findLocation) function findLocation (){ console.log('I know where you live!') $.ajax({ method: 'POST', url: 'https://www.googleapis.com/geolocation/v1/geolocate?key=AIzaSyDN9w5iCC44NN-_bnoO7Yu8ZXnmHB_QmJg', success: createMap, error: noLocation }) } // Listener for searching where the map is currently centered $('.change-location').on('click', '#change-location', function(){ console.log('Searching in the new map location'); var movedMapLocation = { location: { lat: map.getCenter().lat(), lng: map.getCenter().lng() } } createMap(movedMapLocation); }) // button listener to hide the map area once it's open $('.map-section').on('click', '#hide-map-button', function(){ $('#hero-map').hide(); $('.find-hero-button').show(); }) // this is the end of the map area $('.business-search').on('submit', function(event) { event.preventDefault(); console.log('submit clicked'); activeUser.term = $(this).serializeArray()[0].value console.log(activeUser, $(this).serializeArray()) findLocation() }) function appendRestaurants(restaurants){ console.log(restaurants) $('#business-submit-form').removeClass('hidden') $('.restaurant-list').html('') restaurants.forEach(function (restaurant){ $('.restaurant-list').append(templateRestaurant({restaurantName: restaurant.name})) }) } $('.business-submit').on('submit', function(event) { event.preventDefault(); console.log('submit clicked'); var restaurant = $(this).serializeArray()[0].value; $('.business-forms').addClass('hidden'); $('#review-form').removeClass('hidden'); $('#review-form').prepend('<h2>'+restaurant+'</h2>'); $('#restaurant-input').val(restaurant) console.log(restaurant); }) // this is what spits out each review onto the page. function appendReviews(allReviews) { var reviewHtml; // for each review: allReviews.forEach(function(reviewData){ // create HTML for individual review var reviewInfo = { reviewContent: reviewData.reviewContent, reviewStars: reviewData.stars, // ternary checking whether reviewData.recommend is true or false - if true return Yes, if false return No reviewRecommend: reviewData.recommend ? "Yes" : "No", reviewGif: reviewData.gif, reviewId: reviewData._id, author: reviewData.username }; console.log(reviewData.username) if (activeUser.reviews.indexOf(reviewData._id)>=0){ reviewHtml = templateReviewButtons(reviewInfo) } else { reviewHtml = templateReview(reviewInfo) } if(activeUser._id){ $('.login').hide() $('.sign-up').hide() } else { $('.create').hide() $('.logout').hide() } // add review to top of review area $('.appendReviews').prepend(reviewHtml); }); // listener for pressing the edit review button. Directs to edit page. $('.reviewIndividual').on('click', '#edit-button', function(){ localStorage.setItem('classes', $(this).attr("class").split(' ')[0]); console.log('the edit button was pressed! Review Id is ' + classes); window.location.href="../edit"; }) // listener for the create review button. Directs to create page. 
$('#create-button').on('click', function(){ console.log('the create button was pressed!'); window.location.href="../create"; }) $('.edit-review').on('submit', function(event) { console.log('edit review submit clicked'); event.preventDefault(); $.ajax({ method: 'PUT', url: '/api/reviews/' + localStorage.getItem("classes"), data: $(this).serializeArray(), success: newReviewSuccess, error: newReviewError }) }) // click event for pressing the delete review button. hits the delete route with Id from review $('.reviewIndividual').on('click', '#delete-button', function(){ // sets variable to be the first class associated with this button (which is the id of the review) var classes = $(this).attr("class").split(' ')[0]; console.log('the delete button was pressed! Review Id is ' + classes); $.ajax({ method: 'DELETE', url: '/api/reviews/' + classes, success: deleteReview, error: deleteFailure }) location.reload(); }) // this is the end of append reviews function }; function editReview(data){ console.log('Trying to edit the review below', data); templateReview({ reviewContent: data.reviewContent2 }) console.log('The review was edited', data); return templateReview; window.location.href="../" } function heroChat() { smackTalk = setInterval(function(){ $('.batwich-chat').empty(); $('.hero-chat').empty(); var chance = Math.round(Math.random()); if (chance) { $('.hero-chat').hide(); $('.batwich-chat').show(400); $('.batwich-chat').html(batwichSmack[Math.round(Math.random() * (batwichSmack.length - 1))]); } else { $('.batwich-chat').hide(); $('.hero-chat').show(400); $('.hero-chat').html(heroSmack[Math.round(Math.random() * (heroSmack.length - 1))]); } }, 5500); } heroChat(); // This is the end of on ready function }) function newReviewSuccess(review){ console.log('ajax call on review successful. Review: ', review); window.location.href="../" } function newReviewError(error){ console.log('ajax call on review dun messed up. Error: ', error); } function yelpSuccess(restaurant){ console.log(restaurant) } function yelpError (error){ console.log('ajax call on yelp dun messed up. Error: ', error); } function yelpCallback (data){ console.log('this is the yelp callback', data) } function noAppend (err){ console.log('the reviews did not append', err) } function newGifSearchError(error){ console.log('ajax call on gif search went bad, boss. Error: ', error); } function deleteReview(data){ console.log('delete review triggered!', data); } function deleteFailure(error){ console.log('The delete went bad. Did you delete the right thing? Did you delete everything?', error); } function editFailure(error){ console.log('Oh, no! We have failed to edit! Things remained the same, and you hated that stuff! Error: ', error); } function
(user){ console.log(user) activeUser = user }
saveUser
identifier_name
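The `heroChat` interval in the app.js record above picks a line with `Math.round(Math.random() * (length - 1))`, which gives the first and last entries only half the weight of the interior ones: rounding maps just `[0, 0.5)` to index 0 but a full unit interval to each middle index. A small Rust sketch that computes the resulting weights analytically:

// Probability mass each index receives when a uniform draw over
// [0, len-1] is rounded to the nearest integer.
fn round_bucket_weights(len: usize) -> Vec<f64> {
    let max = (len - 1) as f64;
    (0..len)
        .map(|i| {
            let lo = (i as f64 - 0.5).max(0.0);
            let hi = (i as f64 + 0.5).min(max);
            (hi - lo) / max
        })
        .collect()
}

fn main() {
    // For 4 entries: [0.1667, 0.3333, 0.3333, 0.1667] -- the endpoints are halved.
    // Math.floor(Math.random() * length) (or rand's gen_range(0..len)) is uniform.
    println!("{:?}", round_bucket_weights(4));
}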
app.js
console.log('sanity check, app.js is connected') //Declare global variables here var map, template, $reviewsList, allReviews = [], classes, batwichSmack = [ 'Wanna know my secret identity?', 'Stick it in your food hole!', 'For whom the BLT tolls.', 'A hotdog is no sandwich.', 'Who wants a knuckle sandwich!?', 'Who you callin turkey!?', 'Swear to me!', "I'm Batwich", 'My parents were eaten when I was young. I took it poorly.', "I'm a cipher, wrapped in an enigma, smothered in secret sauce.", "I don't trust noodles. They're all impastas.", 'My partner is a small fry.' ], heroSmack = [ 'Eat me!', 'Silence of the hams.', 'The po-boy only rings twice.', "I'm pretty sure a hot dog is a sandwich.", 'I hAvE cHaT BuBbLeS!!', 'Whoa, no one called anyone a JT.', 'Stick it in your food hole!', 'I never get soggy.', 'My super power is flavor!', 'Please, do it for the sliders.', 'Potato chips do not belong in a sandwich.', 'Lettuce celebrate!' ]; if(!activeUser){ var activeUser = {} } var giphyApi = "https://api.giphy.com/v1/gifs/search"; if(!(activeUser.reviews)){ activeUser.reviews = [] } // these things only happen once the document is ready $(document).ready(function(){ console.log('The DOM body is ready') console.log('Body parser parsing that body!'); $('.batwich-chat').hide(); $('.hero-chat').hide(); //***************** //***************** //Gif Handlebars templates var sourceOne = $('#selectableGif-template2').html(), templateGif = Handlebars.compile(sourceOne), sourceThree = $('#gif-choice').html(), templateGifChoice = Handlebars.compile(sourceThree), // Review Handlebars template $reviewsList = ('#review-form'), sourceTwo = $("#review-template").html(), templateReview = Handlebars.compile(sourceTwo); //Restaurant Handlebars templates sourceRestaurant = $('#restaurant-template').html(), templateRestaurant = Handlebars.compile(sourceRestaurant), sourceTwoButtons = $('#review-template-buttons').html(), templateReviewButtons = Handlebars.compile(sourceTwoButtons); // this is what submits the form to add a review in $('.new-review').on('submit', function(event) { console.log('submit clicked'); event.preventDefault(); $.ajax({ method: 'POST', url: '/api/reviews', data: $(this).serializeArray(), success: newReviewSuccess, error: newReviewError }) }) // this is what searches giphy for images $('.form-gif').on('submit', function(event){ console.log('gif submit clicked'); event.preventDefault(); $.ajax({ method: 'GET', url: giphyApi, data: $(this).serializeArray(), success: newGifSearchSuccess, error: newGifSearchError }) }) // this is what handles clicking on a gif $('.gifSelectionField2').on('click', '.gifBox', function(event){ $('.gifSelectionField2').empty(); var pickedGifHtml = templateGifChoice({ userChosenGif: this.src}); $('.selected-gif').empty(); $('.selected-gif').append(pickedGifHtml); }) // this is what populates selectable gifs function newGifSearchSuccess(json){ console.log('ajax call for gif successful. Gif: ', json); // empty space to prevent gifs from multiple searches showing at the same time $('.gifSelectionField2').empty(); json.data.forEach(function(gif){ var giphyHtml = templateGif({ insertGifHere: gif.images.fixed_height_small.url}); $(".gifSelectionField2").append(giphyHtml); }); } // when pages loads this will trigger and runs the append review function. 
gets reviews from review all endpoint $.ajax({ method: 'GET', url: '/api/reviews', success: appendReviews, error: noAppend }) //when page loads, save active user to a variable $.ajax({ method: 'GET', url: '/api/user/active', success: saveUser, // error: noAppend }) // this is the area that deals with the map
// listener for find hero button. hides button to search again until map is moved. $('.map-section').on('click', '#map-button', function(){ console.log('map button pressed'); $('#hero-map').show(); $('.find-hero-button').hide(); // set default location as Hell Mi var defaultLocation = { location: { lat: 42.4347, lng: -83.9850 } } // crete the map using the default location createMap(defaultLocation); }) // creates a google map using location info function createMap(data){ console.log('location found - lat: ', data.location.lat, 'lng: ', data.location.lng); $('.change-location').hide(); if (document.getElementById('mapPlacement')){ map = new google.maps.Map(document.getElementById('mapPlacement'), { center: {lat: data.location.lat, lng: data.location.lng}, zoom: 15 }) $.ajax({ method: 'POST', url: '/api/locations', data: data, success: showRestaurants, error: noRestaurants }) } else { activeUser.location = data.location; console.log(activeUser); $.ajax({ method: 'POST', url: '/api/locations', data: activeUser, success: appendRestaurants, error: noRestaurants }) } } function noLocation(data){ console.log('could not find location ', data) } // looks at each restaraunt sent from yelp function showRestaurants(data){ console.log('you found restaurants! ', data); data.forEach(function(restaurant){ var location = { lat: restaurant.coordinates.latitude, lng: restaurant.coordinates.longitude } // this is the content that goes on the card associated with each restaurant in the map var content = '<h6>' + restaurant.name + '</h6>' + '<p>' + restaurant.location.address1 + '</p>' addMarker(location, content) }) } // places a marker on the map for each restaraunt function addMarker(position, content){ var myLatlng, marker, infowindow,contentString; // places each marker marker = new google.maps.Marker({ position: position, map: map }); // fills in data for the card that appears when clicking on any marker contentString = content; infowindow = new google.maps.InfoWindow({ content: contentString }); // listen for click to open the window when a marker is clicked on marker.addListener('click', function() { // open the restaraunt info when marker clicked on infowindow.open(map, marker); }); } function noRestaurants(data){ console.log('you found no restaurants :( NO SOUP FOR YOU ... wait ... sandwich ... 
NO SANDWICH FOR YOU!!', data); } //Detects clicking and dragging on the map, shows the button to search $('.hero-map').mousedown(function(){ if ($('.hero-map').mousemove(function(){ })){ $('.change-location').show(600); } }) // Listener for searching where the user currently is $('.current-location').on('click', '#current-location', findLocation) function findLocation (){ console.log('I know where you live!') $.ajax({ method: 'POST', url: 'https://www.googleapis.com/geolocation/v1/geolocate?key=AIzaSyDN9w5iCC44NN-_bnoO7Yu8ZXnmHB_QmJg', success: createMap, error: noLocation }) } // Listener for searching where the map is currently centered $('.change-location').on('click', '#change-location', function(){ console.log('Searching in the new map location'); var movedMapLocation = { location: { lat: map.getCenter().lat(), lng: map.getCenter().lng() } } createMap(movedMapLocation); }) // button listener to hide the map area once it's open $('.map-section').on('click', '#hide-map-button', function(){ $('#hero-map').hide(); $('.find-hero-button').show(); }) // this is the end of the map area $('.business-search').on('submit', function(event) { event.preventDefault(); console.log('submit clicked'); activeUser.term = $(this).serializeArray()[0].value console.log(activeUser, $(this).serializeArray()) findLocation() }) function appendRestaurants(restaurants){ console.log(restaurants) $('#business-submit-form').removeClass('hidden') $('.restaurant-list').html('') restaurants.forEach(function (restaurant){ $('.restaurant-list').append(templateRestaurant({restaurantName: restaurant.name})) }) } $('.business-submit').on('submit', function(event) { event.preventDefault(); console.log('submit clicked'); var restaurant = $(this).serializeArray()[0].value; $('.business-forms').addClass('hidden'); $('#review-form').removeClass('hidden'); $('#review-form').prepend('<h2>'+restaurant+'</h2>'); $('#restaurant-input').val(restaurant) console.log(restaurant); }) // this is what spits out each review onto the page. function appendReviews(allReviews) { var reviewHtml; // for each review: allReviews.forEach(function(reviewData){ // create HTML for individual review var reviewInfo = { reviewContent: reviewData.reviewContent, reviewStars: reviewData.stars, // turnary cheking to see if reviewData is true or false - if true return yes, if false return no reviewRecommend: reviewData.recommend ? "Yes" : "No", reviewGif: reviewData.gif, reviewId: reviewData._id, author: reviewData.username }; console.log(reviewData.username) if (activeUser.reviews.indexOf(reviewData._id)>=0){ reviewHtml = templateReviewButtons(reviewInfo) } else { reviewHtml = templateReview(reviewInfo) } if(activeUser._id){ $('.login').hide() $('.sign-up').hide() } else { $('.create').hide() $('.logout').hide() } // add review to top of review area $('.appendReviews').prepend(reviewHtml); }); // listener for pressing the edit review. Directs to edit page. $('.reviewIndividual').on('click', '#edit-button', function(){ localStorage.setItem('classes', $(this).attr("class").split(' ')[0]); console.log('the edit button was pressed! Review Id is ' + classes); window.location.href="../edit"; }) // listener for the create review button. Directs to create page. 
$('#create-button').on('click', function(){ console.log('the create button was pressed!'); window.location.href="../create"; }) $('.edit-review').on('submit', function(event) { console.log('edit review submit clicked'); event.preventDefault(); $.ajax({ method: 'PUT', url: '/api/reviews/' + localStorage.getItem("classes"), data: $(this).serializeArray(), success: newReviewSuccess, error: newReviewError }) }) // click event for pressing the delete review button. hits the delete route with Id from review $('.reviewIndividual').on('click', '#delete-button', function(){ // sets variable to be the first class associated with this button (which is the id of the review) var classes = $(this).attr("class").split(' ')[0]; console.log('the delete button was pressed! Review Id is ' + classes); $.ajax({ method: 'DELETE', url: '/api/reviews/' + classes, success: deleteReview, error: deleteFailure }) location.reload(); }) // this is the end of append reviews function }; function editReview(data){ console.log('Trying to edit the review below', data); templateReview({ reviewContent: data.reviewContent2 }) console.log('The review was edited', data); return templateReview; window.location.href="../" } function heroChat() { smackTalk = setInterval(function(){ $('.batwich-chat').empty(); $('.hero-chat').empty(); var chance = Math.round(Math.random()); if (chance) { $('.hero-chat').hide(); $('.batwich-chat').show(400); $('.batwich-chat').html(batwichSmack[Math.round(Math.random() * (batwichSmack.length - 1))]); } else { $('.batwich-chat').hide(); $('.hero-chat').show(400); $('.hero-chat').html(heroSmack[Math.round(Math.random() * (heroSmack.length - 1))]); } }, 5500); } heroChat(); // This is the end of on ready function }) function newReviewSuccess(review){ console.log('ajax call on review successful. Review: ', review); window.location.href="../" } function newReviewError(error){ console.log('ajax call on review dun messed up. Error: ', error); } function yelpSuccess(restaurant){ console.log(restaurant) } function yelpError (error){ console.log('ajax call on yelp dun messed up. Error: ', error); } function yelpCallback (data){ console.log('this is the yelp callback', data) } function noAppend (err){ console.log('the reviews did not append', err) } function newGifSearchError(error){ console.log('ajax call on gif search went bad, boss. Error: ', error); } function deleteReview(data){ console.log('delete review triggered!', data); } function deleteFailure(error){ console.log('The delete went bad. Did you delete the right thing? Did you delete everything?', error); } function editFailure(error){ console.log('Oh, no! We have failed to edit! Things remained the same, and you hated that stuff! Error: ', error); } function saveUser(user){ console.log(user) activeUser = user }
//hide map area when page loads $('#hero-map').hide();
random_line_split
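`createMap` and the `#change-location` handler above both POST a `{ location: { lat, lng } }` body to `/api/locations`. A typed sketch of that payload shape, assuming the `serde` and `serde_json` crates; the struct names are illustrative:

use serde::Serialize;

#[derive(Serialize)]
struct LatLng {
    lat: f64,
    lng: f64,
}

#[derive(Serialize)]
struct LocationPayload {
    location: LatLng,
}

fn main() -> serde_json::Result<()> {
    // The page's pre-geolocation default: Hell, MI.
    let payload = LocationPayload { location: LatLng { lat: 42.4347, lng: -83.9850 } };
    println!("{}", serde_json::to_string(&payload)?); // {"location":{"lat":42.4347,"lng":-83.985}}
    Ok(())
}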
app.js
console.log('sanity check, app.js is connected') //Declare global variables here var map, template, $reviewsList, allReviews = [], classes, batwichSmack = [ 'Wanna know my secret identity?', 'Stick it in your food hole!', 'For whom the BLT tolls.', 'A hotdog is no sandwich.', 'Who wants a knuckle sandwich!?', 'Who you callin turkey!?', 'Swear to me!', "I'm Batwich", 'My parents were eaten when I was young. I took it poorly.', "I'm a cipher, wrapped in an enigma, smothered in secret sauce.", "I don't trust noodles. They're all impastas.", 'My partner is a small fry.' ], heroSmack = [ 'Eat me!', 'Silence of the hams.', 'The po-boy only rings twice.', "I'm pretty sure a hot dog is a sandwich.", 'I hAvE cHaT BuBbLeS!!', 'Whoa, no one called anyone a JT.', 'Stick it in your food hole!', 'I never get soggy.', 'My super power is flavor!', 'Please, do it for the sliders.', 'Potato chips do not belong in a sandwich.', 'Lettuce celebrate!' ]; if(!activeUser){ var activeUser = {} } var giphyApi = "https://api.giphy.com/v1/gifs/search"; if(!(activeUser.reviews)){ activeUser.reviews = [] } // these things only happen once the document is ready $(document).ready(function(){ console.log('The DOM body is ready') console.log('Body parser parsing that body!'); $('.batwich-chat').hide(); $('.hero-chat').hide(); //***************** //***************** //Gif Handlebars templates var sourceOne = $('#selectableGif-template2').html(), templateGif = Handlebars.compile(sourceOne), sourceThree = $('#gif-choice').html(), templateGifChoice = Handlebars.compile(sourceThree), // Review Handlebars template $reviewsList = ('#review-form'), sourceTwo = $("#review-template").html(), templateReview = Handlebars.compile(sourceTwo); //Restaurant Handlebars templates sourceRestaurant = $('#restaurant-template').html(), templateRestaurant = Handlebars.compile(sourceRestaurant), sourceTwoButtons = $('#review-template-buttons').html(), templateReviewButtons = Handlebars.compile(sourceTwoButtons); // this is what submits the form to add a review in $('.new-review').on('submit', function(event) { console.log('submit clicked'); event.preventDefault(); $.ajax({ method: 'POST', url: '/api/reviews', data: $(this).serializeArray(), success: newReviewSuccess, error: newReviewError }) }) // this is what searches giphy for images $('.form-gif').on('submit', function(event){ console.log('gif submit clicked'); event.preventDefault(); $.ajax({ method: 'GET', url: giphyApi, data: $(this).serializeArray(), success: newGifSearchSuccess, error: newGifSearchError }) }) // this is what handles clicking on a gif $('.gifSelectionField2').on('click', '.gifBox', function(event){ $('.gifSelectionField2').empty(); var pickedGifHtml = templateGifChoice({ userChosenGif: this.src}); $('.selected-gif').empty(); $('.selected-gif').append(pickedGifHtml); }) // this is what populates selectable gifs function newGifSearchSuccess(json){ console.log('ajax call for gif successful. Gif: ', json); // empty space to prevent gifs from multiple searches showing at the same time $('.gifSelectionField2').empty(); json.data.forEach(function(gif){ var giphyHtml = templateGif({ insertGifHere: gif.images.fixed_height_small.url}); $(".gifSelectionField2").append(giphyHtml); }); } // when pages loads this will trigger and runs the append review function. 
gets reviews from review all endpoint $.ajax({ method: 'GET', url: '/api/reviews', success: appendReviews, error: noAppend }) //when page loads, save active user to a variable $.ajax({ method: 'GET', url: '/api/user/active', success: saveUser, // error: noAppend }) // this is the area that deals with the map //hide map area when page loads $('#hero-map').hide(); // listener for find hero button. hides button to search again until map is moved. $('.map-section').on('click', '#map-button', function(){ console.log('map button pressed'); $('#hero-map').show(); $('.find-hero-button').hide(); // set default location as Hell Mi var defaultLocation = { location: { lat: 42.4347, lng: -83.9850 } } // crete the map using the default location createMap(defaultLocation); }) // creates a google map using location info function createMap(data){ console.log('location found - lat: ', data.location.lat, 'lng: ', data.location.lng); $('.change-location').hide(); if (document.getElementById('mapPlacement'))
else { activeUser.location = data.location; console.log(activeUser); $.ajax({ method: 'POST', url: '/api/locations', data: activeUser, success: appendRestaurants, error: noRestaurants }) } } function noLocation(data){ console.log('could not find location ', data) } // looks at each restaraunt sent from yelp function showRestaurants(data){ console.log('you found restaurants! ', data); data.forEach(function(restaurant){ var location = { lat: restaurant.coordinates.latitude, lng: restaurant.coordinates.longitude } // this is the content that goes on the card associated with each restaurant in the map var content = '<h6>' + restaurant.name + '</h6>' + '<p>' + restaurant.location.address1 + '</p>' addMarker(location, content) }) } // places a marker on the map for each restaraunt function addMarker(position, content){ var myLatlng, marker, infowindow,contentString; // places each marker marker = new google.maps.Marker({ position: position, map: map }); // fills in data for the card that appears when clicking on any marker contentString = content; infowindow = new google.maps.InfoWindow({ content: contentString }); // listen for click to open the window when a marker is clicked on marker.addListener('click', function() { // open the restaraunt info when marker clicked on infowindow.open(map, marker); }); } function noRestaurants(data){ console.log('you found no restaurants :( NO SOUP FOR YOU ... wait ... sandwich ... NO SANDWICH FOR YOU!!', data); } //Detects clicking and dragging on the map, shows the button to search $('.hero-map').mousedown(function(){ if ($('.hero-map').mousemove(function(){ })){ $('.change-location').show(600); } }) // Listener for searching where the user currently is $('.current-location').on('click', '#current-location', findLocation) function findLocation (){ console.log('I know where you live!') $.ajax({ method: 'POST', url: 'https://www.googleapis.com/geolocation/v1/geolocate?key=AIzaSyDN9w5iCC44NN-_bnoO7Yu8ZXnmHB_QmJg', success: createMap, error: noLocation }) } // Listener for searching where the map is currently centered $('.change-location').on('click', '#change-location', function(){ console.log('Searching in the new map location'); var movedMapLocation = { location: { lat: map.getCenter().lat(), lng: map.getCenter().lng() } } createMap(movedMapLocation); }) // button listener to hide the map area once it's open $('.map-section').on('click', '#hide-map-button', function(){ $('#hero-map').hide(); $('.find-hero-button').show(); }) // this is the end of the map area $('.business-search').on('submit', function(event) { event.preventDefault(); console.log('submit clicked'); activeUser.term = $(this).serializeArray()[0].value console.log(activeUser, $(this).serializeArray()) findLocation() }) function appendRestaurants(restaurants){ console.log(restaurants) $('#business-submit-form').removeClass('hidden') $('.restaurant-list').html('') restaurants.forEach(function (restaurant){ $('.restaurant-list').append(templateRestaurant({restaurantName: restaurant.name})) }) } $('.business-submit').on('submit', function(event) { event.preventDefault(); console.log('submit clicked'); var restaurant = $(this).serializeArray()[0].value; $('.business-forms').addClass('hidden'); $('#review-form').removeClass('hidden'); $('#review-form').prepend('<h2>'+restaurant+'</h2>'); $('#restaurant-input').val(restaurant) console.log(restaurant); }) // this is what spits out each review onto the page. 
function appendReviews(allReviews) { var reviewHtml; // for each review: allReviews.forEach(function(reviewData){ // create HTML for individual review var reviewInfo = { reviewContent: reviewData.reviewContent, reviewStars: reviewData.stars, // turnary cheking to see if reviewData is true or false - if true return yes, if false return no reviewRecommend: reviewData.recommend ? "Yes" : "No", reviewGif: reviewData.gif, reviewId: reviewData._id, author: reviewData.username }; console.log(reviewData.username) if (activeUser.reviews.indexOf(reviewData._id)>=0){ reviewHtml = templateReviewButtons(reviewInfo) } else { reviewHtml = templateReview(reviewInfo) } if(activeUser._id){ $('.login').hide() $('.sign-up').hide() } else { $('.create').hide() $('.logout').hide() } // add review to top of review area $('.appendReviews').prepend(reviewHtml); }); // listener for pressing the edit review. Directs to edit page. $('.reviewIndividual').on('click', '#edit-button', function(){ localStorage.setItem('classes', $(this).attr("class").split(' ')[0]); console.log('the edit button was pressed! Review Id is ' + classes); window.location.href="../edit"; }) // listener for the create review button. Directs to create page. $('#create-button').on('click', function(){ console.log('the create button was pressed!'); window.location.href="../create"; }) $('.edit-review').on('submit', function(event) { console.log('edit review submit clicked'); event.preventDefault(); $.ajax({ method: 'PUT', url: '/api/reviews/' + localStorage.getItem("classes"), data: $(this).serializeArray(), success: newReviewSuccess, error: newReviewError }) }) // click event for pressing the delete review button. hits the delete route with Id from review $('.reviewIndividual').on('click', '#delete-button', function(){ // sets variable to be the first class associated with this button (which is the id of the review) var classes = $(this).attr("class").split(' ')[0]; console.log('the delete button was pressed! Review Id is ' + classes); $.ajax({ method: 'DELETE', url: '/api/reviews/' + classes, success: deleteReview, error: deleteFailure }) location.reload(); }) // this is the end of append reviews function }; function editReview(data){ console.log('Trying to edit the review below', data); templateReview({ reviewContent: data.reviewContent2 }) console.log('The review was edited', data); return templateReview; window.location.href="../" } function heroChat() { smackTalk = setInterval(function(){ $('.batwich-chat').empty(); $('.hero-chat').empty(); var chance = Math.round(Math.random()); if (chance) { $('.hero-chat').hide(); $('.batwich-chat').show(400); $('.batwich-chat').html(batwichSmack[Math.round(Math.random() * (batwichSmack.length - 1))]); } else { $('.batwich-chat').hide(); $('.hero-chat').show(400); $('.hero-chat').html(heroSmack[Math.round(Math.random() * (heroSmack.length - 1))]); } }, 5500); } heroChat(); // This is the end of on ready function }) function newReviewSuccess(review){ console.log('ajax call on review successful. Review: ', review); window.location.href="../" } function newReviewError(error){ console.log('ajax call on review dun messed up. Error: ', error); } function yelpSuccess(restaurant){ console.log(restaurant) } function yelpError (error){ console.log('ajax call on yelp dun messed up. 
Error: ', error); } function yelpCallback (data){ console.log('this is the yelp callback', data) } function noAppend (err){ console.log('the reviews did not append', err) } function newGifSearchError(error){ console.log('ajax call on gif search went bad, boss. Error: ', error); } function deleteReview(data){ console.log('delete review triggered!', data); } function deleteFailure(error){ console.log('The delete went bad. Did you delete the right thing? Did you delete everything?', error); } function editFailure(error){ console.log('Oh, no! We have failed to edit! Things remained the same, and you hated that stuff! Error: ', error); } function saveUser(user){ console.log(user) activeUser = user }
{ map = new google.maps.Map(document.getElementById('mapPlacement'), { center: {lat: data.location.lat, lng: data.location.lng}, zoom: 15 }) $.ajax({ method: 'POST', url: '/api/locations', data: data, success: showRestaurants, error: noRestaurants }) }
conditional_block
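`appendReviews` above decides whether a review gets edit/delete buttons by scanning `activeUser.reviews` with `indexOf`, a linear search per review. A sketch of the same ownership test against a hash set, which makes each membership check O(1); the names are illustrative:

use std::collections::HashSet;

// Pair each review id with whether the active user owns it.
fn render_flags<'a>(owned: &HashSet<&'a str>, all: &[&'a str]) -> Vec<(&'a str, bool)> {
    all.iter().map(|id| (*id, owned.contains(id))).collect()
}

fn main() {
    let owned: HashSet<&str> = ["r1", "r3"].into_iter().collect();
    let all = ["r1", "r2", "r3"];
    for (id, editable) in render_flags(&owned, &all) {
        println!("{id}: editable = {editable}");
    }
}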
app.js
console.log('sanity check, app.js is connected') //Declare global variables here var map, template, $reviewsList, allReviews = [], classes, batwichSmack = [ 'Wanna know my secret identity?', 'Stick it in your food hole!', 'For whom the BLT tolls.', 'A hotdog is no sandwich.', 'Who wants a knuckle sandwich!?', 'Who you callin turkey!?', 'Swear to me!', "I'm Batwich", 'My parents were eaten when I was young. I took it poorly.', "I'm a cipher, wrapped in an enigma, smothered in secret sauce.", "I don't trust noodles. They're all impastas.", 'My partner is a small fry.' ], heroSmack = [ 'Eat me!', 'Silence of the hams.', 'The po-boy only rings twice.', "I'm pretty sure a hot dog is a sandwich.", 'I hAvE cHaT BuBbLeS!!', 'Whoa, no one called anyone a JT.', 'Stick it in your food hole!', 'I never get soggy.', 'My super power is flavor!', 'Please, do it for the sliders.', 'Potato chips do not belong in a sandwich.', 'Lettuce celebrate!' ]; if(!activeUser){ var activeUser = {} } var giphyApi = "https://api.giphy.com/v1/gifs/search"; if(!(activeUser.reviews)){ activeUser.reviews = [] } // these things only happen once the document is ready $(document).ready(function(){ console.log('The DOM body is ready') console.log('Body parser parsing that body!'); $('.batwich-chat').hide(); $('.hero-chat').hide(); //***************** //***************** //Gif Handlebars templates var sourceOne = $('#selectableGif-template2').html(), templateGif = Handlebars.compile(sourceOne), sourceThree = $('#gif-choice').html(), templateGifChoice = Handlebars.compile(sourceThree), // Review Handlebars template $reviewsList = ('#review-form'), sourceTwo = $("#review-template").html(), templateReview = Handlebars.compile(sourceTwo); //Restaurant Handlebars templates sourceRestaurant = $('#restaurant-template').html(), templateRestaurant = Handlebars.compile(sourceRestaurant), sourceTwoButtons = $('#review-template-buttons').html(), templateReviewButtons = Handlebars.compile(sourceTwoButtons); // this is what submits the form to add a review in $('.new-review').on('submit', function(event) { console.log('submit clicked'); event.preventDefault(); $.ajax({ method: 'POST', url: '/api/reviews', data: $(this).serializeArray(), success: newReviewSuccess, error: newReviewError }) }) // this is what searches giphy for images $('.form-gif').on('submit', function(event){ console.log('gif submit clicked'); event.preventDefault(); $.ajax({ method: 'GET', url: giphyApi, data: $(this).serializeArray(), success: newGifSearchSuccess, error: newGifSearchError }) }) // this is what handles clicking on a gif $('.gifSelectionField2').on('click', '.gifBox', function(event){ $('.gifSelectionField2').empty(); var pickedGifHtml = templateGifChoice({ userChosenGif: this.src}); $('.selected-gif').empty(); $('.selected-gif').append(pickedGifHtml); }) // this is what populates selectable gifs function newGifSearchSuccess(json){ console.log('ajax call for gif successful. Gif: ', json); // empty space to prevent gifs from multiple searches showing at the same time $('.gifSelectionField2').empty(); json.data.forEach(function(gif){ var giphyHtml = templateGif({ insertGifHere: gif.images.fixed_height_small.url}); $(".gifSelectionField2").append(giphyHtml); }); } // when pages loads this will trigger and runs the append review function. 
gets reviews from review all endpoint $.ajax({ method: 'GET', url: '/api/reviews', success: appendReviews, error: noAppend }) //when page loads, save active user to a variable $.ajax({ method: 'GET', url: '/api/user/active', success: saveUser, // error: noAppend }) // this is the area that deals with the map //hide map area when page loads $('#hero-map').hide(); // listener for find hero button. hides button to search again until map is moved. $('.map-section').on('click', '#map-button', function(){ console.log('map button pressed'); $('#hero-map').show(); $('.find-hero-button').hide(); // set default location as Hell Mi var defaultLocation = { location: { lat: 42.4347, lng: -83.9850 } } // crete the map using the default location createMap(defaultLocation); }) // creates a google map using location info function createMap(data){ console.log('location found - lat: ', data.location.lat, 'lng: ', data.location.lng); $('.change-location').hide(); if (document.getElementById('mapPlacement')){ map = new google.maps.Map(document.getElementById('mapPlacement'), { center: {lat: data.location.lat, lng: data.location.lng}, zoom: 15 }) $.ajax({ method: 'POST', url: '/api/locations', data: data, success: showRestaurants, error: noRestaurants }) } else { activeUser.location = data.location; console.log(activeUser); $.ajax({ method: 'POST', url: '/api/locations', data: activeUser, success: appendRestaurants, error: noRestaurants }) } } function noLocation(data){ console.log('could not find location ', data) } // looks at each restaraunt sent from yelp function showRestaurants(data){ console.log('you found restaurants! ', data); data.forEach(function(restaurant){ var location = { lat: restaurant.coordinates.latitude, lng: restaurant.coordinates.longitude } // this is the content that goes on the card associated with each restaurant in the map var content = '<h6>' + restaurant.name + '</h6>' + '<p>' + restaurant.location.address1 + '</p>' addMarker(location, content) }) } // places a marker on the map for each restaraunt function addMarker(position, content){ var myLatlng, marker, infowindow,contentString; // places each marker marker = new google.maps.Marker({ position: position, map: map }); // fills in data for the card that appears when clicking on any marker contentString = content; infowindow = new google.maps.InfoWindow({ content: contentString }); // listen for click to open the window when a marker is clicked on marker.addListener('click', function() { // open the restaraunt info when marker clicked on infowindow.open(map, marker); }); } function noRestaurants(data){ console.log('you found no restaurants :( NO SOUP FOR YOU ... wait ... sandwich ... 
NO SANDWICH FOR YOU!!', data); } //Detects clicking and dragging on the map, shows the button to search $('.hero-map').mousedown(function(){ if ($('.hero-map').mousemove(function(){ })){ $('.change-location').show(600); } }) // Listener for searching where the user currently is $('.current-location').on('click', '#current-location', findLocation) function findLocation (){ console.log('I know where you live!') $.ajax({ method: 'POST', url: 'https://www.googleapis.com/geolocation/v1/geolocate?key=AIzaSyDN9w5iCC44NN-_bnoO7Yu8ZXnmHB_QmJg', success: createMap, error: noLocation }) } // Listener for searching where the map is currently centered $('.change-location').on('click', '#change-location', function(){ console.log('Searching in the new map location'); var movedMapLocation = { location: { lat: map.getCenter().lat(), lng: map.getCenter().lng() } } createMap(movedMapLocation); }) // button listener to hide the map area once it's open $('.map-section').on('click', '#hide-map-button', function(){ $('#hero-map').hide(); $('.find-hero-button').show(); }) // this is the end of the map area $('.business-search').on('submit', function(event) { event.preventDefault(); console.log('submit clicked'); activeUser.term = $(this).serializeArray()[0].value console.log(activeUser, $(this).serializeArray()) findLocation() }) function appendRestaurants(restaurants){ console.log(restaurants) $('#business-submit-form').removeClass('hidden') $('.restaurant-list').html('') restaurants.forEach(function (restaurant){ $('.restaurant-list').append(templateRestaurant({restaurantName: restaurant.name})) }) } $('.business-submit').on('submit', function(event) { event.preventDefault(); console.log('submit clicked'); var restaurant = $(this).serializeArray()[0].value; $('.business-forms').addClass('hidden'); $('#review-form').removeClass('hidden'); $('#review-form').prepend('<h2>'+restaurant+'</h2>'); $('#restaurant-input').val(restaurant) console.log(restaurant); }) // this is what spits out each review onto the page. function appendReviews(allReviews) { var reviewHtml; // for each review: allReviews.forEach(function(reviewData){ // create HTML for individual review var reviewInfo = { reviewContent: reviewData.reviewContent, reviewStars: reviewData.stars, // turnary cheking to see if reviewData is true or false - if true return yes, if false return no reviewRecommend: reviewData.recommend ? "Yes" : "No", reviewGif: reviewData.gif, reviewId: reviewData._id, author: reviewData.username }; console.log(reviewData.username) if (activeUser.reviews.indexOf(reviewData._id)>=0){ reviewHtml = templateReviewButtons(reviewInfo) } else { reviewHtml = templateReview(reviewInfo) } if(activeUser._id){ $('.login').hide() $('.sign-up').hide() } else { $('.create').hide() $('.logout').hide() } // add review to top of review area $('.appendReviews').prepend(reviewHtml); }); // listener for pressing the edit review. Directs to edit page. $('.reviewIndividual').on('click', '#edit-button', function(){ localStorage.setItem('classes', $(this).attr("class").split(' ')[0]); console.log('the edit button was pressed! Review Id is ' + classes); window.location.href="../edit"; }) // listener for the create review button. Directs to create page. 
$('#create-button').on('click', function(){ console.log('the create button was pressed!'); window.location.href="../create"; }) $('.edit-review').on('submit', function(event) { console.log('edit review submit clicked'); event.preventDefault(); $.ajax({ method: 'PUT', url: '/api/reviews/' + localStorage.getItem("classes"), data: $(this).serializeArray(), success: newReviewSuccess, error: newReviewError }) }) // click event for pressing the delete review button. hits the delete route with Id from review $('.reviewIndividual').on('click', '#delete-button', function(){ // sets variable to be the first class associated with this button (which is the id of the review) var classes = $(this).attr("class").split(' ')[0]; console.log('the delete button was pressed! Review Id is ' + classes); $.ajax({ method: 'DELETE', url: '/api/reviews/' + classes, success: deleteReview, error: deleteFailure }) location.reload(); }) // this is the end of append reviews function }; function editReview(data)
function heroChat() { smackTalk = setInterval(function(){ $('.batwich-chat').empty(); $('.hero-chat').empty(); var chance = Math.round(Math.random()); if (chance) { $('.hero-chat').hide(); $('.batwich-chat').show(400); $('.batwich-chat').html(batwichSmack[Math.round(Math.random() * (batwichSmack.length - 1))]); } else { $('.batwich-chat').hide(); $('.hero-chat').show(400); $('.hero-chat').html(heroSmack[Math.round(Math.random() * (heroSmack.length - 1))]); } }, 5500); } heroChat(); // This is the end of on ready function }) function newReviewSuccess(review){ console.log('ajax call on review successful. Review: ', review); window.location.href="../" } function newReviewError(error){ console.log('ajax call on review dun messed up. Error: ', error); } function yelpSuccess(restaurant){ console.log(restaurant) } function yelpError (error){ console.log('ajax call on yelp dun messed up. Error: ', error); } function yelpCallback (data){ console.log('this is the yelp callback', data) } function noAppend (err){ console.log('the reviews did not append', err) } function newGifSearchError(error){ console.log('ajax call on gif search went bad, boss. Error: ', error); } function deleteReview(data){ console.log('delete review triggered!', data); } function deleteFailure(error){ console.log('The delete went bad. Did you delete the right thing? Did you delete everything?', error); } function editFailure(error){ console.log('Oh, no! We have failed to edit! Things remained the same, and you hated that stuff! Error: ', error); } function saveUser(user){ console.log(user) activeUser = user }
{ console.log('Trying to edit the review below', data); templateReview({ reviewContent: data.reviewContent2 }) console.log('The review was edited', data); return templateReview; window.location.href="../" }
identifier_body
type_checker.rs
use scopeguard::{guard, ScopeGuard}; use super::model::Model; use super::cwf::*; use super::lang::ast::*; pub struct TypeChecker<T: Model> { model: T, ctxs : Vec<CtxInfo>, } struct CtxInfo { syntax: Ctx, // morphism from previous (if any) context to current weakening: Option<Morph>, defs: Vec<(String, Tm, Ty)>, } impl<TModel: Model> TypeChecker<TModel> { pub fn new(mut model: TModel) -> TypeChecker<TModel> { let empty = model.empty_ctx(); TypeChecker { model: model, ctxs: vec![CtxInfo { syntax: empty, weakening: None, defs: vec![] }], } } // Saves the current number of context extensions and definitions // in the current context and returns a scopeguard that will restore // to this state when it is dropped. The scope guard takes ownership // of the TC. fn save_ctx<'a>(&'a mut self) -> ScopeGuard<&mut TypeChecker<TModel>, impl FnOnce(&'a mut TypeChecker<TModel>)> { let depth = self.ctxs.len(); assert!(depth > 0); // always have empty context let num_defs = self.ctxs.last().unwrap().defs.len(); guard(self, move |s| { s.ctxs.truncate(depth); s.ctxs.last_mut().unwrap().defs.truncate(num_defs) }) } fn extend(&mut self, ext: &CtxExt) -> Result<Ty, String> { let ty = self.check_ty(&ext.1)?; let new_ctx = self.model.comprehension(&ty); let weakening = self.model.weakening(&ty); let mut defs = vec![]; if let Some(ref name) = ext.0 { let var_ty = Self::subst_ty(&mut self.model, &weakening, &ty); defs.push((name.clone(), self.model.var(&ty), var_ty)) } let new_ctx_info = CtxInfo { syntax: new_ctx, weakening: Some(weakening), defs: defs }; self.ctxs.push(new_ctx_info); Ok(ty) } pub fn check_def(&mut self, def: &Def) -> Result<Tm, String> { let mut s = self.save_ctx(); for ext in def.ctx.iter() { s.extend(ext)?; } let ret_ty = s.check_ty(&def.ret_ty)?; s.check_tm_ty(&def.body, &ret_ty) } fn check_let<T, F>( &mut self, check_body: F, name: &DefId, ty: &Expr, val: &Expr, body: &Expr) -> Result<T, String> where F : FnOnce(&mut Self, &Expr) -> Result<T, String> { let mut s = self.save_ctx(); let ty = s.check_ty(ty)?; let val = s.check_tm_ty(val, &ty)?; if let Some(name) = name { s.ctxs.last_mut().unwrap().defs.push((name.clone(), val, ty)); }; check_body(&mut s, body) } pub fn check_ty(&mut self, expr: &Expr) -> Result<Ty, String> { let cur_ctx_syn = &self.ctxs.last().unwrap().syntax; match expr { Expr::App(id, v) => match (id.as_str(), &v[..]) { ("bool", []) => Ok(self.model.bool_ty(cur_ctx_syn)), ("eq", [a, b]) => self.check_eq(a, b), (s, v) => Err(format!("Unexpected {} with {} args", s, v.len())) }, Expr::Let { name, ty, val, body } => self.check_let(|s, body| s.check_ty(body), name, &*ty, &*val, &*body), _ => Err(format!("Unhandled type {:?}", expr)) } } pub fn check_tm(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> { match expr { Expr::App(id, v) => match (id.as_str(), &v[..]) { ("refl", [a]) => self.refl(&*a), ("true", []) => Ok(self.true_tm()), ("false", []) => Ok(self.false_tm()), (v, []) => self.access_var(v), (s, v) => Err(format!("Unexpected {} with {} args", s, v.len())) }, Expr::Let { name, ty, val, body } => self.check_let(|s, body| s.check_tm(body), name, &*ty, &*val, &*body), Expr::Elim { val, into_ctx, into_ty, cases } => self.check_elim(&*val, into_ctx, &*into_ty, cases), } } fn
(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> { let (tm, _) = self.check_tm(expr)?; let eq_ty = self.model.eq_ty(&tm, &tm); let refl_tm = self.model.refl(&tm); Ok((refl_tm, eq_ty)) } fn true_tm(&mut self) -> (Tm, Ty) { let cur_ctx_syn = &self.ctxs.last().unwrap().syntax; let bool_ty = self.model.bool_ty(cur_ctx_syn); let tm = self.model.true_tm(cur_ctx_syn); (tm, bool_ty) } fn false_tm(&mut self) -> (Tm, Ty) { let cur_ctx_syn = &self.ctxs.last().unwrap().syntax; let bool_ty = self.model.bool_ty(cur_ctx_syn); let tm = self.model.false_tm(cur_ctx_syn); (tm, bool_ty) } // Given G |- a : A, construct the morphism <1(G), A, a> : G.A -> G // substituting the last A for a in any term in G.A. fn bar_tm(model: &mut TModel, ctx: &Ctx, ty: &Ty, tm: &Tm) -> Morph { let id = model.id_morph(ctx); model.extension(&id, ty, tm) } fn check_elim( &mut self, val: &Expr, into_ctx: &Vec<CtxExt>, into_ty: &Expr, cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String> { let (val_tm, val_ty) = self.check_tm(val)?; let bool_ty = self.model.bool_ty(&self.ctxs.last().unwrap().syntax); let (elim_tm, elim_ty) = if self.model.ty_eq(&val_ty, &bool_ty) { self.elim_bool(into_ctx, into_ty, cases)? } else { return Err(format!("Cannot eliminate {:?} of type {:?}", val, val_ty)) }; // Substitute bar(val_tm) into elimination term and type, which live // live in an extended context. let cur_ctx_syn = &self.ctxs.last().unwrap().syntax; let bar = Self::bar_tm(&mut self.model, cur_ctx_syn, &val_ty, &val_tm); let tm = self.model.subst_tm(&bar, &elim_tm); let ty = self.model.subst_ty(&bar, &elim_ty); Ok((tm, ty)) } fn elim_bool( &mut self, into_ctx: &Vec<CtxExt>, into_ty: &Expr, cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String> { if into_ctx.len() != 1 || cases.len() != 2 || cases[0].0.len() != 0 || cases[1].0.len() != 0 { return Err("Invalid bool elimination".to_owned()) } let cur_ctx_syn = self.ctxs.last().unwrap().syntax.clone(); let bool_ty = self.model.bool_ty(&cur_ctx_syn); let into_ty = { let mut s = self.save_ctx(); let ext_ty = s.extend(&into_ctx[0])?; if !s.model.ty_eq(&ext_ty, &bool_ty) { return Err("Invalid extension for into-type: expected bool".to_owned()); } s.check_ty(into_ty)? 
}; let true_tm = self.model.true_tm(&cur_ctx_syn); let true_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &true_tm); let expected_ty_true_case = Self::subst_ty(&mut self.model, &true_bar, &into_ty); let false_tm = self.model.false_tm(&cur_ctx_syn); let false_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &false_tm); let expected_ty_false_case = Self::subst_ty(&mut self.model, &false_bar, &into_ty); let true_case_tm = self.check_tm_ty(&cases[0].1, &expected_ty_true_case)?; let false_case_tm = self.check_tm_ty(&cases[1].1, &expected_ty_false_case)?; let tm = self.model.elim_bool(&cur_ctx_syn, &into_ty, &true_case_tm, &false_case_tm); // Define substitutions by true and false Self::subst_tm(&mut self.model, &true_bar, &tm); Self::subst_tm(&mut self.model, &false_bar, &tm); Ok((tm, into_ty)) } fn check_tm_ty(&mut self, expr: &Expr, expected_ty: &Ty) -> Result<Tm, String> { let (tm, ty) = self.check_tm(expr)?; if self.model.ty_eq(&ty, expected_ty) { Ok(tm) } else { Err(format!("expected:\n{:?}\ngot:\n{:?}", expected_ty, ty)) } } fn access_var(&mut self, name: &str) -> Result<(Tm, Ty), String> { let mut ctx_index = self.ctxs.len(); for ctx in self.ctxs.iter().rev() { ctx_index -= 1; for (ref ctx_var_name, ref tm, ref ty) in ctx.defs.iter().rev() { if ctx_var_name != name { continue } let mut tm = tm.clone(); let mut ty = ty.clone(); // Found term, inject it into current context. for ctx in &self.ctxs[ctx_index+1..] { let weakening = match ctx.weakening { Some(ref w) => w, None => panic!("expected weakening to be available") }; tm = Self::subst_tm(&mut self.model, &weakening, &tm); ty = Self::subst_ty(&mut self.model, &weakening, &ty); } return Ok((tm, ty)) } } Err(format!("unknown definition {}", name)) } fn check_eq(&mut self, a: &Expr, b: &Expr) -> Result<Ty, String> { let (tma, tya) = self.check_tm(a)?; let tmb = self.check_tm_ty(b, &tya)?; Ok(self.model.eq_ty(&tma, &tmb)) } fn subst_ty(model: &mut TModel, g: &Morph, ty: &Ty) -> Ty { model.subst_ty(g, ty); match ty { Ty::Subst(f, s) => { // g (f s) = (g . f) s let gf = Self::comp_morphs(model, g, &*f); Self::subst_ty(model, &gf, &*s) }, Ty::Bool(_) => { let codomain = Self::morph_codomain(model, g); model.bool_ty(&codomain) }, Ty::Eq(a, b) => { let ga = Self::subst_tm(model, g, &*a); let gb = Self::subst_tm(model, g, &*b); model.eq_ty(&ga, &gb) }, } } fn subst_tm(model: &mut TModel, g: &Morph, tm: &Tm) -> Tm { let gtm = model.subst_tm(g, tm); match tm { Tm::Subst(f, tm) => { // g (f tm) = (g . f) tm let gf = Self::comp_morphs(model, g, &*f); Self::subst_tm(model, &gf, &*tm) }, Tm::Refl(a) => { let ga = Self::subst_tm(model, g, &*a); model.refl(&ga) }, Tm::True(_) => { let codomain = Self::morph_codomain(model, g); model.true_tm(&codomain) }, Tm::False(_) => { let codomain = Self::morph_codomain(model, g); model.false_tm(&codomain) }, Tm::ElimBool(ctx, into_ty, true_case, false_case) => { let ctx_bool = model.bool_ty(ctx); let w = model.weakening(&ctx_bool); // w : ctx -> ctx.bool (where into_ty, true_case and false_case) live let gw = Self::comp_morphs(model, g, &w); Self::subst_ty(model, &gw, &*into_ty); Self::subst_tm(model, &gw, &*true_case); Self::subst_tm(model, &gw, &*false_case); gtm }, _ => gtm } } fn comp_morphs(model: &mut TModel, g: &Morph, f: &Morph) -> Morph { let gf = model.compose(g, f); match f { Morph::Identity(_) => g.clone(), Morph::Composition(f, e) => { // g . (f . e) = (g . f) . e let gf = model.compose(g, &*f); Self::comp_morphs(model, &gf, e) }, Morph::Extension(f, s, tm) => { // g . 
<f, s, tm> = <g . f, s, gtm> let gf = Self::comp_morphs(model, g, f); let gtm = model.subst_tm(g, tm); model.extension(&gf, &*s, &gtm) } _ => gf } } fn morph_codomain(model: &mut TModel, morph: &Morph) -> Ctx { match morph { Morph::Identity(ctx) => (**ctx).clone(), Morph::Weakening(ty) => model.comprehension(&*ty), Morph::Composition(g, _) => Self::morph_codomain(model, g), Morph::Extension(f, _, _) => Self::morph_codomain(model, f), } } } #[cfg(test)] mod tests { use crate::cwf_model; use crate::lang::parser::DefParser; fn verify_def(code: &str) { let p = DefParser::new().parse(code).unwrap(); let model = cwf_model::Cwf::new(); super::TypeChecker::new(model).check_def(&p).unwrap(); } #[test] fn id() { verify_def("def id (b : bool) : bool := b."); } #[test] fn negb() { verify_def(" def negb (b : bool) : bool := elim b into (_ : bool) : bool | => false | => true end."); } #[test] fn transitive() { verify_def(" def trans (a b c d e : bool) (p1 : a = b) (p2 : b = c) (p3 : c = d) (p4 : d = e) : a = e := refl a.") } #[test] fn uip() { verify_def(" def uip (a b : bool) (p : a = b) (q : b = a) : p = q := refl p.") } #[test] fn eta() { verify_def(" def eta (a : bool) : a = elim a into (_ : bool) : bool | => true | => false end := elim a into (b : bool) : b = elim b into (_ : bool) : bool | => true | => false end | => refl true | => refl false end.") } }
refl
identifier_name
type_checker.rs
use scopeguard::{guard, ScopeGuard}; use super::model::Model; use super::cwf::*; use super::lang::ast::*; pub struct TypeChecker<T: Model> { model: T, ctxs : Vec<CtxInfo>, } struct CtxInfo { syntax: Ctx, // morphism from previous (if any) context to current weakening: Option<Morph>, defs: Vec<(String, Tm, Ty)>, } impl<TModel: Model> TypeChecker<TModel> { pub fn new(mut model: TModel) -> TypeChecker<TModel> { let empty = model.empty_ctx(); TypeChecker { model: model, ctxs: vec![CtxInfo { syntax: empty, weakening: None, defs: vec![] }], } } // Saves the current number of context extensions and definitions // in the current context and returns a scopeguard that will restore // to this state when it is dropped. The scope guard takes ownership // of the TC. fn save_ctx<'a>(&'a mut self) -> ScopeGuard<&mut TypeChecker<TModel>, impl FnOnce(&'a mut TypeChecker<TModel>)> { let depth = self.ctxs.len(); assert!(depth > 0); // always have empty context let num_defs = self.ctxs.last().unwrap().defs.len(); guard(self, move |s| { s.ctxs.truncate(depth); s.ctxs.last_mut().unwrap().defs.truncate(num_defs) }) } fn extend(&mut self, ext: &CtxExt) -> Result<Ty, String> { let ty = self.check_ty(&ext.1)?; let new_ctx = self.model.comprehension(&ty); let weakening = self.model.weakening(&ty); let mut defs = vec![]; if let Some(ref name) = ext.0 { let var_ty = Self::subst_ty(&mut self.model, &weakening, &ty); defs.push((name.clone(), self.model.var(&ty), var_ty)) } let new_ctx_info = CtxInfo { syntax: new_ctx, weakening: Some(weakening), defs: defs }; self.ctxs.push(new_ctx_info); Ok(ty) } pub fn check_def(&mut self, def: &Def) -> Result<Tm, String> { let mut s = self.save_ctx(); for ext in def.ctx.iter() { s.extend(ext)?; } let ret_ty = s.check_ty(&def.ret_ty)?; s.check_tm_ty(&def.body, &ret_ty) } fn check_let<T, F>( &mut self, check_body: F, name: &DefId, ty: &Expr, val: &Expr, body: &Expr) -> Result<T, String> where F : FnOnce(&mut Self, &Expr) -> Result<T, String> { let mut s = self.save_ctx(); let ty = s.check_ty(ty)?; let val = s.check_tm_ty(val, &ty)?;
if let Some(name) = name { s.ctxs.last_mut().unwrap().defs.push((name.clone(), val, ty)); }; check_body(&mut s, body) } pub fn check_ty(&mut self, expr: &Expr) -> Result<Ty, String> { let cur_ctx_syn = &self.ctxs.last().unwrap().syntax; match expr { Expr::App(id, v) => match (id.as_str(), &v[..]) { ("bool", []) => Ok(self.model.bool_ty(cur_ctx_syn)), ("eq", [a, b]) => self.check_eq(a, b), (s, v) => Err(format!("Unexpected {} with {} args", s, v.len())) }, Expr::Let { name, ty, val, body } => self.check_let(|s, body| s.check_ty(body), name, &*ty, &*val, &*body), _ => Err(format!("Unhandled type {:?}", expr)) } } pub fn check_tm(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> { match expr { Expr::App(id, v) => match (id.as_str(), &v[..]) { ("refl", [a]) => self.refl(&*a), ("true", []) => Ok(self.true_tm()), ("false", []) => Ok(self.false_tm()), (v, []) => self.access_var(v), (s, v) => Err(format!("Unexpected {} with {} args", s, v.len())) }, Expr::Let { name, ty, val, body } => self.check_let(|s, body| s.check_tm(body), name, &*ty, &*val, &*body), Expr::Elim { val, into_ctx, into_ty, cases } => self.check_elim(&*val, into_ctx, &*into_ty, cases), } } fn refl(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> { let (tm, _) = self.check_tm(expr)?; let eq_ty = self.model.eq_ty(&tm, &tm); let refl_tm = self.model.refl(&tm); Ok((refl_tm, eq_ty)) } fn true_tm(&mut self) -> (Tm, Ty) { let cur_ctx_syn = &self.ctxs.last().unwrap().syntax; let bool_ty = self.model.bool_ty(cur_ctx_syn); let tm = self.model.true_tm(cur_ctx_syn); (tm, bool_ty) } fn false_tm(&mut self) -> (Tm, Ty) { let cur_ctx_syn = &self.ctxs.last().unwrap().syntax; let bool_ty = self.model.bool_ty(cur_ctx_syn); let tm = self.model.false_tm(cur_ctx_syn); (tm, bool_ty) } // Given G |- a : A, construct the morphism <1(G), A, a> : G.A -> G // substituting the last A for a in any term in G.A. fn bar_tm(model: &mut TModel, ctx: &Ctx, ty: &Ty, tm: &Tm) -> Morph { let id = model.id_morph(ctx); model.extension(&id, ty, tm) } fn check_elim( &mut self, val: &Expr, into_ctx: &Vec<CtxExt>, into_ty: &Expr, cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String> { let (val_tm, val_ty) = self.check_tm(val)?; let bool_ty = self.model.bool_ty(&self.ctxs.last().unwrap().syntax); let (elim_tm, elim_ty) = if self.model.ty_eq(&val_ty, &bool_ty) { self.elim_bool(into_ctx, into_ty, cases)? } else { return Err(format!("Cannot eliminate {:?} of type {:?}", val, val_ty)) }; // Substitute bar(val_tm) into elimination term and type, which live // live in an extended context. let cur_ctx_syn = &self.ctxs.last().unwrap().syntax; let bar = Self::bar_tm(&mut self.model, cur_ctx_syn, &val_ty, &val_tm); let tm = self.model.subst_tm(&bar, &elim_tm); let ty = self.model.subst_ty(&bar, &elim_ty); Ok((tm, ty)) } fn elim_bool( &mut self, into_ctx: &Vec<CtxExt>, into_ty: &Expr, cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String> { if into_ctx.len() != 1 || cases.len() != 2 || cases[0].0.len() != 0 || cases[1].0.len() != 0 { return Err("Invalid bool elimination".to_owned()) } let cur_ctx_syn = self.ctxs.last().unwrap().syntax.clone(); let bool_ty = self.model.bool_ty(&cur_ctx_syn); let into_ty = { let mut s = self.save_ctx(); let ext_ty = s.extend(&into_ctx[0])?; if !s.model.ty_eq(&ext_ty, &bool_ty) { return Err("Invalid extension for into-type: expected bool".to_owned()); } s.check_ty(into_ty)? 
}; let true_tm = self.model.true_tm(&cur_ctx_syn); let true_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &true_tm); let expected_ty_true_case = Self::subst_ty(&mut self.model, &true_bar, &into_ty); let false_tm = self.model.false_tm(&cur_ctx_syn); let false_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &false_tm); let expected_ty_false_case = Self::subst_ty(&mut self.model, &false_bar, &into_ty); let true_case_tm = self.check_tm_ty(&cases[0].1, &expected_ty_true_case)?; let false_case_tm = self.check_tm_ty(&cases[1].1, &expected_ty_false_case)?; let tm = self.model.elim_bool(&cur_ctx_syn, &into_ty, &true_case_tm, &false_case_tm); // Define substitutions by true and false Self::subst_tm(&mut self.model, &true_bar, &tm); Self::subst_tm(&mut self.model, &false_bar, &tm); Ok((tm, into_ty)) } fn check_tm_ty(&mut self, expr: &Expr, expected_ty: &Ty) -> Result<Tm, String> { let (tm, ty) = self.check_tm(expr)?; if self.model.ty_eq(&ty, expected_ty) { Ok(tm) } else { Err(format!("expected:\n{:?}\ngot:\n{:?}", expected_ty, ty)) } } fn access_var(&mut self, name: &str) -> Result<(Tm, Ty), String> { let mut ctx_index = self.ctxs.len(); for ctx in self.ctxs.iter().rev() { ctx_index -= 1; for (ref ctx_var_name, ref tm, ref ty) in ctx.defs.iter().rev() { if ctx_var_name != name { continue } let mut tm = tm.clone(); let mut ty = ty.clone(); // Found term, inject it into current context. for ctx in &self.ctxs[ctx_index+1..] { let weakening = match ctx.weakening { Some(ref w) => w, None => panic!("expected weakening to be available") }; tm = Self::subst_tm(&mut self.model, &weakening, &tm); ty = Self::subst_ty(&mut self.model, &weakening, &ty); } return Ok((tm, ty)) } } Err(format!("unknown definition {}", name)) } fn check_eq(&mut self, a: &Expr, b: &Expr) -> Result<Ty, String> { let (tma, tya) = self.check_tm(a)?; let tmb = self.check_tm_ty(b, &tya)?; Ok(self.model.eq_ty(&tma, &tmb)) } fn subst_ty(model: &mut TModel, g: &Morph, ty: &Ty) -> Ty { model.subst_ty(g, ty); match ty { Ty::Subst(f, s) => { // g (f s) = (g . f) s let gf = Self::comp_morphs(model, g, &*f); Self::subst_ty(model, &gf, &*s) }, Ty::Bool(_) => { let codomain = Self::morph_codomain(model, g); model.bool_ty(&codomain) }, Ty::Eq(a, b) => { let ga = Self::subst_tm(model, g, &*a); let gb = Self::subst_tm(model, g, &*b); model.eq_ty(&ga, &gb) }, } } fn subst_tm(model: &mut TModel, g: &Morph, tm: &Tm) -> Tm { let gtm = model.subst_tm(g, tm); match tm { Tm::Subst(f, tm) => { // g (f tm) = (g . f) tm let gf = Self::comp_morphs(model, g, &*f); Self::subst_tm(model, &gf, &*tm) }, Tm::Refl(a) => { let ga = Self::subst_tm(model, g, &*a); model.refl(&ga) }, Tm::True(_) => { let codomain = Self::morph_codomain(model, g); model.true_tm(&codomain) }, Tm::False(_) => { let codomain = Self::morph_codomain(model, g); model.false_tm(&codomain) }, Tm::ElimBool(ctx, into_ty, true_case, false_case) => { let ctx_bool = model.bool_ty(ctx); let w = model.weakening(&ctx_bool); // w : ctx -> ctx.bool (where into_ty, true_case and false_case) live let gw = Self::comp_morphs(model, g, &w); Self::subst_ty(model, &gw, &*into_ty); Self::subst_tm(model, &gw, &*true_case); Self::subst_tm(model, &gw, &*false_case); gtm }, _ => gtm } } fn comp_morphs(model: &mut TModel, g: &Morph, f: &Morph) -> Morph { let gf = model.compose(g, f); match f { Morph::Identity(_) => g.clone(), Morph::Composition(f, e) => { // g . (f . e) = (g . f) . e let gf = model.compose(g, &*f); Self::comp_morphs(model, &gf, e) }, Morph::Extension(f, s, tm) => { // g . 
<f, s, tm> = <g . f, s, gtm> let gf = Self::comp_morphs(model, g, f); let gtm = model.subst_tm(g, tm); model.extension(&gf, &*s, &gtm) } _ => gf } } fn morph_codomain(model: &mut TModel, morph: &Morph) -> Ctx { match morph { Morph::Identity(ctx) => (**ctx).clone(), Morph::Weakening(ty) => model.comprehension(&*ty), Morph::Composition(g, _) => Self::morph_codomain(model, g), Morph::Extension(f, _, _) => Self::morph_codomain(model, f), } } } #[cfg(test)] mod tests { use crate::cwf_model; use crate::lang::parser::DefParser; fn verify_def(code: &str) { let p = DefParser::new().parse(code).unwrap(); let model = cwf_model::Cwf::new(); super::TypeChecker::new(model).check_def(&p).unwrap(); } #[test] fn id() { verify_def("def id (b : bool) : bool := b."); } #[test] fn negb() { verify_def(" def negb (b : bool) : bool := elim b into (_ : bool) : bool | => false | => true end."); } #[test] fn transitive() { verify_def(" def trans (a b c d e : bool) (p1 : a = b) (p2 : b = c) (p3 : c = d) (p4 : d = e) : a = e := refl a.") } #[test] fn uip() { verify_def(" def uip (a b : bool) (p : a = b) (q : b = a) : p = q := refl p.") } #[test] fn eta() { verify_def(" def eta (a : bool) : a = elim a into (_ : bool) : bool | => true | => false end := elim a into (b : bool) : b = elim b into (_ : bool) : bool | => true | => false end | => refl true | => refl false end.") } }
random_line_split
type_checker.rs
use scopeguard::{guard, ScopeGuard}; use super::model::Model; use super::cwf::*; use super::lang::ast::*; pub struct TypeChecker<T: Model> { model: T, ctxs : Vec<CtxInfo>, } struct CtxInfo { syntax: Ctx, // morphism from previous (if any) context to current weakening: Option<Morph>, defs: Vec<(String, Tm, Ty)>, } impl<TModel: Model> TypeChecker<TModel> { pub fn new(mut model: TModel) -> TypeChecker<TModel> { let empty = model.empty_ctx(); TypeChecker { model: model, ctxs: vec![CtxInfo { syntax: empty, weakening: None, defs: vec![] }], } } // Saves the current number of context extensions and definitions // in the current context and returns a scopeguard that will restore // to this state when it is dropped. The scope guard takes ownership // of the TC. fn save_ctx<'a>(&'a mut self) -> ScopeGuard<&mut TypeChecker<TModel>, impl FnOnce(&'a mut TypeChecker<TModel>)> { let depth = self.ctxs.len(); assert!(depth > 0); // always have empty context let num_defs = self.ctxs.last().unwrap().defs.len(); guard(self, move |s| { s.ctxs.truncate(depth); s.ctxs.last_mut().unwrap().defs.truncate(num_defs) }) } fn extend(&mut self, ext: &CtxExt) -> Result<Ty, String> { let ty = self.check_ty(&ext.1)?; let new_ctx = self.model.comprehension(&ty); let weakening = self.model.weakening(&ty); let mut defs = vec![]; if let Some(ref name) = ext.0 { let var_ty = Self::subst_ty(&mut self.model, &weakening, &ty); defs.push((name.clone(), self.model.var(&ty), var_ty)) } let new_ctx_info = CtxInfo { syntax: new_ctx, weakening: Some(weakening), defs: defs }; self.ctxs.push(new_ctx_info); Ok(ty) } pub fn check_def(&mut self, def: &Def) -> Result<Tm, String> { let mut s = self.save_ctx(); for ext in def.ctx.iter() { s.extend(ext)?; } let ret_ty = s.check_ty(&def.ret_ty)?; s.check_tm_ty(&def.body, &ret_ty) } fn check_let<T, F>( &mut self, check_body: F, name: &DefId, ty: &Expr, val: &Expr, body: &Expr) -> Result<T, String> where F : FnOnce(&mut Self, &Expr) -> Result<T, String> { let mut s = self.save_ctx(); let ty = s.check_ty(ty)?; let val = s.check_tm_ty(val, &ty)?; if let Some(name) = name { s.ctxs.last_mut().unwrap().defs.push((name.clone(), val, ty)); }; check_body(&mut s, body) } pub fn check_ty(&mut self, expr: &Expr) -> Result<Ty, String> { let cur_ctx_syn = &self.ctxs.last().unwrap().syntax; match expr { Expr::App(id, v) => match (id.as_str(), &v[..]) { ("bool", []) => Ok(self.model.bool_ty(cur_ctx_syn)), ("eq", [a, b]) => self.check_eq(a, b), (s, v) => Err(format!("Unexpected {} with {} args", s, v.len())) }, Expr::Let { name, ty, val, body } => self.check_let(|s, body| s.check_ty(body), name, &*ty, &*val, &*body), _ => Err(format!("Unhandled type {:?}", expr)) } } pub fn check_tm(&mut self, expr: &Expr) -> Result<(Tm, Ty), String> { match expr { Expr::App(id, v) => match (id.as_str(), &v[..]) { ("refl", [a]) => self.refl(&*a), ("true", []) => Ok(self.true_tm()), ("false", []) => Ok(self.false_tm()), (v, []) => self.access_var(v), (s, v) => Err(format!("Unexpected {} with {} args", s, v.len())) }, Expr::Let { name, ty, val, body } => self.check_let(|s, body| s.check_tm(body), name, &*ty, &*val, &*body), Expr::Elim { val, into_ctx, into_ty, cases } => self.check_elim(&*val, into_ctx, &*into_ty, cases), } } fn refl(&mut self, expr: &Expr) -> Result<(Tm, Ty), String>
fn true_tm(&mut self) -> (Tm, Ty) { let cur_ctx_syn = &self.ctxs.last().unwrap().syntax; let bool_ty = self.model.bool_ty(cur_ctx_syn); let tm = self.model.true_tm(cur_ctx_syn); (tm, bool_ty) } fn false_tm(&mut self) -> (Tm, Ty) { let cur_ctx_syn = &self.ctxs.last().unwrap().syntax; let bool_ty = self.model.bool_ty(cur_ctx_syn); let tm = self.model.false_tm(cur_ctx_syn); (tm, bool_ty) } // Given G |- a : A, construct the morphism <1(G), A, a> : G.A -> G // substituting the last A for a in any term in G.A. fn bar_tm(model: &mut TModel, ctx: &Ctx, ty: &Ty, tm: &Tm) -> Morph { let id = model.id_morph(ctx); model.extension(&id, ty, tm) } fn check_elim( &mut self, val: &Expr, into_ctx: &Vec<CtxExt>, into_ty: &Expr, cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String> { let (val_tm, val_ty) = self.check_tm(val)?; let bool_ty = self.model.bool_ty(&self.ctxs.last().unwrap().syntax); let (elim_tm, elim_ty) = if self.model.ty_eq(&val_ty, &bool_ty) { self.elim_bool(into_ctx, into_ty, cases)? } else { return Err(format!("Cannot eliminate {:?} of type {:?}", val, val_ty)) }; // Substitute bar(val_tm) into elimination term and type, which live // live in an extended context. let cur_ctx_syn = &self.ctxs.last().unwrap().syntax; let bar = Self::bar_tm(&mut self.model, cur_ctx_syn, &val_ty, &val_tm); let tm = self.model.subst_tm(&bar, &elim_tm); let ty = self.model.subst_ty(&bar, &elim_ty); Ok((tm, ty)) } fn elim_bool( &mut self, into_ctx: &Vec<CtxExt>, into_ty: &Expr, cases: &Vec<ElimCase>) -> Result<(Tm, Ty), String> { if into_ctx.len() != 1 || cases.len() != 2 || cases[0].0.len() != 0 || cases[1].0.len() != 0 { return Err("Invalid bool elimination".to_owned()) } let cur_ctx_syn = self.ctxs.last().unwrap().syntax.clone(); let bool_ty = self.model.bool_ty(&cur_ctx_syn); let into_ty = { let mut s = self.save_ctx(); let ext_ty = s.extend(&into_ctx[0])?; if !s.model.ty_eq(&ext_ty, &bool_ty) { return Err("Invalid extension for into-type: expected bool".to_owned()); } s.check_ty(into_ty)? }; let true_tm = self.model.true_tm(&cur_ctx_syn); let true_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &true_tm); let expected_ty_true_case = Self::subst_ty(&mut self.model, &true_bar, &into_ty); let false_tm = self.model.false_tm(&cur_ctx_syn); let false_bar = Self::bar_tm(&mut self.model, &cur_ctx_syn, &bool_ty, &false_tm); let expected_ty_false_case = Self::subst_ty(&mut self.model, &false_bar, &into_ty); let true_case_tm = self.check_tm_ty(&cases[0].1, &expected_ty_true_case)?; let false_case_tm = self.check_tm_ty(&cases[1].1, &expected_ty_false_case)?; let tm = self.model.elim_bool(&cur_ctx_syn, &into_ty, &true_case_tm, &false_case_tm); // Define substitutions by true and false Self::subst_tm(&mut self.model, &true_bar, &tm); Self::subst_tm(&mut self.model, &false_bar, &tm); Ok((tm, into_ty)) } fn check_tm_ty(&mut self, expr: &Expr, expected_ty: &Ty) -> Result<Tm, String> { let (tm, ty) = self.check_tm(expr)?; if self.model.ty_eq(&ty, expected_ty) { Ok(tm) } else { Err(format!("expected:\n{:?}\ngot:\n{:?}", expected_ty, ty)) } } fn access_var(&mut self, name: &str) -> Result<(Tm, Ty), String> { let mut ctx_index = self.ctxs.len(); for ctx in self.ctxs.iter().rev() { ctx_index -= 1; for (ref ctx_var_name, ref tm, ref ty) in ctx.defs.iter().rev() { if ctx_var_name != name { continue } let mut tm = tm.clone(); let mut ty = ty.clone(); // Found term, inject it into current context. for ctx in &self.ctxs[ctx_index+1..] 
{ let weakening = match ctx.weakening { Some(ref w) => w, None => panic!("expected weakening to be available") }; tm = Self::subst_tm(&mut self.model, &weakening, &tm); ty = Self::subst_ty(&mut self.model, &weakening, &ty); } return Ok((tm, ty)) } } Err(format!("unknown definition {}", name)) } fn check_eq(&mut self, a: &Expr, b: &Expr) -> Result<Ty, String> { let (tma, tya) = self.check_tm(a)?; let tmb = self.check_tm_ty(b, &tya)?; Ok(self.model.eq_ty(&tma, &tmb)) } fn subst_ty(model: &mut TModel, g: &Morph, ty: &Ty) -> Ty { model.subst_ty(g, ty); match ty { Ty::Subst(f, s) => { // g (f s) = (g . f) s let gf = Self::comp_morphs(model, g, &*f); Self::subst_ty(model, &gf, &*s) }, Ty::Bool(_) => { let codomain = Self::morph_codomain(model, g); model.bool_ty(&codomain) }, Ty::Eq(a, b) => { let ga = Self::subst_tm(model, g, &*a); let gb = Self::subst_tm(model, g, &*b); model.eq_ty(&ga, &gb) }, } } fn subst_tm(model: &mut TModel, g: &Morph, tm: &Tm) -> Tm { let gtm = model.subst_tm(g, tm); match tm { Tm::Subst(f, tm) => { // g (f tm) = (g . f) tm let gf = Self::comp_morphs(model, g, &*f); Self::subst_tm(model, &gf, &*tm) }, Tm::Refl(a) => { let ga = Self::subst_tm(model, g, &*a); model.refl(&ga) }, Tm::True(_) => { let codomain = Self::morph_codomain(model, g); model.true_tm(&codomain) }, Tm::False(_) => { let codomain = Self::morph_codomain(model, g); model.false_tm(&codomain) }, Tm::ElimBool(ctx, into_ty, true_case, false_case) => { let ctx_bool = model.bool_ty(ctx); let w = model.weakening(&ctx_bool); // w : ctx -> ctx.bool (where into_ty, true_case and false_case) live let gw = Self::comp_morphs(model, g, &w); Self::subst_ty(model, &gw, &*into_ty); Self::subst_tm(model, &gw, &*true_case); Self::subst_tm(model, &gw, &*false_case); gtm }, _ => gtm } } fn comp_morphs(model: &mut TModel, g: &Morph, f: &Morph) -> Morph { let gf = model.compose(g, f); match f { Morph::Identity(_) => g.clone(), Morph::Composition(f, e) => { // g . (f . e) = (g . f) . e let gf = model.compose(g, &*f); Self::comp_morphs(model, &gf, e) }, Morph::Extension(f, s, tm) => { // g . <f, s, tm> = <g . f, s, gtm> let gf = Self::comp_morphs(model, g, f); let gtm = model.subst_tm(g, tm); model.extension(&gf, &*s, &gtm) } _ => gf } } fn morph_codomain(model: &mut TModel, morph: &Morph) -> Ctx { match morph { Morph::Identity(ctx) => (**ctx).clone(), Morph::Weakening(ty) => model.comprehension(&*ty), Morph::Composition(g, _) => Self::morph_codomain(model, g), Morph::Extension(f, _, _) => Self::morph_codomain(model, f), } } } #[cfg(test)] mod tests { use crate::cwf_model; use crate::lang::parser::DefParser; fn verify_def(code: &str) { let p = DefParser::new().parse(code).unwrap(); let model = cwf_model::Cwf::new(); super::TypeChecker::new(model).check_def(&p).unwrap(); } #[test] fn id() { verify_def("def id (b : bool) : bool := b."); } #[test] fn negb() { verify_def(" def negb (b : bool) : bool := elim b into (_ : bool) : bool | => false | => true end."); } #[test] fn transitive() { verify_def(" def trans (a b c d e : bool) (p1 : a = b) (p2 : b = c) (p3 : c = d) (p4 : d = e) : a = e := refl a.") } #[test] fn uip() { verify_def(" def uip (a b : bool) (p : a = b) (q : b = a) : p = q := refl p.") } #[test] fn eta() { verify_def(" def eta (a : bool) : a = elim a into (_ : bool) : bool | => true | => false end := elim a into (b : bool) : b = elim b into (_ : bool) : bool | => true | => false end | => refl true | => refl false end.") } }
{ let (tm, _) = self.check_tm(expr)?; let eq_ty = self.model.eq_ty(&tm, &tm); let refl_tm = self.model.refl(&tm); Ok((refl_tm, eq_ty)) }
identifier_body
app.component.local.ts
// ------------------------------------------------------------------------------ // Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information. // ------------------------------------------------------------------------------ import { Component, OnInit, Input, ChangeDetectorRef, DoCheck, AfterViewInit } from '@angular/core'; import { Response, Headers } from '@angular/http'; import { ExplorerOptions, RequestType, ExplorerValues, GraphApiCall, GraphRequestHeader, Message, SampleQuery, MessageBarContent, GraphApiVersions, GraphApiVersion } from "./base"; import { GraphExplorerComponent } from "./GraphExplorerComponent"; import { initAuth, checkHasValidAuthToken, isAuthenticated } from "./auth"; import { initFabricComponents } from "./fabric-components"; import { GraphService } from "./graph-service"; import { isImageResponse, isHtmlResponse, isXmlResponse, handleHtmlResponse, handleXmlResponse, handleJsonResponse, handleImageResponse, insertHeadersIntoResponseViewer, showResults } from "./response-handlers"; import { saveHistoryToLocalStorage, loadHistoryFromLocalStorage } from "./history"; import { createHeaders, getParameterByName } from "./util"; import { getRequestBodyEditor, getAceEditorFromElId, getJsonViewer } from "./api-explorer-jseditor"; import { parseMetadata, constructGraphLinksFromFullPath } from "./graph-structure"; import { ResponseStatusBarComponent } from "./response-status-bar.component"; import { GenericDialogComponent } from "./generic-message-dialog.component"; import { getString } from "./localization-helpers"; declare let mwf, ga, moment; @Component({ selector: 'api-explorer', providers: [GraphService], templateUrl: './app.component.html', styles: [` #explorer-main { padding-left: 12px; } sidebar { padding: 0px; } `] }) export class AppComponent extends GraphExplorerComponent implements OnInit, AfterViewInit { ngAfterViewInit(): void { // Headers aren't updated when that tab is hidden, so when clicking on any tab reinsert the headers if (typeof $ !== "undefined") { $("#response-viewer-labels .ms-Pivot-link").on('click', () => { insertHeadersIntoResponseViewer(AppComponent.lastApiCallHeaders) }); } parseMetadata(this.GraphService, "v1.0"); parseMetadata(this.GraphService, "beta"); } static svc:GraphService; static messageBarContent:MessageBarContent; static lastApiCallHeaders: Headers; static _changeDetectionRef:ChangeDetectorRef; static message:Message; constructor(private GraphService: GraphService, private chRef: ChangeDetectorRef) { super(); AppComponent.svc = GraphService; AppComponent._changeDetectionRef = chRef; } ngOnInit() { for (let key in AppComponent.Options) { if (key in window) AppComponent.Options[key] = window[key]; } AppComponent.Options.GraphVersions.push("Other"); initAuth(AppComponent.Options, this.GraphService, this.chRef); initFabricComponents(); mwf.ComponentFactory.create([{ 'component': mwf.Drawer, }]) moment.locale(AppComponent.Options.Language); } static Options: ExplorerOptions = { ClientId: "43c48040-3ceb-43fe-a504-9a9bcb856e40", Language: "en-US", DefaultUserScopes: "openid profile User.ReadWrite User.ReadBasic.All Sites.ReadWrite.All Contacts.ReadWrite People.Read Notes.ReadWrite.All Tasks.ReadWrite Mail.ReadWrite Files.ReadWrite.All Calendars.ReadWrite", AuthUrl: "https://login.microsoftonline.com", GraphUrl: "https://graph.microsoft.com", GraphVersions: GraphApiVersions, PathToBuildDir: "" }; static explorerValues:ExplorerValues = { endpointUrl: 
AppComponent.Options.GraphUrl + `/${(getParameterByName("version") || "v1.0")}/${getParameterByName("request") || 'me/'}`, selectedOption: getParameterByName("method") as RequestType || "GET", selectedVersion: getParameterByName("version") as GraphApiVersion || "v1.0", authentication: { user: {} }, showImage: false, requestInProgress: false, headers: [], postBody: "" }; static requestHistory: GraphApiCall[] = loadHistoryFromLocalStorage(); static addRequestToHistory(request:GraphApiCall) { AppComponent.requestHistory.splice(0, 0, request); //add history object to the array saveHistoryToLocalStorage(AppComponent.requestHistory); } static removeRequestFromHistory(request:GraphApiCall) { const idx = AppComponent.requestHistory.indexOf(request); if (idx > -1) { AppComponent.requestHistory.splice(idx, 1); } else { console.error("Trying to remove history item that doesn't exist") } saveHistoryToLocalStorage(AppComponent.requestHistory); } static setMessage(message:Message) { AppComponent.message = message; setTimeout(() => {GenericDialogComponent.showDialog();}); } static executeExplorerQuery(fromSample?:boolean) { // #hack. When clicking on an autocomplete option, the model isn't updated if (fromSample != true) AppComponent.explorerValues.endpointUrl = $("#graph-request-url input").val(); let query:GraphApiCall = { requestUrl: AppComponent.explorerValues.endpointUrl, method: AppComponent.explorerValues.selectedOption, requestSentAt: new Date(), headers: AppComponent.explorerValues.headers, postBody: getRequestBodyEditor().getSession().getValue() }; checkHasValidAuthToken(); let graphRequest:Promise<Response>; if (isAuthenticated()) { graphRequest = AppComponent.svc.performQuery(query.method, query.requestUrl, query.postBody, createHeaders(query.headers)); } else { graphRequest = AppComponent.svc.performAnonymousQuery(query.method, query.requestUrl, createHeaders(query.headers)); } this.explorerValues.requestInProgress = true; graphRequest.then((res) => { handleSuccessfulQueryResponse(res, query); }).catch((res) => { handleUnsuccessfulQueryResponse(res, query); }); } static clearResponse() { // clear response preview and headers getAceEditorFromElId("response-header-viewer").getSession().setValue(""); getJsonViewer().getSession().setValue("") this.explorerValues.showImage = false; ResponseStatusBarComponent.clearMessage() } } function isSuccessful(query:GraphApiCall) { return query.statusCode >= 200 && query.statusCode < 300; } function createTextSummary(query:GraphApiCall) { let text = ""; if (isSuccessful(query)) { text += getString(AppComponent.Options, "Success"); } else { text += getString(AppComponent.Options, "Failure"); } text += ` - ${getString(AppComponent.Options, "Status Code")} ${query.statusCode}` text += `<span style="font-weight: 800; margin-left: 40px;">${query.duration}ms</span>`; if (query.statusCode == 401 || query.statusCode == 403) { text += `<span style="margin-left: 40px;">Looks like you may not have the permissions for this call. 
Please <a href="#" class="c-hyperlink" onclick="window.launchPermissionsDialog()" class="">modify your permissions</a>.</span>` } return text; } function commonResponseHandler(res:Response, query:GraphApiCall) { AppComponent.clearResponse(); // common ops for successful and unsuccessful AppComponent.explorerValues.requestInProgress = false; AppComponent.lastApiCallHeaders = res.headers; let {status, headers} = res; query.duration = (new Date()).getTime() - query.requestSentAt.getTime(); query.statusCode = status; AppComponent.addRequestToHistory(query); AppComponent.messageBarContent = { text: createTextSummary(query), backgroundClass: isSuccessful(query) ? "ms-MessageBar--success" : "ms-MessageBar--error", icon: isSuccessful(query) ? "ms-Icon--Completed" : "ms-Icon--ErrorBadge" } let dataPoints:any[] = [query.statusCode] let urlGraph = constructGraphLinksFromFullPath(query.requestUrl); if (urlGraph && urlGraph.length > 0) { let cleanedUrl = urlGraph.map((link) => link.type).join("/"); dataPoints.push(cleanedUrl); } else { dataPoints.push("UnknownUrl"); } dataPoints.push(isAuthenticated() ? "authenticated" : "demo"); if (typeof ga !== 'undefined') { ga('send', { hitType: 'event', eventCategory: 'GraphExplorer', eventAction: 'ExecuteQuery', eventLabel: dataPoints.join(",") }); } } function handleSuccessfulQueryResponse(res:Response, query:GraphApiCall) { commonResponseHandler(res, query); let {status, headers} = res; let resultBody = res.text(); AppComponent.explorerValues.showImage = false; if (isImageResponse(headers)) { let method = isAuthenticated() ? AppComponent.svc.performQuery : AppComponent.svc.performAnonymousQuery;; handleImageResponse(method, headers, status, handleUnsuccessfulQueryResponse); } else if (isHtmlResponse(headers)) { insertHeadersIntoResponseViewer(headers); handleHtmlResponse(resultBody); } else if (isXmlResponse(resultBody)) { insertHeadersIntoResponseViewer(headers); handleXmlResponse(resultBody); } else { insertHeadersIntoResponseViewer(headers); if (res.text() != "") handleJsonResponse(res.json()); } } function handleUnsuccessfulQueryResponse(res:Response, query:GraphApiCall)
{ commonResponseHandler(res, query); insertHeadersIntoResponseViewer(res.headers); let errorText; try { errorText = res.json(); handleJsonResponse(errorText); return; } catch(e) { errorText = res.text(); } if (errorText.indexOf("<!DOCTYPE html>") != -1) { handleHtmlResponse(errorText); } else { showResults(errorText, "text") } }
identifier_body
app.component.local.ts
// ------------------------------------------------------------------------------ // Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information. // ------------------------------------------------------------------------------ import { Component, OnInit, Input, ChangeDetectorRef, DoCheck, AfterViewInit } from '@angular/core'; import { Response, Headers } from '@angular/http'; import { ExplorerOptions, RequestType, ExplorerValues, GraphApiCall, GraphRequestHeader, Message, SampleQuery, MessageBarContent, GraphApiVersions, GraphApiVersion } from "./base"; import { GraphExplorerComponent } from "./GraphExplorerComponent"; import { initAuth, checkHasValidAuthToken, isAuthenticated } from "./auth"; import { initFabricComponents } from "./fabric-components"; import { GraphService } from "./graph-service"; import { isImageResponse, isHtmlResponse, isXmlResponse, handleHtmlResponse, handleXmlResponse, handleJsonResponse, handleImageResponse, insertHeadersIntoResponseViewer, showResults } from "./response-handlers"; import { saveHistoryToLocalStorage, loadHistoryFromLocalStorage } from "./history"; import { createHeaders, getParameterByName } from "./util"; import { getRequestBodyEditor, getAceEditorFromElId, getJsonViewer } from "./api-explorer-jseditor"; import { parseMetadata, constructGraphLinksFromFullPath } from "./graph-structure"; import { ResponseStatusBarComponent } from "./response-status-bar.component"; import { GenericDialogComponent } from "./generic-message-dialog.component"; import { getString } from "./localization-helpers"; declare let mwf, ga, moment; @Component({ selector: 'api-explorer', providers: [GraphService], templateUrl: './app.component.html', styles: [` #explorer-main { padding-left: 12px; } sidebar { padding: 0px; } `] }) export class AppComponent extends GraphExplorerComponent implements OnInit, AfterViewInit { ngAfterViewInit(): void { // Headers aren't updated when that tab is hidden, so when clicking on any tab reinsert the headers if (typeof $ !== "undefined") { $("#response-viewer-labels .ms-Pivot-link").on('click', () => { insertHeadersIntoResponseViewer(AppComponent.lastApiCallHeaders) }); } parseMetadata(this.GraphService, "v1.0"); parseMetadata(this.GraphService, "beta"); } static svc:GraphService; static messageBarContent:MessageBarContent; static lastApiCallHeaders: Headers; static _changeDetectionRef:ChangeDetectorRef; static message:Message; constructor(private GraphService: GraphService, private chRef: ChangeDetectorRef) { super(); AppComponent.svc = GraphService; AppComponent._changeDetectionRef = chRef; } ngOnInit() { for (let key in AppComponent.Options) { if (key in window) AppComponent.Options[key] = window[key]; } AppComponent.Options.GraphVersions.push("Other"); initAuth(AppComponent.Options, this.GraphService, this.chRef); initFabricComponents(); mwf.ComponentFactory.create([{ 'component': mwf.Drawer, }]) moment.locale(AppComponent.Options.Language); } static Options: ExplorerOptions = { ClientId: "43c48040-3ceb-43fe-a504-9a9bcb856e40", Language: "en-US", DefaultUserScopes: "openid profile User.ReadWrite User.ReadBasic.All Sites.ReadWrite.All Contacts.ReadWrite People.Read Notes.ReadWrite.All Tasks.ReadWrite Mail.ReadWrite Files.ReadWrite.All Calendars.ReadWrite", AuthUrl: "https://login.microsoftonline.com", GraphUrl: "https://graph.microsoft.com", GraphVersions: GraphApiVersions, PathToBuildDir: "" }; static explorerValues:ExplorerValues = { endpointUrl: 
AppComponent.Options.GraphUrl + `/${(getParameterByName("version") || "v1.0")}/${getParameterByName("request") || 'me/'}`, selectedOption: getParameterByName("method") as RequestType || "GET", selectedVersion: getParameterByName("version") as GraphApiVersion || "v1.0", authentication: { user: {} }, showImage: false, requestInProgress: false, headers: [], postBody: "" }; static requestHistory: GraphApiCall[] = loadHistoryFromLocalStorage(); static addRequestToHistory(request:GraphApiCall) { AppComponent.requestHistory.splice(0, 0, request); //add history object to the array saveHistoryToLocalStorage(AppComponent.requestHistory); } static removeRequestFromHistory(request:GraphApiCall) { const idx = AppComponent.requestHistory.indexOf(request); if (idx > -1) { AppComponent.requestHistory.splice(idx, 1); } else { console.error("Trying to remove history item that doesn't exist") } saveHistoryToLocalStorage(AppComponent.requestHistory); } static setMessage(message:Message) { AppComponent.message = message; setTimeout(() => {GenericDialogComponent.showDialog();}); } static executeExplorerQuery(fromSample?:boolean) { // #hack. When clicking on an autocomplete option, the model isn't updated if (fromSample != true) AppComponent.explorerValues.endpointUrl = $("#graph-request-url input").val(); let query:GraphApiCall = { requestUrl: AppComponent.explorerValues.endpointUrl, method: AppComponent.explorerValues.selectedOption, requestSentAt: new Date(), headers: AppComponent.explorerValues.headers, postBody: getRequestBodyEditor().getSession().getValue() }; checkHasValidAuthToken(); let graphRequest:Promise<Response>; if (isAuthenticated()) { graphRequest = AppComponent.svc.performQuery(query.method, query.requestUrl, query.postBody, createHeaders(query.headers)); } else { graphRequest = AppComponent.svc.performAnonymousQuery(query.method, query.requestUrl, createHeaders(query.headers)); } this.explorerValues.requestInProgress = true; graphRequest.then((res) => { handleSuccessfulQueryResponse(res, query); }).catch((res) => { handleUnsuccessfulQueryResponse(res, query); }); } static clearResponse() { // clear response preview and headers getAceEditorFromElId("response-header-viewer").getSession().setValue(""); getJsonViewer().getSession().setValue("") this.explorerValues.showImage = false; ResponseStatusBarComponent.clearMessage() } } function isSuccessful(query:GraphApiCall) { return query.statusCode >= 200 && query.statusCode < 300; } function
(query:GraphApiCall) { let text = ""; if (isSuccessful(query)) { text += getString(AppComponent.Options, "Success"); } else { text += getString(AppComponent.Options, "Failure"); } text += ` - ${getString(AppComponent.Options, "Status Code")} ${query.statusCode}` text += `<span style="font-weight: 800; margin-left: 40px;">${query.duration}ms</span>`; if (query.statusCode == 401 || query.statusCode == 403) { text += `<span style="margin-left: 40px;">Looks like you may not have the permissions for this call. Please <a href="#" class="c-hyperlink" onclick="window.launchPermissionsDialog()" class="">modify your permissions</a>.</span>` } return text; } function commonResponseHandler(res:Response, query:GraphApiCall) { AppComponent.clearResponse(); // common ops for successful and unsuccessful AppComponent.explorerValues.requestInProgress = false; AppComponent.lastApiCallHeaders = res.headers; let {status, headers} = res; query.duration = (new Date()).getTime() - query.requestSentAt.getTime(); query.statusCode = status; AppComponent.addRequestToHistory(query); AppComponent.messageBarContent = { text: createTextSummary(query), backgroundClass: isSuccessful(query) ? "ms-MessageBar--success" : "ms-MessageBar--error", icon: isSuccessful(query) ? "ms-Icon--Completed" : "ms-Icon--ErrorBadge" } let dataPoints:any[] = [query.statusCode] let urlGraph = constructGraphLinksFromFullPath(query.requestUrl); if (urlGraph && urlGraph.length > 0) { let cleanedUrl = urlGraph.map((link) => link.type).join("/"); dataPoints.push(cleanedUrl); } else { dataPoints.push("UnknownUrl"); } dataPoints.push(isAuthenticated() ? "authenticated" : "demo"); if (typeof ga !== 'undefined') { ga('send', { hitType: 'event', eventCategory: 'GraphExplorer', eventAction: 'ExecuteQuery', eventLabel: dataPoints.join(",") }); } } function handleSuccessfulQueryResponse(res:Response, query:GraphApiCall) { commonResponseHandler(res, query); let {status, headers} = res; let resultBody = res.text(); AppComponent.explorerValues.showImage = false; if (isImageResponse(headers)) { let method = isAuthenticated() ? AppComponent.svc.performQuery : AppComponent.svc.performAnonymousQuery;; handleImageResponse(method, headers, status, handleUnsuccessfulQueryResponse); } else if (isHtmlResponse(headers)) { insertHeadersIntoResponseViewer(headers); handleHtmlResponse(resultBody); } else if (isXmlResponse(resultBody)) { insertHeadersIntoResponseViewer(headers); handleXmlResponse(resultBody); } else { insertHeadersIntoResponseViewer(headers); if (res.text() != "") handleJsonResponse(res.json()); } } function handleUnsuccessfulQueryResponse(res:Response, query:GraphApiCall) { commonResponseHandler(res, query); insertHeadersIntoResponseViewer(res.headers); let errorText; try { errorText = res.json(); handleJsonResponse(errorText); return; } catch(e) { errorText = res.text(); } if (errorText.indexOf("<!DOCTYPE html>") != -1) { handleHtmlResponse(errorText); } else { showResults(errorText, "text") } }
createTextSummary
identifier_name
app.component.local.ts
// ------------------------------------------------------------------------------ // Copyright (c) Microsoft Corporation. All Rights Reserved. Licensed under the MIT License. See License in the project root for license information. // ------------------------------------------------------------------------------ import { Component, OnInit, Input, ChangeDetectorRef, DoCheck, AfterViewInit } from '@angular/core'; import { Response, Headers } from '@angular/http'; import { ExplorerOptions, RequestType, ExplorerValues, GraphApiCall, GraphRequestHeader, Message, SampleQuery, MessageBarContent, GraphApiVersions, GraphApiVersion } from "./base"; import { GraphExplorerComponent } from "./GraphExplorerComponent"; import { initAuth, checkHasValidAuthToken, isAuthenticated } from "./auth"; import { initFabricComponents } from "./fabric-components"; import { GraphService } from "./graph-service"; import { isImageResponse, isHtmlResponse, isXmlResponse, handleHtmlResponse, handleXmlResponse, handleJsonResponse, handleImageResponse, insertHeadersIntoResponseViewer, showResults } from "./response-handlers"; import { saveHistoryToLocalStorage, loadHistoryFromLocalStorage } from "./history"; import { createHeaders, getParameterByName } from "./util"; import { getRequestBodyEditor, getAceEditorFromElId, getJsonViewer } from "./api-explorer-jseditor"; import { parseMetadata, constructGraphLinksFromFullPath } from "./graph-structure"; import { ResponseStatusBarComponent } from "./response-status-bar.component"; import { GenericDialogComponent } from "./generic-message-dialog.component"; import { getString } from "./localization-helpers"; declare let mwf, ga, moment; @Component({ selector: 'api-explorer', providers: [GraphService], templateUrl: './app.component.html', styles: [` #explorer-main { padding-left: 12px; } sidebar {
`] }) export class AppComponent extends GraphExplorerComponent implements OnInit, AfterViewInit { ngAfterViewInit(): void { // Headers aren't updated when that tab is hidden, so when clicking on any tab reinsert the headers if (typeof $ !== "undefined") { $("#response-viewer-labels .ms-Pivot-link").on('click', () => { insertHeadersIntoResponseViewer(AppComponent.lastApiCallHeaders) }); } parseMetadata(this.GraphService, "v1.0"); parseMetadata(this.GraphService, "beta"); } static svc:GraphService; static messageBarContent:MessageBarContent; static lastApiCallHeaders: Headers; static _changeDetectionRef:ChangeDetectorRef; static message:Message; constructor(private GraphService: GraphService, private chRef: ChangeDetectorRef) { super(); AppComponent.svc = GraphService; AppComponent._changeDetectionRef = chRef; } ngOnInit() { for (let key in AppComponent.Options) { if (key in window) AppComponent.Options[key] = window[key]; } AppComponent.Options.GraphVersions.push("Other"); initAuth(AppComponent.Options, this.GraphService, this.chRef); initFabricComponents(); mwf.ComponentFactory.create([{ 'component': mwf.Drawer, }]) moment.locale(AppComponent.Options.Language); } static Options: ExplorerOptions = { ClientId: "43c48040-3ceb-43fe-a504-9a9bcb856e40", Language: "en-US", DefaultUserScopes: "openid profile User.ReadWrite User.ReadBasic.All Sites.ReadWrite.All Contacts.ReadWrite People.Read Notes.ReadWrite.All Tasks.ReadWrite Mail.ReadWrite Files.ReadWrite.All Calendars.ReadWrite", AuthUrl: "https://login.microsoftonline.com", GraphUrl: "https://graph.microsoft.com", GraphVersions: GraphApiVersions, PathToBuildDir: "" }; static explorerValues:ExplorerValues = { endpointUrl: AppComponent.Options.GraphUrl + `/${(getParameterByName("version") || "v1.0")}/${getParameterByName("request") || 'me/'}`, selectedOption: getParameterByName("method") as RequestType || "GET", selectedVersion: getParameterByName("version") as GraphApiVersion || "v1.0", authentication: { user: {} }, showImage: false, requestInProgress: false, headers: [], postBody: "" }; static requestHistory: GraphApiCall[] = loadHistoryFromLocalStorage(); static addRequestToHistory(request:GraphApiCall) { AppComponent.requestHistory.splice(0, 0, request); //add history object to the array saveHistoryToLocalStorage(AppComponent.requestHistory); } static removeRequestFromHistory(request:GraphApiCall) { const idx = AppComponent.requestHistory.indexOf(request); if (idx > -1) { AppComponent.requestHistory.splice(idx, 1); } else { console.error("Trying to remove history item that doesn't exist") } saveHistoryToLocalStorage(AppComponent.requestHistory); } static setMessage(message:Message) { AppComponent.message = message; setTimeout(() => {GenericDialogComponent.showDialog();}); } static executeExplorerQuery(fromSample?:boolean) { // #hack. 
When clicking on an autocomplete option, the model isn't updated if (fromSample != true) AppComponent.explorerValues.endpointUrl = $("#graph-request-url input").val(); let query:GraphApiCall = { requestUrl: AppComponent.explorerValues.endpointUrl, method: AppComponent.explorerValues.selectedOption, requestSentAt: new Date(), headers: AppComponent.explorerValues.headers, postBody: getRequestBodyEditor().getSession().getValue() }; checkHasValidAuthToken(); let graphRequest:Promise<Response>; if (isAuthenticated()) { graphRequest = AppComponent.svc.performQuery(query.method, query.requestUrl, query.postBody, createHeaders(query.headers)); } else { graphRequest = AppComponent.svc.performAnonymousQuery(query.method, query.requestUrl, createHeaders(query.headers)); } this.explorerValues.requestInProgress = true; graphRequest.then((res) => { handleSuccessfulQueryResponse(res, query); }).catch((res) => { handleUnsuccessfulQueryResponse(res, query); }); } static clearResponse() { // clear response preview and headers getAceEditorFromElId("response-header-viewer").getSession().setValue(""); getJsonViewer().getSession().setValue("") this.explorerValues.showImage = false; ResponseStatusBarComponent.clearMessage() } } function isSuccessful(query:GraphApiCall) { return query.statusCode >= 200 && query.statusCode < 300; } function createTextSummary(query:GraphApiCall) { let text = ""; if (isSuccessful(query)) { text += getString(AppComponent.Options, "Success"); } else { text += getString(AppComponent.Options, "Failure"); } text += ` - ${getString(AppComponent.Options, "Status Code")} ${query.statusCode}` text += `<span style="font-weight: 800; margin-left: 40px;">${query.duration}ms</span>`; if (query.statusCode == 401 || query.statusCode == 403) { text += `<span style="margin-left: 40px;">Looks like you may not have the permissions for this call. Please <a href="#" class="c-hyperlink" onclick="window.launchPermissionsDialog()" class="">modify your permissions</a>.</span>` } return text; } function commonResponseHandler(res:Response, query:GraphApiCall) { AppComponent.clearResponse(); // common ops for successful and unsuccessful AppComponent.explorerValues.requestInProgress = false; AppComponent.lastApiCallHeaders = res.headers; let {status, headers} = res; query.duration = (new Date()).getTime() - query.requestSentAt.getTime(); query.statusCode = status; AppComponent.addRequestToHistory(query); AppComponent.messageBarContent = { text: createTextSummary(query), backgroundClass: isSuccessful(query) ? "ms-MessageBar--success" : "ms-MessageBar--error", icon: isSuccessful(query) ? "ms-Icon--Completed" : "ms-Icon--ErrorBadge" } let dataPoints:any[] = [query.statusCode] let urlGraph = constructGraphLinksFromFullPath(query.requestUrl); if (urlGraph && urlGraph.length > 0) { let cleanedUrl = urlGraph.map((link) => link.type).join("/"); dataPoints.push(cleanedUrl); } else { dataPoints.push("UnknownUrl"); } dataPoints.push(isAuthenticated() ? "authenticated" : "demo"); if (typeof ga !== 'undefined') { ga('send', { hitType: 'event', eventCategory: 'GraphExplorer', eventAction: 'ExecuteQuery', eventLabel: dataPoints.join(",") }); } } function handleSuccessfulQueryResponse(res:Response, query:GraphApiCall) { commonResponseHandler(res, query); let {status, headers} = res; let resultBody = res.text(); AppComponent.explorerValues.showImage = false; if (isImageResponse(headers)) { let method = isAuthenticated() ? 
            AppComponent.svc.performQuery : AppComponent.svc.performAnonymousQuery;
        handleImageResponse(method, headers, status, handleUnsuccessfulQueryResponse);
    } else if (isHtmlResponse(headers)) {
        insertHeadersIntoResponseViewer(headers);
        handleHtmlResponse(resultBody);
    } else if (isXmlResponse(resultBody)) {
        insertHeadersIntoResponseViewer(headers);
        handleXmlResponse(resultBody);
    } else {
        insertHeadersIntoResponseViewer(headers);
        if (res.text() !== "") handleJsonResponse(res.json());
    }
}

function handleUnsuccessfulQueryResponse(res: Response, query: GraphApiCall) {
    commonResponseHandler(res, query);
    insertHeadersIntoResponseViewer(res.headers);

    let errorText;
    try {
        errorText = res.json();
        handleJsonResponse(errorText);
        return;
    } catch (e) {
        errorText = res.text();
    }

    if (errorText.indexOf("<!DOCTYPE html>") !== -1) {
        handleHtmlResponse(errorText);
    } else {
        showResults(errorText, "text");
    }
}
__init__.py
""" Deployment management for KnightLab web application projects. Read the README. """ import os from os.path import abspath, dirname import sys from datetime import datetime import zipfile import zlib from fabric.api import env, put, local, settings, hide from fabric.context_managers import lcd from fabric.decorators import task from fabric.operations import prompt from fabric.tasks import execute from .fos import clean, exists, join from .utils import notice, warn, abort, do, confirm from . import aws, git, static if not 'project_name' in env: abort('You must set env.project_name in your fabfile') # # Set to parent directory of repositories # env.sites_path = dirname(dirname(abspath(__file__))) # # Set path to s3cmd.cnf in secrets repository # env.s3cmd_cfg = join(env.sites_path, 'secrets', 's3cmd.cfg') # # Load config.json from project directory? # _config = None config_json_path = join(env.sites_path, env.project_name, 'config.json') try: _config = static.load_config(config_json_path) notice('Loaded config @ %s' % config_json_path) except IOError: notice('No config found @ %s' % config_json_path) def _setup_env(): """Setup the local working environment.""" env.home_path = os.path.expanduser('~') env.env_path = os.getenv('WORKON_HOME') if not env.env_path: warn("You should set the WORKON_HOME environment variable to" \ " the root directory for your virtual environments.") env.env_path = env.sites_path env.project_path = join(env.sites_path, env.project_name) env.ve_path = join(env.env_path, env.project_name) env.activate_path = join(env.ve_path, 'bin', 'activate') def _s3cmd_put(src_path, bucket): """Copy local directory to S3 bucket""" if not os.path.exists(env.s3cmd_cfg): abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'." % env) with lcd(env.sites_path): local('fablib/bin/s3cmd --config=%s put' \ ' --rexclude ".*/\.[^/]*$"' \ ' --acl-public' \ ' --add-header="Cache-Control:max-age=300"' \ ' -r %s/ s3://%s/' \ % (env.s3cmd_cfg, src_path, bucket)) def _s3cmd_sync(src_path, bucket): """Sync local directory with S3 bucket""" if not os.path.exists(env.s3cmd_cfg): abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'." % env) with lcd(env.sites_path): local('fablib/bin/s3cmd --config=%s sync' \ ' --rexclude ".*/\.[^/]*$"' \ ' --delete-removed --acl-public' \ ' --add-header="Cache-Control:max-age=300"' \ ' --no-preserve' \ ' %s/ s3://%s/' \ % (env.s3cmd_cfg, src_path, bucket)) ############################################################ # JS libraries ############################################################ if _config: # Set env.cdn_path = path to cdn repository env.cdn_path = abspath(join(_config['root_path'], 'cdn.knightlab.com', 'app', 'libs', _config['name'])) def _make_zip(file_path): notice('Creating zip file: %s' % file_path) with zipfile.ZipFile(file_path, 'w', zipfile.ZIP_DEFLATED) as f_zip: for r in _config['stage']: static.add_zip_files(f_zip, _config, [{ "src": r['src'], "dst": _config['name'], "regex": r['regex']}]) @task def build(): """Build lib version""" _setup_env() # Get build config if not 'build' in _config: abort('Could not find "build" in config file') # Check version if not 'version' in _config: _config['version'] = datetime.utcnow().strftime('%Y-%m-%d-%H-%M-%S') warn('Using development version value "%(version)s"' % _config) notice('Building version %(version)s...' 
% _config) # Clean build directory clean(_config['build_path']) # Build it for key, param in _config['build'].iteritems(): getattr(static, key)(_config, param) @task def stage(): """Build/commit/tag/push lib version, copy to local cdn repo""" _setup_env() if not 'stage' in _config: abort('Could not find "stage" in config file') # Make sure cdn exists exists(dirname(env.cdn_path), required=True) # Ask user for a new version _config['version'] = git.prompt_tag('Enter a new version number', unique=True) # Build version # use execute to allow for other implementations of 'build' execute('build') # Commit/push/tag with lcd(env.project_path): with settings(warn_only=True): local('git add build') # support builds where there's no change; sometimes comes up when # reusing a tag because of an unexpected problem with settings(warn_only=True): msg = local('git commit -m "Release %(version)s"' % _config,capture=True) if 'nothing to commit' in msg: warn(msg) warn('continuing anyway') elif not msg.startswith('[master'): abort("Unexpected result: %s" % msg) local('git push') git.push_tag(_config['version']) # Copy to local CDN repository cdn_path = join(env.cdn_path, _config['version']) clean(cdn_path) for r in _config['stage']: static.copy(_config, [{ "src": r['src'], "dst": cdn_path, "regex": r['regex']}]) # Create zip file in local CDN repository _make_zip(join(cdn_path, '%(name)s.zip' % _config)) @task def stage_dev(): """ Build lib and copy to local cdn repository as 'dev' version No tagging/committing/etc/ """ _setup_env() if not 'stage' in _config: abort('Could not find "stage" in config file') # Make sure cdn exists exists(dirname(env.cdn_path), required=True) # Build version build() # Copy to local CDN repository cdn_path = join(env.cdn_path, 'dev') clean(cdn_path) for r in _config['stage']: static.copy(_config, [{ "src": r['src'], "dst": cdn_path, "regex": r['regex']}]) # Create zip file in local CDN repository _make_zip(join(cdn_path, '%(name)s.zip' % _config)) @task def stage_latest(): """Copy lib version to latest within local cdn repo""" _setup_env() if 'version' in _config: version = _config['version'] else: version = git.prompt_tag('Which version to stage as "latest"?') notice('stage_latest: %s' % version) # Make sure version has been staged version_cdn_path = join(env.cdn_path, version) if not os.path.exists(version_cdn_path): abort("Version '%s' has not been staged" % version) # Stage version as latest latest_cdn_path = join(env.cdn_path, 'latest') clean(latest_cdn_path) static.copy(_config, [{ "src": version_cdn_path, "dst": latest_cdn_path}]) @task def untag(): """Delete a tag (in case of error)""" version = git.prompt_tag('Which tag to delete?') if not version: abort('No available version tag') git.delete_tag(version) ############################################################ # Static websites deployed to S3 ############################################################ if _config and 'deploy' in _config: @task def undeploy(env_type): """Delete website from S3 bucket. Specify stg|prd as argument.""" _setup_env() # Activate local virtual environment (for render_templates+flask?) local('. %s' % env.activate_path) if not os.path.exists(env.s3cmd_cfg): abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'.") if not env_type in _config['deploy']: abort('Could not find "%s" in "deploy" in config file' % env_type) if not "bucket" in _config['deploy'][env_type]:
            abort('Could not find "bucket" in "deploy.%s" in config file' % env_type)

        bucket = _config['deploy'][env_type]['bucket']

        warn('YOU ARE ABOUT TO DELETE EVERYTHING IN %s' % bucket)
        if not do(prompt("Are you ABSOLUTELY sure you want to do this? (y/n): ").strip()):
            abort('Aborting.')

        with lcd(env.sites_path):
            local('fablib/bin/s3cmd --config=%s del -r --force s3://%s/' \
                % (env.s3cmd_cfg, bucket))

    @task
    def render(env_type):
        """Render templates (deploy except for actual sync with S3)"""
        _setup_env()

        # Activate local virtual environment (for render_templates+flask?)
        local('. %s' % env.activate_path)

        if not os.path.exists(env.s3cmd_cfg):
            abort("Could not find 's3cmd.cfg' repository at '%(s3cmd_cfg)s'." % env)
        if not env_type in _config['deploy']:
            abort('Could not find "%s" in "deploy" in config file' % env_type)
        if not "bucket" in _config['deploy'][env_type]:
            abort('Could not find "bucket" in "deploy.%s" in config file' % env_type)

        if 'usemin_context' in _config['deploy'][env_type]:
            usemin_context = _config['deploy'][env_type]['usemin_context']
        else:
            usemin_context = None

        template_path = join(_config['project_path'], 'website', 'templates')
        deploy_path = join(_config['project_path'], 'build', 'website')
        clean(deploy_path)

        # Render templates and run usemin
        if 'deploy_context' in _config['deploy'][env_type]:
            deploy_context = _config['deploy'][env_type]['deploy_context']
        else:
            deploy_context = {}

        # Sometimes we need this path append to import app from website
        # in render_templates, dunno why:
        sys.path.append(_config['project_path'])

        static.render_templates(template_path, deploy_path, deploy_context)
        static.usemin(_config, [deploy_path], usemin_context)

        # Copy static files
        static.copy(_config, [{
            "src": join(_config['project_path'], 'website', 'static'),
            "dst": join(deploy_path, 'static')
        }])

        # Additional copy?
        if 'copy' in _config['deploy'][env_type]:
            static.copy(_config, _config['deploy'][env_type]['copy'])

    @task
    def put(env_type):
        """Put (copy) website to S3 bucket. Specify stg|prd as argument."""
        render(env_type)

        bucket = _config['deploy'][env_type]['bucket']
        notice('copying to %s' % bucket)

        # Copy to S3
        deploy_path = join(_config['project_path'], 'build', 'website')
        _s3cmd_put(deploy_path, bucket)

    @task
    def deploy(env_type):
        """Deploy website to S3 bucket. Specify stg|prd as argument."""
        render(env_type)

        bucket = _config['deploy'][env_type]['bucket']
        notice('deploying to %s' % bucket)

        # Sync to S3
        deploy_path = join(_config['project_path'], 'build', 'website')
        _s3cmd_sync(deploy_path, bucket)

@task
def serve(ssl='n', port='5000'):
    """Run the development server"""
    if not 'project_path' in env:
        _setup_env()

    opts = ' -p ' + port
    if do(ssl):
        opts += ' -s'

    with lcd(join(env.project_path)):
        if exists(join(env.project_path, 'manage.py')):
            local('python manage.py runserver')
        elif _config and 'deploy' in _config:
            if int(port) < 1024:
                local('sudo python website/app.py' + opts)
            else:
                local('python website/app.py' + opts)
        else:
            if int(port) < 1024:
                local('sudo python api.py' + opts)
            else:
                local('python api.py' + opts)

@task
def dump():
    """Dump env and config (if applicable) to stdout"""
    import pprint
    import json
    notice('dumping env...')
    pprint.pprint(env)
    if _config:
        notice('dumping config...')
        print _config
secp256k1_recover.rs
//! Public key recovery from [secp256k1] ECDSA signatures.
//!
//! [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1
//!
//! _This module provides low-level cryptographic building blocks that must be
//! used carefully to ensure proper security. Read this documentation and
//! accompanying links thoroughly._
//!
//! The [`secp256k1_recover`] syscall allows a secp256k1 public key that has
//! previously signed a message to be recovered from the combination of the
//! message, the signature, and a recovery ID. The recovery ID is generated
//! during signing.
//!
//! Use cases for `secp256k1_recover` include:
//!
//! - Implementing the Ethereum [`ecrecover`] builtin contract.
//! - Performing secp256k1 public key recovery generally.
//! - Verifying a single secp256k1 signature.
//!
//! While `secp256k1_recover` can be used to verify secp256k1 signatures, Solana
//! also provides the [secp256k1 program][sp], which is more flexible, has lower CPU
//! cost, and can validate many signatures at once.
//!
//! [sp]: crate::secp256k1_program
//! [`ecrecover`]: https://docs.soliditylang.org/en/v0.8.14/units-and-global-variables.html?highlight=ecrecover#mathematical-and-cryptographic-functions

use {
    borsh::{BorshDeserialize, BorshSchema, BorshSerialize},
    core::convert::TryFrom,
    thiserror::Error,
};

#[derive(Debug, Clone, PartialEq, Eq, Error)]
pub enum Secp256k1RecoverError {
    #[error("The hash provided to a secp256k1_recover is invalid")]
    InvalidHash,
    #[error("The recovery_id provided to a secp256k1_recover is invalid")]
    InvalidRecoveryId,
    #[error("The signature provided to a secp256k1_recover is invalid")]
    InvalidSignature,
}

impl From<u64> for Secp256k1RecoverError {
    fn from(v: u64) -> Secp256k1RecoverError {
        match v {
            1 => Secp256k1RecoverError::InvalidHash,
            2 => Secp256k1RecoverError::InvalidRecoveryId,
            3 => Secp256k1RecoverError::InvalidSignature,
            _ => panic!("Unsupported Secp256k1RecoverError"),
        }
    }
}

impl From<Secp256k1RecoverError> for u64 {
    fn from(v: Secp256k1RecoverError) -> u64 {
        match v {
            Secp256k1RecoverError::InvalidHash => 1,
            Secp256k1RecoverError::InvalidRecoveryId => 2,
            Secp256k1RecoverError::InvalidSignature => 3,
        }
    }
}

pub const SECP256K1_SIGNATURE_LENGTH: usize = 64;
pub const SECP256K1_PUBLIC_KEY_LENGTH: usize = 64;

#[repr(transparent)]
#[derive(
    BorshSerialize,
    BorshDeserialize,
    BorshSchema,
    Clone,
    Copy,
    Eq,
    PartialEq,
    Ord,
    PartialOrd,
    Hash,
    AbiExample,
)]
pub struct Secp256k1Pubkey(pub [u8; SECP256K1_PUBLIC_KEY_LENGTH]);

impl Secp256k1Pubkey {
    pub fn new(pubkey_vec: &[u8]) -> Self {
        Self(
            <[u8; SECP256K1_PUBLIC_KEY_LENGTH]>::try_from(<&[u8]>::clone(&pubkey_vec))
                .expect("Slice must be the same length as a Pubkey"),
        )
    }

    pub fn to_bytes(self) -> [u8; 64] {
        self.0
    }
}

/// Recover the public key from a [secp256k1] ECDSA signature and
/// cryptographically-hashed message.
///
/// [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1
///
/// This function is specifically intended for efficiently implementing
/// Ethereum's [`ecrecover`] builtin contract, for use by Ethereum integrators.
/// It may be useful for other purposes.
///
/// [`ecrecover`]: https://docs.soliditylang.org/en/v0.8.14/units-and-global-variables.html?highlight=ecrecover#mathematical-and-cryptographic-functions
///
/// `hash` is the 32-byte cryptographic hash (typically [`keccak`]) of an
/// arbitrary message, signed by some public key.
///
/// The recovery ID is a value in the range [0, 3] that is generated during
/// signing, and allows the recovery process to be more efficient.
/// Note that the `recovery_id` here does not directly correspond to an
/// Ethereum recovery ID as used in `ecrecover`. This function accepts recovery
/// IDs in the range of [0, 3], while Ethereum's recovery IDs have a value of
/// 27 or 28. To convert an Ethereum recovery ID to a value this function will
/// accept, subtract 27 from it, checking for underflow. In practice this
/// function will not succeed if given a recovery ID of 2 or 3, as these values
/// represent an "overflowing" signature, and this function returns an error
/// when parsing overflowing signatures.
///
/// [`keccak`]: crate::keccak
/// [`wrapping_sub`]: https://doc.rust-lang.org/std/primitive.u8.html#method.wrapping_sub
///
/// On success this function returns a [`Secp256k1Pubkey`], a wrapper around a
/// 64-byte secp256k1 public key. This public key corresponds to the secret key
/// that previously signed the message `hash` to produce the provided
/// `signature`.
///
/// While `secp256k1_recover` can be used to verify secp256k1 signatures by
/// comparing the recovered key against an expected key, Solana also provides
/// the [secp256k1 program][sp], which is more flexible, has lower CPU cost, and
/// can validate many signatures at once.
///
/// [sp]: crate::secp256k1_program
///
/// The `secp256k1_recover` syscall is implemented with the [`libsecp256k1`]
/// crate, which clients may also want to use.
///
/// [`libsecp256k1`]: https://docs.rs/libsecp256k1/latest/libsecp256k1
///
/// # Hashing messages
///
/// In ECDSA signing and key recovery the signed "message" is always a
/// cryptographic hash, not the original message itself. If not a cryptographic
/// hash, then an adversary can craft signatures that recover to arbitrary
/// public keys. This means the caller of this function generally must hash the
/// original message themselves and not rely on another party to provide the
/// hash.
///
/// Ethereum uses the [`keccak`] hash.
///
/// # Signature malleability
///
/// With the ECDSA signature algorithm it is possible for any party, given a
/// valid signature of some message, to create a second signature that is
/// equally valid. This is known as _signature malleability_. In many cases this
/// is not a concern, but in cases where applications rely on signatures to have
/// a unique representation this can be the source of bugs, potentially with
/// security implications.
///
/// **The Solana `secp256k1_recover` function does not prevent signature
/// malleability**. This is in contrast to the Bitcoin secp256k1 library, which
/// does prevent malleability by default. Solana accepts signatures with `S`
/// values that are either in the _high order_ or in the _low order_, and it
/// is trivial to produce one from the other.
///
/// To prevent signature malleability, it is common for secp256k1 signature
/// validators to only accept signatures with low-order `S` values, and reject
/// signatures with high-order `S` values.
/// The following code will accomplish this:
///
/// ```rust
/// # use solana_program::program_error::ProgramError;
/// # let signature_bytes = [
/// #     0x83, 0x55, 0x81, 0xDF, 0xB1, 0x02, 0xA7, 0xD2,
/// #     0x2D, 0x33, 0xA4, 0x07, 0xDD, 0x7E, 0xFA, 0x9A,
/// #     0xE8, 0x5F, 0x42, 0x6B, 0x2A, 0x05, 0xBB, 0xFB,
/// #     0xA1, 0xAE, 0x93, 0x84, 0x46, 0x48, 0xE3, 0x35,
/// #     0x74, 0xE1, 0x6D, 0xB4, 0xD0, 0x2D, 0xB2, 0x0B,
/// #     0x3C, 0x89, 0x8D, 0x0A, 0x44, 0xDF, 0x73, 0x9C,
/// #     0x1E, 0xBF, 0x06, 0x8E, 0x8A, 0x9F, 0xA9, 0xC3,
/// #     0xA5, 0xEA, 0x21, 0xAC, 0xED, 0x5B, 0x22, 0x13,
/// # ];
/// let signature = libsecp256k1::Signature::parse_standard_slice(&signature_bytes)
///     .map_err(|_| ProgramError::InvalidArgument)?;
///
/// if signature.s.is_high() {
///     return Err(ProgramError::InvalidArgument);
/// }
/// # Ok::<_, ProgramError>(())
/// ```
///
/// This has the downside that the program must link to the [`libsecp256k1`]
/// crate and parse the signature just for this check. Note that `libsecp256k1`
/// version 0.7.0 or greater is required for running on the Solana SBF target.
///
/// [`libsecp256k1`]: https://docs.rs/libsecp256k1/latest/libsecp256k1
///
/// For the most accurate description of signature malleability, and its
/// prevention in secp256k1, refer to comments in [`secp256k1.h`] in the Bitcoin
/// Core secp256k1 library, the documentation of the [OpenZeppelin `recover`
/// method for Solidity][ozr], and [this description of the problem on
/// StackExchange][sxr].
///
/// [`secp256k1.h`]: https://github.com/bitcoin-core/secp256k1/blob/44c2452fd387f7ca604ab42d73746e7d3a44d8a2/include/secp256k1.h
/// [ozr]: https://docs.openzeppelin.com/contracts/2.x/api/cryptography#ECDSA-recover-bytes32-bytes-
/// [sxr]: https://bitcoin.stackexchange.com/questions/81115/if-someone-wanted-to-pretend-to-be-satoshi-by-posting-a-fake-signature-to-defrau/81116#81116
///
/// # Errors
///
/// If `hash` is not 32 bytes in length this function returns
/// [`Secp256k1RecoverError::InvalidHash`], though see notes
/// on SBF-specific behavior below.
///
/// If `recovery_id` is not in the range [0, 3] this function returns
/// [`Secp256k1RecoverError::InvalidRecoveryId`].
///
/// If `signature` is not 64 bytes in length this function returns
/// [`Secp256k1RecoverError::InvalidSignature`], though see notes
/// on SBF-specific behavior below.
///
/// If `signature` represents an "overflowing" signature this function returns
/// [`Secp256k1RecoverError::InvalidSignature`]. Overflowing signatures are
/// non-standard and should not be encountered in practice.
///
/// If `signature` is otherwise invalid this function returns
/// [`Secp256k1RecoverError::InvalidSignature`].
///
/// # SBF-specific behavior
///
/// When calling this function on-chain the caller must verify the correct
/// lengths of `hash` and `signature` beforehand.
///
/// When run on-chain this function will not directly validate the lengths of
/// `hash` and `signature`. It will assume they are the correct lengths and
/// pass their pointers to the runtime, which will interpret them as 32-byte and
/// 64-byte buffers. If the provided slices are too short, the runtime will read
/// invalid data and attempt to interpret it, most likely returning an error,
/// though in some scenarios it may be possible to incorrectly return
/// successfully, or the transaction will abort if the syscall reads data
/// outside of the program's memory space. If the provided slices are too long
/// then they may be used to "smuggle" uninterpreted data.
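///
/// Because of this, a program may want to wrap the syscall with explicit
/// length checks. A minimal sketch of such a guard, mapping bad lengths to
/// this module's error variants; `secp256k1_recover_checked` is a hypothetical
/// helper name, not part of this API:
///
/// ```
/// use solana_program::secp256k1_recover::{
///     secp256k1_recover, Secp256k1Pubkey, Secp256k1RecoverError,
///     SECP256K1_SIGNATURE_LENGTH,
/// };
///
/// fn secp256k1_recover_checked(
///     hash: &[u8],
///     recovery_id: u8,
///     signature: &[u8],
/// ) -> Result<Secp256k1Pubkey, Secp256k1RecoverError> {
///     // The on-chain syscall assumes a 32-byte hash and a 64-byte signature,
///     // so validate both lengths before handing the pointers to the runtime.
///     if hash.len() != 32 {
///         return Err(Secp256k1RecoverError::InvalidHash);
///     }
///     if signature.len() != SECP256K1_SIGNATURE_LENGTH {
///         return Err(Secp256k1RecoverError::InvalidSignature);
///     }
///     secp256k1_recover(hash, recovery_id, signature)
/// }
/// ```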
///
/// # Examples
///
/// This example demonstrates recovering a public key and using it to verify a
/// signature with the `secp256k1_recover` syscall. It has three parts: a Solana
/// program, an RPC client to call the program, and common definitions shared
/// between the two.
///
/// Common definitions:
///
/// ```
/// use borsh::{BorshDeserialize, BorshSerialize};
///
/// #[derive(BorshSerialize, BorshDeserialize, Debug)]
/// pub struct DemoSecp256k1RecoverInstruction {
///     pub message: Vec<u8>,
///     pub signature: [u8; 64],
///     pub recovery_id: u8,
/// }
/// ```
///
/// The Solana program. Note that it uses `libsecp256k1` version 0.7.0 to parse
/// the secp256k1 signature to prevent malleability.
///
/// ```no_run
/// use solana_program::{
///     entrypoint::ProgramResult,
///     keccak, msg,
///     program_error::ProgramError,
///     secp256k1_recover::secp256k1_recover,
/// };
///
/// /// The key we expect to sign secp256k1 messages,
/// /// as serialized by `libsecp256k1::PublicKey::serialize`.
/// const AUTHORIZED_PUBLIC_KEY: [u8; 64] = [
///     0x8C, 0xD6, 0x47, 0xF8, 0xA5, 0xBF, 0x59, 0xA0, 0x4F, 0x77, 0xFA, 0xFA, 0x6C, 0xA0, 0xE6, 0x4D,
///     0x94, 0x5B, 0x46, 0x55, 0xA6, 0x2B, 0xB0, 0x6F, 0x10, 0x4C, 0x9E, 0x2C, 0x6F, 0x42, 0x0A, 0xBE,
///     0x18, 0xDF, 0x0B, 0xF0, 0x87, 0x42, 0xBA, 0x88, 0xB4, 0xCF, 0x87, 0x5A, 0x35, 0x27, 0xBE, 0x0F,
///     0x45, 0xAE, 0xFC, 0x66, 0x9C, 0x2C, 0x6B, 0xF3, 0xEF, 0xCA, 0x5C, 0x32, 0x11, 0xF7, 0x2A, 0xC7,
/// ];
/// # pub struct DemoSecp256k1RecoverInstruction {
/// #     pub message: Vec<u8>,
/// #     pub signature: [u8; 64],
/// #     pub recovery_id: u8,
/// # }
///
/// pub fn process_secp256k1_recover(
///     instruction: DemoSecp256k1RecoverInstruction,
/// ) -> ProgramResult {
///     // The secp256k1 recovery operation accepts a cryptographically-hashed
///     // message only. Passing it anything else is insecure and allows signatures
///     // to be forged.
///     //
///     // This means that the code calling `secp256k1_recover` must perform the hash
///     // itself, and not assume that data passed to it has been properly hashed.
///     let message_hash = {
///         let mut hasher = keccak::Hasher::default();
///         hasher.hash(&instruction.message);
///         hasher.result()
///     };
///
///     // Reject high-s value signatures to prevent malleability.
///     // Solana does not do this itself.
///     // This may or may not be necessary depending on use case.
///     {
///         let signature = libsecp256k1::Signature::parse_standard_slice(&instruction.signature)
///             .map_err(|_| ProgramError::InvalidArgument)?;
///
///         if signature.s.is_high() {
///             msg!("signature with high-s value");
///             return Err(ProgramError::InvalidArgument);
///         }
///     }
///
///     let recovered_pubkey = secp256k1_recover(
///         &message_hash.0,
///         instruction.recovery_id,
///         &instruction.signature,
///     )
///     .map_err(|_| ProgramError::InvalidArgument)?;
///
///     // If we're using this function for signature verification then we
///     // need to check the pubkey is an expected value.
///     // Here we are checking the secp256k1 pubkey against a known authorized pubkey.
/// if recovered_pubkey.0 != AUTHORIZED_PUBLIC_KEY { /// return Err(ProgramError::InvalidArgument); /// } /// /// Ok(()) /// } /// ``` /// /// The RPC client program: /// /// ```no_run /// # use solana_program::example_mocks::solana_rpc_client; /// # use solana_program::example_mocks::solana_sdk; /// use anyhow::Result; /// use solana_rpc_client::rpc_client::RpcClient; /// use solana_sdk::{ /// instruction::Instruction, /// keccak, /// pubkey::Pubkey, /// signature::{Keypair, Signer}, /// transaction::Transaction, /// }; /// # use borsh::{BorshDeserialize, BorshSerialize}; /// # #[derive(BorshSerialize, BorshDeserialize, Debug)] /// # pub struct DemoSecp256k1RecoverInstruction { /// # pub message: Vec<u8>, /// # pub signature: [u8; 64], /// # pub recovery_id: u8, /// # } /// /// pub fn demo_secp256k1_recover( /// payer_keypair: &Keypair, /// secp256k1_secret_key: &libsecp256k1::SecretKey, /// client: &RpcClient, /// program_keypair: &Keypair, /// ) -> Result<()> { /// let message = b"hello world"; /// let message_hash = { /// let mut hasher = keccak::Hasher::default(); /// hasher.hash(message); /// hasher.result() /// }; /// /// let secp_message = libsecp256k1::Message::parse(&message_hash.0); /// let (signature, recovery_id) = libsecp256k1::sign(&secp_message, &secp256k1_secret_key); /// /// let signature = signature.serialize(); /// /// let instr = DemoSecp256k1RecoverInstruction { /// message: message.to_vec(), /// signature, /// recovery_id: recovery_id.serialize(), /// }; /// let instr = Instruction::new_with_borsh( /// program_keypair.pubkey(), /// &instr, /// vec![], /// ); /// /// let blockhash = client.get_latest_blockhash()?; /// let tx = Transaction::new_signed_with_payer( /// &[instr], /// Some(&payer_keypair.pubkey()), /// &[payer_keypair], /// blockhash, /// );
/// client.send_and_confirm_transaction(&tx)?; /// /// Ok(()) /// } /// ``` pub fn secp256k1_recover( hash: &[u8], recovery_id: u8, signature: &[u8], ) -> Result<Secp256k1Pubkey, Secp256k1RecoverError> { #[cfg(target_os = "solana")] { let mut pubkey_buffer = [0u8; SECP256K1_PUBLIC_KEY_LENGTH]; let result = unsafe { crate::syscalls::sol_secp256k1_recover( hash.as_ptr(), recovery_id as u64, signature.as_ptr(), pubkey_buffer.as_mut_ptr(), ) }; match result { 0 => Ok(Secp256k1Pubkey::new(&pubkey_buffer)), error => Err(Secp256k1RecoverError::from(error)), } } #[cfg(not(target_os = "solana"))] { let message = libsecp256k1::Message::parse_slice(hash) .map_err(|_| Secp256k1RecoverError::InvalidHash)?; let recovery_id = libsecp256k1::RecoveryId::parse(recovery_id) .map_err(|_| Secp256k1RecoverError::InvalidRecoveryId)?; let signature = libsecp256k1::Signature::parse_standard_slice(signature) .map_err(|_| Secp256k1RecoverError::InvalidSignature)?; let secp256k1_key = libsecp256k1::recover(&message, &signature, &recovery_id) .map_err(|_| Secp256k1RecoverError::InvalidSignature)?; Ok(Secp256k1Pubkey::new(&secp256k1_key.serialize()[1..65])) } }
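
// A minimal sketch, not part of the module above: the Ethereum recovery-ID
// conversion described in the docs (subtract 27, checking for underflow).
// `eth_recovery_id` is a hypothetical helper name. For example,
// eth_recovery_id(27) == Some(0), eth_recovery_id(28) == Some(1), and
// eth_recovery_id(26) == None.
#[allow(dead_code)]
fn eth_recovery_id(eth_v: u8) -> Option<u8> {
    // `checked_sub` returns None on underflow (eth_v < 27); any result outside
    // [0, 3] was never a valid recovery ID, so filter it out.
    eth_v.checked_sub(27).filter(|id| *id < 4)
}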
///
random_line_split
secp256k1_recover.rs
//! Public key recovery from [secp256k1] ECDSA signatures. //! //! [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1 //! //! _This module provides low-level cryptographic building blocks that must be //! used carefully to ensure proper security. Read this documentation and //! accompanying links thoroughly._ //! //! The [`secp256k1_recover`] syscall allows a secp256k1 public key that has //! previously signed a message to be recovered from the combination of the //! message, the signature, and a recovery ID. The recovery ID is generated //! during signing. //! //! Use cases for `secp256k1_recover` include: //! //! - Implementing the Ethereum [`ecrecover`] builtin contract. //! - Performing secp256k1 public key recovery generally. //! - Verifying a single secp256k1 signature. //! //! While `secp256k1_recover` can be used to verify secp256k1 signatures, Solana //! also provides the [secp256k1 program][sp], which is more flexible, has lower CPU //! cost, and can validate many signatures at once. //! //! [sp]: crate::secp256k1_program //! [`ecrecover`]: https://docs.soliditylang.org/en/v0.8.14/units-and-global-variables.html?highlight=ecrecover#mathematical-and-cryptographic-functions use { borsh::{BorshDeserialize, BorshSchema, BorshSerialize}, core::convert::TryFrom, thiserror::Error, }; #[derive(Debug, Clone, PartialEq, Eq, Error)] pub enum Secp256k1RecoverError { #[error("The hash provided to a secp256k1_recover is invalid")] InvalidHash, #[error("The recovery_id provided to a secp256k1_recover is invalid")] InvalidRecoveryId, #[error("The signature provided to a secp256k1_recover is invalid")] InvalidSignature, } impl From<u64> for Secp256k1RecoverError { fn from(v: u64) -> Secp256k1RecoverError { match v { 1 => Secp256k1RecoverError::InvalidHash, 2 => Secp256k1RecoverError::InvalidRecoveryId, 3 => Secp256k1RecoverError::InvalidSignature, _ => panic!("Unsupported Secp256k1RecoverError"), } } } impl From<Secp256k1RecoverError> for u64 { fn from(v: Secp256k1RecoverError) -> u64 { match v { Secp256k1RecoverError::InvalidHash => 1, Secp256k1RecoverError::InvalidRecoveryId => 2, Secp256k1RecoverError::InvalidSignature => 3, } } } pub const SECP256K1_SIGNATURE_LENGTH: usize = 64; pub const SECP256K1_PUBLIC_KEY_LENGTH: usize = 64; #[repr(transparent)] #[derive( BorshSerialize, BorshDeserialize, BorshSchema, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, AbiExample, )] pub struct Secp256k1Pubkey(pub [u8; SECP256K1_PUBLIC_KEY_LENGTH]); impl Secp256k1Pubkey { pub fn new(pubkey_vec: &[u8]) -> Self { Self( <[u8; SECP256K1_PUBLIC_KEY_LENGTH]>::try_from(<&[u8]>::clone(&pubkey_vec)) .expect("Slice must be the same length as a Pubkey"), ) } pub fn to_bytes(self) -> [u8; 64] { self.0 } } /// Recover the public key from a [secp256k1] ECDSA signature and /// cryptographically-hashed message. /// /// [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1 /// /// This function is specifically intended for efficiently implementing /// Ethereum's [`ecrecover`] builtin contract, for use by Ethereum integrators. /// It may be useful for other purposes. /// /// [`ecrecover`]: https://docs.soliditylang.org/en/v0.8.14/units-and-global-variables.html?highlight=ecrecover#mathematical-and-cryptographic-functions /// /// `hash` is the 32-byte cryptographic hash (typically [`keccak`]) of an /// arbitrary message, signed by some public key. /// /// The recovery ID is a value in the range [0, 3] that is generated during /// signing, and allows the recovery process to be more efficient. 
Note that the /// `recovery_id` here does not directly correspond to an Ethereum recovery ID /// as used in `ecrecover`. This function accepts recovery IDs in the range of /// [0, 3], while Ethereum's recovery IDs have a value of 27 or 28. To convert /// an Ethereum recovery ID to a value this function will accept, subtract 27 /// from it, checking for underflow. In practice this function will not succeed /// if given a recovery ID of 2 or 3, as these values represent an /// "overflowing" signature, and this function returns an error when parsing /// overflowing signatures. /// /// [`keccak`]: crate::keccak /// [`wrapping_sub`]: https://doc.rust-lang.org/std/primitive.u8.html#method.wrapping_sub /// /// On success this function returns a [`Secp256k1Pubkey`], a wrapper around a /// 64-byte secp256k1 public key. This public key corresponds to the secret key /// that previously signed the message `hash` to produce the provided /// `signature`. /// /// While `secp256k1_recover` can be used to verify secp256k1 signatures by /// comparing the recovered key against an expected key, Solana also provides /// the [secp256k1 program][sp], which is more flexible, has lower CPU cost, and /// can validate many signatures at once. /// /// [sp]: crate::secp256k1_program /// /// The `secp256k1_recover` syscall is implemented with the [`libsecp256k1`] /// crate, which clients may also want to use. /// /// [`libsecp256k1`]: https://docs.rs/libsecp256k1/latest/libsecp256k1 /// /// # Hashing messages /// /// In ECDSA signing and key recovery the signed "message" is always a /// cryptographic hash, not the original message itself. If not a cryptographic /// hash, then an adversary can craft signatures that recover to arbitrary /// public keys. This means the caller of this function generally must hash the /// original message themselves and not rely on another party to provide the /// hash. /// /// Ethereum uses the [`keccak`] hash. /// /// # Signature malleability /// /// With the ECDSA signature algorithm it is possible for any party, given a /// valid signature of some message, to create a second signature that is /// equally valid. This is known as _signature malleability_. In many cases this /// is not a concern, but in cases where applications rely on signatures to have /// a unique representation this can be the source of bugs, potentially with /// security implications. /// /// **The Solana `secp256k1_recover` function does not prevent signature /// malleability**. This is in contrast to the Bitcoin secp256k1 library, which /// does prevent malleability by default. Solana accepts signatures with `S` /// values that are either in the _high order_ or in the _low order_, and it /// is trivial to produce one from the other. /// /// To prevent signature malleability, it is common for secp256k1 signature /// validators to only accept signatures with low-order `S` values, and reject /// signatures with high-order `S` values. 
The following code will accomplish /// this: /// /// ```rust /// # use solana_program::program_error::ProgramError; /// # let signature_bytes = [ /// # 0x83, 0x55, 0x81, 0xDF, 0xB1, 0x02, 0xA7, 0xD2, /// # 0x2D, 0x33, 0xA4, 0x07, 0xDD, 0x7E, 0xFA, 0x9A, /// # 0xE8, 0x5F, 0x42, 0x6B, 0x2A, 0x05, 0xBB, 0xFB, /// # 0xA1, 0xAE, 0x93, 0x84, 0x46, 0x48, 0xE3, 0x35, /// # 0x74, 0xE1, 0x6D, 0xB4, 0xD0, 0x2D, 0xB2, 0x0B, /// # 0x3C, 0x89, 0x8D, 0x0A, 0x44, 0xDF, 0x73, 0x9C, /// # 0x1E, 0xBF, 0x06, 0x8E, 0x8A, 0x9F, 0xA9, 0xC3, /// # 0xA5, 0xEA, 0x21, 0xAC, 0xED, 0x5B, 0x22, 0x13, /// # ]; /// let signature = libsecp256k1::Signature::parse_standard_slice(&signature_bytes) /// .map_err(|_| ProgramError::InvalidArgument)?; /// /// if signature.s.is_high() { /// return Err(ProgramError::InvalidArgument); /// } /// # Ok::<_, ProgramError>(()) /// ``` /// /// This has the downside that the program must link to the [`libsecp256k1`] /// crate and parse the signature just for this check. Note that `libsecp256k1` /// version 0.7.0 or greater is required for running on the Solana SBF target. /// /// [`libsecp256k1`]: https://docs.rs/libsecp256k1/latest/libsecp256k1 /// /// For the most accurate description of signature malleability, and its /// prevention in secp256k1, refer to comments in [`secp256k1.h`] in the Bitcoin /// Core secp256k1 library, the documentation of the [OpenZeppelin `recover` /// method for Solidity][ozr], and [this description of the problem on /// StackExchange][sxr]. /// /// [`secp256k1.h`]: https://github.com/bitcoin-core/secp256k1/blob/44c2452fd387f7ca604ab42d73746e7d3a44d8a2/include/secp256k1.h /// [ozr]: https://docs.openzeppelin.com/contracts/2.x/api/cryptography#ECDSA-recover-bytes32-bytes- /// [sxr]: https://bitcoin.stackexchange.com/questions/81115/if-someone-wanted-to-pretend-to-be-satoshi-by-posting-a-fake-signature-to-defrau/81116#81116 /// /// # Errors /// /// If `hash` is not 32 bytes in length this function returns /// [`Secp256k1RecoverError::InvalidHash`], though see notes /// on SBF-specific behavior below. /// /// If `recovery_id` is not in the range [0, 3] this function returns /// [`Secp256k1RecoverError::InvalidRecoveryId`]. /// /// If `signature` is not 64 bytes in length this function returns /// [`Secp256k1RecoverError::InvalidSignature`], though see notes /// on SBF-specific behavior below. /// /// If `signature` represents an "overflowing" signature this function returns /// [`Secp256k1RecoverError::InvalidSignature`]. Overflowing signatures are /// non-standard and should not be encountered in practice. /// /// If `signature` is otherwise invalid this function returns /// [`Secp256k1RecoverError::InvalidSignature`]. /// /// # SBF-specific behavior /// /// When calling this function on-chain the caller must verify the correct /// lengths of `hash` and `signature` beforehand. /// /// When run on-chain this function will not directly validate the lengths of /// `hash` and `signature`. It will assume they are the correct lengths and /// pass their pointers to the runtime, which will interpret them as 32-byte and /// 64-byte buffers. If the provided slices are too short, the runtime will read /// invalid data and attempt to interpret it, most likely returning an error, /// though in some scenarios it may be possible to incorrectly return /// successfully, or the transaction will abort if the syscall reads data /// outside of the program's memory space. If the provided slices are too long /// then they may be used to "smuggle" uninterpreted data. 
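As a concrete illustration of that caveat, here is a hypothetical guard an on-chain caller might place in front of the syscall; the helper name and the error mapping are assumptions for this sketch, not part of this module:

```rust
// Sketch: validate slice lengths before secp256k1_recover, since the
// syscall itself trusts the caller, as described above.
use solana_program::{
    keccak::HASH_BYTES,
    program_error::ProgramError,
    secp256k1_recover::{secp256k1_recover, Secp256k1Pubkey, SECP256K1_SIGNATURE_LENGTH},
};

fn checked_recover(
    hash: &[u8],
    recovery_id: u8,
    signature: &[u8],
) -> Result<Secp256k1Pubkey, ProgramError> {
    // 32-byte hash, 64-byte signature; reject anything else up front.
    if hash.len() != HASH_BYTES || signature.len() != SECP256K1_SIGNATURE_LENGTH {
        return Err(ProgramError::InvalidArgument);
    }
    secp256k1_recover(hash, recovery_id, signature).map_err(|_| ProgramError::InvalidArgument)
}
```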
/// /// # Examples /// /// This example demonstrates recovering a public key and using it to verify a /// signature with the `secp256k1_recover` syscall. It has three parts: a Solana /// program, an RPC client to call the program, and common definitions shared /// between the two. /// /// Common definitions: /// /// ``` /// use borsh::{BorshDeserialize, BorshSerialize}; /// /// #[derive(BorshSerialize, BorshDeserialize, Debug)] /// pub struct DemoSecp256k1RecoverInstruction { /// pub message: Vec<u8>, /// pub signature: [u8; 64], /// pub recovery_id: u8, /// } /// ``` /// /// The Solana program. Note that it uses `libsecp256k1` version 0.7.0 to parse /// the secp256k1 signature to prevent malleability. /// /// ```no_run /// use solana_program::{ /// entrypoint::ProgramResult, /// keccak, msg, /// program_error::ProgramError, /// secp256k1_recover::secp256k1_recover, /// }; /// /// /// The key we expect to sign secp256k1 messages, /// /// as serialized by `libsecp256k1::PublicKey::serialize`. /// const AUTHORIZED_PUBLIC_KEY: [u8; 64] = [ /// 0x8C, 0xD6, 0x47, 0xF8, 0xA5, 0xBF, 0x59, 0xA0, 0x4F, 0x77, 0xFA, 0xFA, 0x6C, 0xA0, 0xE6, 0x4D, /// 0x94, 0x5B, 0x46, 0x55, 0xA6, 0x2B, 0xB0, 0x6F, 0x10, 0x4C, 0x9E, 0x2C, 0x6F, 0x42, 0x0A, 0xBE, /// 0x18, 0xDF, 0x0B, 0xF0, 0x87, 0x42, 0xBA, 0x88, 0xB4, 0xCF, 0x87, 0x5A, 0x35, 0x27, 0xBE, 0x0F, /// 0x45, 0xAE, 0xFC, 0x66, 0x9C, 0x2C, 0x6B, 0xF3, 0xEF, 0xCA, 0x5C, 0x32, 0x11, 0xF7, 0x2A, 0xC7, /// ]; /// # pub struct DemoSecp256k1RecoverInstruction { /// # pub message: Vec<u8>, /// # pub signature: [u8; 64], /// # pub recovery_id: u8, /// # } /// /// pub fn process_secp256k1_recover( /// instruction: DemoSecp256k1RecoverInstruction, /// ) -> ProgramResult { /// // The secp256k1 recovery operation accepts a cryptographically-hashed /// // message only. Passing it anything else is insecure and allows signatures /// // to be forged. /// // /// // This means that the code calling `secp256k1_recover` must perform the hash /// // itself, and not assume that data passed to it has been properly hashed. /// let message_hash = { /// let mut hasher = keccak::Hasher::default(); /// hasher.hash(&instruction.message); /// hasher.result() /// }; /// /// // Reject high-s value signatures to prevent malleability. /// // Solana does not do this itself. /// // This may or may not be necessary depending on use case. /// { /// let signature = libsecp256k1::Signature::parse_standard_slice(&instruction.signature) /// .map_err(|_| ProgramError::InvalidArgument)?; /// /// if signature.s.is_high() { /// msg!("signature with high-s value"); /// return Err(ProgramError::InvalidArgument); /// } /// } /// /// let recovered_pubkey = secp256k1_recover( /// &message_hash.0, /// instruction.recovery_id, /// &instruction.signature, /// ) /// .map_err(|_| ProgramError::InvalidArgument)?; /// /// // If we're using this function for signature verification then we /// // need to check the pubkey is an expected value. /// // Here we are checking the secp256k1 pubkey against a known authorized pubkey. 
/// if recovered_pubkey.0 != AUTHORIZED_PUBLIC_KEY { /// return Err(ProgramError::InvalidArgument); /// } /// /// Ok(()) /// } /// ``` /// /// The RPC client program: /// /// ```no_run /// # use solana_program::example_mocks::solana_rpc_client; /// # use solana_program::example_mocks::solana_sdk; /// use anyhow::Result; /// use solana_rpc_client::rpc_client::RpcClient; /// use solana_sdk::{ /// instruction::Instruction, /// keccak, /// pubkey::Pubkey, /// signature::{Keypair, Signer}, /// transaction::Transaction, /// }; /// # use borsh::{BorshDeserialize, BorshSerialize}; /// # #[derive(BorshSerialize, BorshDeserialize, Debug)] /// # pub struct DemoSecp256k1RecoverInstruction { /// # pub message: Vec<u8>, /// # pub signature: [u8; 64], /// # pub recovery_id: u8, /// # } /// /// pub fn demo_secp256k1_recover( /// payer_keypair: &Keypair, /// secp256k1_secret_key: &libsecp256k1::SecretKey, /// client: &RpcClient, /// program_keypair: &Keypair, /// ) -> Result<()> { /// let message = b"hello world"; /// let message_hash = { /// let mut hasher = keccak::Hasher::default(); /// hasher.hash(message); /// hasher.result() /// }; /// /// let secp_message = libsecp256k1::Message::parse(&message_hash.0); /// let (signature, recovery_id) = libsecp256k1::sign(&secp_message, &secp256k1_secret_key); /// /// let signature = signature.serialize(); /// /// let instr = DemoSecp256k1RecoverInstruction { /// message: message.to_vec(), /// signature, /// recovery_id: recovery_id.serialize(), /// }; /// let instr = Instruction::new_with_borsh( /// program_keypair.pubkey(), /// &instr, /// vec![], /// ); /// /// let blockhash = client.get_latest_blockhash()?; /// let tx = Transaction::new_signed_with_payer( /// &[instr], /// Some(&payer_keypair.pubkey()), /// &[payer_keypair], /// blockhash, /// ); /// /// client.send_and_confirm_transaction(&tx)?; /// /// Ok(()) /// } /// ``` pub fn
( hash: &[u8], recovery_id: u8, signature: &[u8], ) -> Result<Secp256k1Pubkey, Secp256k1RecoverError> { #[cfg(target_os = "solana")] { let mut pubkey_buffer = [0u8; SECP256K1_PUBLIC_KEY_LENGTH]; let result = unsafe { crate::syscalls::sol_secp256k1_recover( hash.as_ptr(), recovery_id as u64, signature.as_ptr(), pubkey_buffer.as_mut_ptr(), ) }; match result { 0 => Ok(Secp256k1Pubkey::new(&pubkey_buffer)), error => Err(Secp256k1RecoverError::from(error)), } } #[cfg(not(target_os = "solana"))] { let message = libsecp256k1::Message::parse_slice(hash) .map_err(|_| Secp256k1RecoverError::InvalidHash)?; let recovery_id = libsecp256k1::RecoveryId::parse(recovery_id) .map_err(|_| Secp256k1RecoverError::InvalidRecoveryId)?; let signature = libsecp256k1::Signature::parse_standard_slice(signature) .map_err(|_| Secp256k1RecoverError::InvalidSignature)?; let secp256k1_key = libsecp256k1::recover(&message, &signature, &recovery_id) .map_err(|_| Secp256k1RecoverError::InvalidSignature)?; Ok(Secp256k1Pubkey::new(&secp256k1_key.serialize()[1..65])) } }
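Since the docs above describe converting Ethereum's 27/28 recovery IDs by subtracting 27 with an underflow check, here is a small sketch of that conversion; the helper name is an assumption for illustration:

```rust
// Sketch: map an Ethereum-style recovery ID (the `v` of ecrecover,
// 27 or 28) to the [0, 3] range this function accepts.
fn ethereum_v_to_recovery_id(v: u8) -> Option<u8> {
    // checked_sub guards against underflow for v < 27.
    let id = v.checked_sub(27)?;
    // IDs 2 and 3 denote "overflowing" signatures, which this
    // function rejects when parsing, so filter them out here too.
    if id <= 1 {
        Some(id)
    } else {
        None
    }
}

fn main() {
    assert_eq!(ethereum_v_to_recovery_id(27), Some(0));
    assert_eq!(ethereum_v_to_recovery_id(28), Some(1));
    assert_eq!(ethereum_v_to_recovery_id(26), None);
}
```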
secp256k1_recover
identifier_name
secp256k1_recover.rs
//! Public key recovery from [secp256k1] ECDSA signatures. //! //! [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1 //! //! _This module provides low-level cryptographic building blocks that must be //! used carefully to ensure proper security. Read this documentation and //! accompanying links thoroughly._ //! //! The [`secp256k1_recover`] syscall allows a secp256k1 public key that has //! previously signed a message to be recovered from the combination of the //! message, the signature, and a recovery ID. The recovery ID is generated //! during signing. //! //! Use cases for `secp256k1_recover` include: //! //! - Implementing the Ethereum [`ecrecover`] builtin contract. //! - Performing secp256k1 public key recovery generally. //! - Verifying a single secp256k1 signature. //! //! While `secp256k1_recover` can be used to verify secp256k1 signatures, Solana //! also provides the [secp256k1 program][sp], which is more flexible, has lower CPU //! cost, and can validate many signatures at once. //! //! [sp]: crate::secp256k1_program //! [`ecrecover`]: https://docs.soliditylang.org/en/v0.8.14/units-and-global-variables.html?highlight=ecrecover#mathematical-and-cryptographic-functions use { borsh::{BorshDeserialize, BorshSchema, BorshSerialize}, core::convert::TryFrom, thiserror::Error, }; #[derive(Debug, Clone, PartialEq, Eq, Error)] pub enum Secp256k1RecoverError { #[error("The hash provided to a secp256k1_recover is invalid")] InvalidHash, #[error("The recovery_id provided to a secp256k1_recover is invalid")] InvalidRecoveryId, #[error("The signature provided to a secp256k1_recover is invalid")] InvalidSignature, } impl From<u64> for Secp256k1RecoverError { fn from(v: u64) -> Secp256k1RecoverError { match v { 1 => Secp256k1RecoverError::InvalidHash, 2 => Secp256k1RecoverError::InvalidRecoveryId, 3 => Secp256k1RecoverError::InvalidSignature, _ => panic!("Unsupported Secp256k1RecoverError"), } } } impl From<Secp256k1RecoverError> for u64 { fn from(v: Secp256k1RecoverError) -> u64 { match v { Secp256k1RecoverError::InvalidHash => 1, Secp256k1RecoverError::InvalidRecoveryId => 2, Secp256k1RecoverError::InvalidSignature => 3, } } } pub const SECP256K1_SIGNATURE_LENGTH: usize = 64; pub const SECP256K1_PUBLIC_KEY_LENGTH: usize = 64; #[repr(transparent)] #[derive( BorshSerialize, BorshDeserialize, BorshSchema, Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash, AbiExample, )] pub struct Secp256k1Pubkey(pub [u8; SECP256K1_PUBLIC_KEY_LENGTH]); impl Secp256k1Pubkey { pub fn new(pubkey_vec: &[u8]) -> Self
pub fn to_bytes(self) -> [u8; 64] { self.0 } } /// Recover the public key from a [secp256k1] ECDSA signature and /// cryptographically-hashed message. /// /// [secp256k1]: https://en.bitcoin.it/wiki/Secp256k1 /// /// This function is specifically intended for efficiently implementing /// Ethereum's [`ecrecover`] builtin contract, for use by Ethereum integrators. /// It may be useful for other purposes. /// /// [`ecrecover`]: https://docs.soliditylang.org/en/v0.8.14/units-and-global-variables.html?highlight=ecrecover#mathematical-and-cryptographic-functions /// /// `hash` is the 32-byte cryptographic hash (typically [`keccak`]) of an /// arbitrary message, signed by some public key. /// /// The recovery ID is a value in the range [0, 3] that is generated during /// signing, and allows the recovery process to be more efficient. Note that the /// `recovery_id` here does not directly correspond to an Ethereum recovery ID /// as used in `ecrecover`. This function accepts recovery IDs in the range of /// [0, 3], while Ethereum's recovery IDs have a value of 27 or 28. To convert /// an Ethereum recovery ID to a value this function will accept, subtract 27 /// from it, checking for underflow. In practice this function will not succeed /// if given a recovery ID of 2 or 3, as these values represent an /// "overflowing" signature, and this function returns an error when parsing /// overflowing signatures. /// /// [`keccak`]: crate::keccak /// [`wrapping_sub`]: https://doc.rust-lang.org/std/primitive.u8.html#method.wrapping_sub /// /// On success this function returns a [`Secp256k1Pubkey`], a wrapper around a /// 64-byte secp256k1 public key. This public key corresponds to the secret key /// that previously signed the message `hash` to produce the provided /// `signature`. /// /// While `secp256k1_recover` can be used to verify secp256k1 signatures by /// comparing the recovered key against an expected key, Solana also provides /// the [secp256k1 program][sp], which is more flexible, has lower CPU cost, and /// can validate many signatures at once. /// /// [sp]: crate::secp256k1_program /// /// The `secp256k1_recover` syscall is implemented with the [`libsecp256k1`] /// crate, which clients may also want to use. /// /// [`libsecp256k1`]: https://docs.rs/libsecp256k1/latest/libsecp256k1 /// /// # Hashing messages /// /// In ECDSA signing and key recovery the signed "message" is always a /// cryptographic hash, not the original message itself. If not a cryptographic /// hash, then an adversary can craft signatures that recover to arbitrary /// public keys. This means the caller of this function generally must hash the /// original message themselves and not rely on another party to provide the /// hash. /// /// Ethereum uses the [`keccak`] hash. /// /// # Signature malleability /// /// With the ECDSA signature algorithm it is possible for any party, given a /// valid signature of some message, to create a second signature that is /// equally valid. This is known as _signature malleability_. In many cases this /// is not a concern, but in cases where applications rely on signatures to have /// a unique representation this can be the source of bugs, potentially with /// security implications. /// /// **The Solana `secp256k1_recover` function does not prevent signature /// malleability**. This is in contrast to the Bitcoin secp256k1 library, which /// does prevent malleability by default. 
Solana accepts signatures with `S` /// values that are either in the _high order_ or in the _low order_, and it /// is trivial to produce one from the other. /// /// To prevent signature malleability, it is common for secp256k1 signature /// validators to only accept signatures with low-order `S` values, and reject /// signatures with high-order `S` values. The following code will accomplish /// this: /// /// ```rust /// # use solana_program::program_error::ProgramError; /// # let signature_bytes = [ /// # 0x83, 0x55, 0x81, 0xDF, 0xB1, 0x02, 0xA7, 0xD2, /// # 0x2D, 0x33, 0xA4, 0x07, 0xDD, 0x7E, 0xFA, 0x9A, /// # 0xE8, 0x5F, 0x42, 0x6B, 0x2A, 0x05, 0xBB, 0xFB, /// # 0xA1, 0xAE, 0x93, 0x84, 0x46, 0x48, 0xE3, 0x35, /// # 0x74, 0xE1, 0x6D, 0xB4, 0xD0, 0x2D, 0xB2, 0x0B, /// # 0x3C, 0x89, 0x8D, 0x0A, 0x44, 0xDF, 0x73, 0x9C, /// # 0x1E, 0xBF, 0x06, 0x8E, 0x8A, 0x9F, 0xA9, 0xC3, /// # 0xA5, 0xEA, 0x21, 0xAC, 0xED, 0x5B, 0x22, 0x13, /// # ]; /// let signature = libsecp256k1::Signature::parse_standard_slice(&signature_bytes) /// .map_err(|_| ProgramError::InvalidArgument)?; /// /// if signature.s.is_high() { /// return Err(ProgramError::InvalidArgument); /// } /// # Ok::<_, ProgramError>(()) /// ``` /// /// This has the downside that the program must link to the [`libsecp256k1`] /// crate and parse the signature just for this check. Note that `libsecp256k1` /// version 0.7.0 or greater is required for running on the Solana SBF target. /// /// [`libsecp256k1`]: https://docs.rs/libsecp256k1/latest/libsecp256k1 /// /// For the most accurate description of signature malleability, and its /// prevention in secp256k1, refer to comments in [`secp256k1.h`] in the Bitcoin /// Core secp256k1 library, the documentation of the [OpenZeppelin `recover` /// method for Solidity][ozr], and [this description of the problem on /// StackExchange][sxr]. /// /// [`secp256k1.h`]: https://github.com/bitcoin-core/secp256k1/blob/44c2452fd387f7ca604ab42d73746e7d3a44d8a2/include/secp256k1.h /// [ozr]: https://docs.openzeppelin.com/contracts/2.x/api/cryptography#ECDSA-recover-bytes32-bytes- /// [sxr]: https://bitcoin.stackexchange.com/questions/81115/if-someone-wanted-to-pretend-to-be-satoshi-by-posting-a-fake-signature-to-defrau/81116#81116 /// /// # Errors /// /// If `hash` is not 32 bytes in length this function returns /// [`Secp256k1RecoverError::InvalidHash`], though see notes /// on SBF-specific behavior below. /// /// If `recovery_id` is not in the range [0, 3] this function returns /// [`Secp256k1RecoverError::InvalidRecoveryId`]. /// /// If `signature` is not 64 bytes in length this function returns /// [`Secp256k1RecoverError::InvalidSignature`], though see notes /// on SBF-specific behavior below. /// /// If `signature` represents an "overflowing" signature this function returns /// [`Secp256k1RecoverError::InvalidSignature`]. Overflowing signatures are /// non-standard and should not be encountered in practice. /// /// If `signature` is otherwise invalid this function returns /// [`Secp256k1RecoverError::InvalidSignature`]. /// /// # SBF-specific behavior /// /// When calling this function on-chain the caller must verify the correct /// lengths of `hash` and `signature` beforehand. /// /// When run on-chain this function will not directly validate the lengths of /// `hash` and `signature`. It will assume they are the correct lengths and /// pass their pointers to the runtime, which will interpret them as 32-byte and /// 64-byte buffers. 
If the provided slices are too short, the runtime will read /// invalid data and attempt to interpret it, most likely returning an error, /// though in some scenarios it may be possible to incorrectly return /// successfully, or the transaction will abort if the syscall reads data /// outside of the program's memory space. If the provided slices are too long /// then they may be used to "smuggle" uninterpreted data. /// /// # Examples /// /// This example demonstrates recovering a public key and using it to verify a /// signature with the `secp256k1_recover` syscall. It has three parts: a Solana /// program, an RPC client to call the program, and common definitions shared /// between the two. /// /// Common definitions: /// /// ``` /// use borsh::{BorshDeserialize, BorshSerialize}; /// /// #[derive(BorshSerialize, BorshDeserialize, Debug)] /// pub struct DemoSecp256k1RecoverInstruction { /// pub message: Vec<u8>, /// pub signature: [u8; 64], /// pub recovery_id: u8, /// } /// ``` /// /// The Solana program. Note that it uses `libsecp256k1` version 0.7.0 to parse /// the secp256k1 signature to prevent malleability. /// /// ```no_run /// use solana_program::{ /// entrypoint::ProgramResult, /// keccak, msg, /// program_error::ProgramError, /// secp256k1_recover::secp256k1_recover, /// }; /// /// /// The key we expect to sign secp256k1 messages, /// /// as serialized by `libsecp256k1::PublicKey::serialize`. /// const AUTHORIZED_PUBLIC_KEY: [u8; 64] = [ /// 0x8C, 0xD6, 0x47, 0xF8, 0xA5, 0xBF, 0x59, 0xA0, 0x4F, 0x77, 0xFA, 0xFA, 0x6C, 0xA0, 0xE6, 0x4D, /// 0x94, 0x5B, 0x46, 0x55, 0xA6, 0x2B, 0xB0, 0x6F, 0x10, 0x4C, 0x9E, 0x2C, 0x6F, 0x42, 0x0A, 0xBE, /// 0x18, 0xDF, 0x0B, 0xF0, 0x87, 0x42, 0xBA, 0x88, 0xB4, 0xCF, 0x87, 0x5A, 0x35, 0x27, 0xBE, 0x0F, /// 0x45, 0xAE, 0xFC, 0x66, 0x9C, 0x2C, 0x6B, 0xF3, 0xEF, 0xCA, 0x5C, 0x32, 0x11, 0xF7, 0x2A, 0xC7, /// ]; /// # pub struct DemoSecp256k1RecoverInstruction { /// # pub message: Vec<u8>, /// # pub signature: [u8; 64], /// # pub recovery_id: u8, /// # } /// /// pub fn process_secp256k1_recover( /// instruction: DemoSecp256k1RecoverInstruction, /// ) -> ProgramResult { /// // The secp256k1 recovery operation accepts a cryptographically-hashed /// // message only. Passing it anything else is insecure and allows signatures /// // to be forged. /// // /// // This means that the code calling `secp256k1_recover` must perform the hash /// // itself, and not assume that data passed to it has been properly hashed. /// let message_hash = { /// let mut hasher = keccak::Hasher::default(); /// hasher.hash(&instruction.message); /// hasher.result() /// }; /// /// // Reject high-s value signatures to prevent malleability. /// // Solana does not do this itself. /// // This may or may not be necessary depending on use case. /// { /// let signature = libsecp256k1::Signature::parse_standard_slice(&instruction.signature) /// .map_err(|_| ProgramError::InvalidArgument)?; /// /// if signature.s.is_high() { /// msg!("signature with high-s value"); /// return Err(ProgramError::InvalidArgument); /// } /// } /// /// let recovered_pubkey = secp256k1_recover( /// &message_hash.0, /// instruction.recovery_id, /// &instruction.signature, /// ) /// .map_err(|_| ProgramError::InvalidArgument)?; /// /// // If we're using this function for signature verification then we /// // need to check the pubkey is an expected value. /// // Here we are checking the secp256k1 pubkey against a known authorized pubkey. 
/// if recovered_pubkey.0 != AUTHORIZED_PUBLIC_KEY { /// return Err(ProgramError::InvalidArgument); /// } /// /// Ok(()) /// } /// ``` /// /// The RPC client program: /// /// ```no_run /// # use solana_program::example_mocks::solana_rpc_client; /// # use solana_program::example_mocks::solana_sdk; /// use anyhow::Result; /// use solana_rpc_client::rpc_client::RpcClient; /// use solana_sdk::{ /// instruction::Instruction, /// keccak, /// pubkey::Pubkey, /// signature::{Keypair, Signer}, /// transaction::Transaction, /// }; /// # use borsh::{BorshDeserialize, BorshSerialize}; /// # #[derive(BorshSerialize, BorshDeserialize, Debug)] /// # pub struct DemoSecp256k1RecoverInstruction { /// # pub message: Vec<u8>, /// # pub signature: [u8; 64], /// # pub recovery_id: u8, /// # } /// /// pub fn demo_secp256k1_recover( /// payer_keypair: &Keypair, /// secp256k1_secret_key: &libsecp256k1::SecretKey, /// client: &RpcClient, /// program_keypair: &Keypair, /// ) -> Result<()> { /// let message = b"hello world"; /// let message_hash = { /// let mut hasher = keccak::Hasher::default(); /// hasher.hash(message); /// hasher.result() /// }; /// /// let secp_message = libsecp256k1::Message::parse(&message_hash.0); /// let (signature, recovery_id) = libsecp256k1::sign(&secp_message, &secp256k1_secret_key); /// /// let signature = signature.serialize(); /// /// let instr = DemoSecp256k1RecoverInstruction { /// message: message.to_vec(), /// signature, /// recovery_id: recovery_id.serialize(), /// }; /// let instr = Instruction::new_with_borsh( /// program_keypair.pubkey(), /// &instr, /// vec![], /// ); /// /// let blockhash = client.get_latest_blockhash()?; /// let tx = Transaction::new_signed_with_payer( /// &[instr], /// Some(&payer_keypair.pubkey()), /// &[payer_keypair], /// blockhash, /// ); /// /// client.send_and_confirm_transaction(&tx)?; /// /// Ok(()) /// } /// ``` pub fn secp256k1_recover( hash: &[u8], recovery_id: u8, signature: &[u8], ) -> Result<Secp256k1Pubkey, Secp256k1RecoverError> { #[cfg(target_os = "solana")] { let mut pubkey_buffer = [0u8; SECP256K1_PUBLIC_KEY_LENGTH]; let result = unsafe { crate::syscalls::sol_secp256k1_recover( hash.as_ptr(), recovery_id as u64, signature.as_ptr(), pubkey_buffer.as_mut_ptr(), ) }; match result { 0 => Ok(Secp256k1Pubkey::new(&pubkey_buffer)), error => Err(Secp256k1RecoverError::from(error)), } } #[cfg(not(target_os = "solana"))] { let message = libsecp256k1::Message::parse_slice(hash) .map_err(|_| Secp256k1RecoverError::InvalidHash)?; let recovery_id = libsecp256k1::RecoveryId::parse(recovery_id) .map_err(|_| Secp256k1RecoverError::InvalidRecoveryId)?; let signature = libsecp256k1::Signature::parse_standard_slice(signature) .map_err(|_| Secp256k1RecoverError::InvalidSignature)?; let secp256k1_key = libsecp256k1::recover(&message, &signature, &recovery_id) .map_err(|_| Secp256k1RecoverError::InvalidSignature)?; Ok(Secp256k1Pubkey::new(&secp256k1_key.serialize()[1..65])) } }
{ Self( <[u8; SECP256K1_PUBLIC_KEY_LENGTH]>::try_from(<&[u8]>::clone(&pubkey_vec)) .expect("Slice must be the same length as a Pubkey"), ) }
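A small usage sketch of this constructor and `to_bytes`; note that the `expect` in the body above means `new` panics on any slice that is not exactly 64 bytes, so callers handling untrusted input may want to check the length first:

```rust
use solana_program::secp256k1_recover::{Secp256k1Pubkey, SECP256K1_PUBLIC_KEY_LENGTH};

fn main() {
    // Exactly 64 bytes round-trips; a 63-byte slice would panic in new().
    let bytes = [7u8; SECP256K1_PUBLIC_KEY_LENGTH];
    let pubkey = Secp256k1Pubkey::new(&bytes);
    assert_eq!(pubkey.to_bytes(), bytes);
}
```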
identifier_body
VismoController.js
/*requires VismoShapes Adds controls such as panning and zooming to a given dom element. Mousewheel zooming currently not working as it should - should center on location where mousewheel occurs Will be changed to take a handler parameter rather than a target js object */ var VismoController = function(elem,options){ //elem must have style.width and style.height etc. if(elem.length){ //for jquery var result = []; for(var i=0; i < elem.length; i++){ var x = new VismoController(elem[i],options); result.push(x); } return result; } if(!options)options = {}; if(!options.zoomfactor)options.zoomfactor=2; if(elem.vismoController) throw "this already has a vismo controller!"; elem.vismoController = true;// this; this.enabledControls = []; if(typeof elem == 'string') elem= document.getElementById(elem); this.setLimits({}); //jQuery(elem).css() //if(!elem.style || !elem.style.position) elem.style.position = "relative"; this.wrapper = elem; //a dom element to detect mouse actions this.handler = options.handler; //a js object to run actions on (with pan and zoom functions) this.defaultCursor = ""; var md = elem.onmousedown; var mu = elem.onmouseup; var mm = elem.onmousemove; for(var i=0; i < elem.childNodes.length; i++){ var child = elem.childNodes[i]; try{ child.onmousedown = function(e){if(md)md(e);} child.onmouseup = function(e){if(mu)mu(e);} child.onmousemove = function(e){if(mm)mm(e);} } catch(e){ } } var controlDiv = document.createElement('div'); controlDiv.style.position = "absolute"; controlDiv.style.top = "0"; controlDiv.style.left = "0"; controlDiv.className = 'vismoControls'; jQuery(controlDiv).css({'z-index':10000, height:"120px",width:"60px"}); this.wrapper.appendChild(controlDiv); this.controlDiv = controlDiv; this.controlCanvas = new VismoCanvas(this.controlDiv); jQuery(this.controlDiv).mouseover(function(e){e.stopPropagation();e.preventDefault();}); //this.controlDiv.vismoController = this; var vismoController = this; var preventDef = function(e){ if(e.button == 2) return true; if (e && e.stopPropagation) //if stopPropagation method supported e.stopPropagation() else e.cancelBubble=true return false; }; var that = this; var f = function(e,s){ var vismoController = that; vismoController._panzoomClickHandler(e,s,vismoController); return preventDef(e); }; this.controlCanvas.mouse({up:preventDef,down:f,dblclick:preventDef}); //this.wrapper.vismoController = this; var start_transformation = options.transformation; if(start_transformation){ if(!start_transformation.origin) start_transformation.origin = {}; this.transformation = start_transformation; } else{ this.transformation = {'translate':{x:0,y:0}, 'scale': {x:1, y:1},'rotate': {x:0,y:0,z:0},origin:{}}; } this.transformation.origin.x = jQuery(elem).width() / 2; this.transformation.origin.y = jQuery(elem).height() / 2; var t = this.transformation; //looks for specifically named function in the target js object if(!this.handler) { alert("no transform handler function defined"); } //this.wrapper.vismoController = this; this.enabled = true; if(!options) options = {}; if(!options.controls)options.controls =['pan','zoom','mousepanning','mousewheelzooming']; this.options = options; if(!this.options.controlStroke){ this.options.controlStroke = "#000000"; } if(!this.options.controlFill){ this.options.controlFill = "rgba(150,150,150,0.7)"; } this.addControls(this.options.controls); this.limits = {scale:{}}; if(this.options.maxZoom) { this.limits.scale.x =this.options.maxZoom; this.limits.scale.y = this.options.maxZoom; } if(this.options.minZoom){ this.limits.scale.minx 
=this.options.minZoom; this.limits.scale.miny =this.options.minZoom; } this.pansensitivity =100; if(this.options.pansensitivity){ this.pansensitivity =this.options.pansensitivity; } jQuery(window).unload(function(){ that.controlCanvas = null; that.controlDiv = null; }) }; VismoController.prototype = { setLimits: function(transformation){ this.limits = transformation; } ,getHandler: function(){ return this.handler; } ,setHandler: function(handler){ this.handler = handler; handler(this.transformation); } ,getEnabledControls: function(){ return this.enabledControls; } ,_addEnabledControl: function(controlName){ this.enabledControls.push(controlName); } ,applyLayer: function(){ var that = this; var hidebuttons = function(){ var shapes = that.controlCanvas.getMemory(); for(var i=0; i < shapes.length; i++){ shapes[i].setProperty("hidden",true); } that.controlCanvas.render(); }; this.controlCanvas.render(); if(this.options.hidebuttons){ hidebuttons(); return; } if(VismoUtils.browser.isIE6) return; var enabled = this.getEnabledControls(); var pan,zoom; if(enabled.contains("pan")) pan = true; if(enabled.contains("zoom")) zoom = true; var callback = function(response){ if(!response)return; if(!VismoUtils.svgSupport())return; var shape; if(false == true){ //return; shape = document.createElement("div"); shape.innerHTML = response; } else{ shape = document.createElement('object'); shape.setAttribute('codebase', 'http://www.adobe.com/svg/viewer/install/'); if(VismoUtils.browser.isIE)shape.setAttribute('classid', '15'); shape.setAttribute('style',"overflow:hidden;position:absolute;z-index:0;width:60px;height:120px;"); shape.setAttribute('type',"image/svg+xml"); var dataString = 'data:image/svg+xml,'+ response; shape.setAttribute('data', dataString); // the "<svg>...</svg>" returned from Ajax call jQuery(shape).css({width:60,height:120}) } that.controlDiv.appendChild(shape); jQuery(that.controlDiv).css({"background-image":"none"}); hidebuttons(); }; if(pan && zoom) callback(this.panzoomcontrolsSVG); } ,getTransformation: function(){ return this.transformation; } ,translate: function(x,y){ var t= this.getTransformation(); t.translate.x = x; t.translate.y = y; this.transform(); }, addMouseWheelZooming: function(){ /*not supported for internet explorer*/ var that = this; this._addEnabledControl("mousewheelzooming"); this.crosshair = {lastdelta:false}; this.crosshair.pos = {x:0,y:0}; var t = this.getTransformation(); var mw = this.wrapper.onmousewheel; var that = this; var mm = this.wrapper.onmousemove; var doingmw = false; var mwactive = false; var cancelMouseZoomCursor = function(){ if(VismoUtils.browser.isIE6)that.wrapper.style.cursor = ""; else jQuery(that.wrapper).removeClass("zooming"); } jQuery(this.wrapper).mousedown(function(e){ mwactive = true; if(VismoUtils.browser.isIE6)this.style.cursor = "crosshair"; else { if(!jQuery(that.wrapper).hasClass("panning")){ jQuery(that.wrapper).addClass("zooming"); } } window.setTimeout(cancelMouseZoomCursor,2000); }); jQuery(this.wrapper).mouseout(function(e){ var newTarget; if(e.toElement) newTarget = e.toElement; else newTarget = e.relatedTarget; if(jQuery(newTarget,that.wrapper).length == 0){ //if not a child turn off mwactive = false; } cancelMouseZoomCursor(); }); var domw = function(e){ if(!that.enabled) return; /* thanks to http://adomas.org/javascript-mouse-wheel */ var delta = 0; if(!that.goodToTransform(e)) { doingmw = false; return false; } var t = VismoClickingUtils.resolveTargetWithVismo(e); if(t != that.wrapper && t.parentNode !=that.wrapper) return 
false; if (e.wheelDelta) { /* IE/Opera. */ delta = e.wheelDelta/120; /** In Opera 9, delta differs in sign as compared to IE. */ if (window.opera) delta = -delta; } else if (e.detail) { /** Mozilla case. */ /** In Mozilla, sign of delta is different than in IE. * Also, delta is multiple of 3. */ delta = -e.detail/3; } var sensitivity = 0.4; var transform = that.getTransformation(); var scale =transform.scale; var origin = transform.origin; var mousepos = VismoClickingUtils.getMouseFromEvent(e); var w = parseInt(that.wrapper.style.width) / 2; var h = parseInt(that.wrapper.style.height) / 2; var translation = VismoTransformations.undoTransformation(mousepos.x,mousepos.y,that.transformation); transform.translate= {x: -translation.x, y: -translation.y}; //{x: -mousepos.x + w,y: -mousepos.y + h}; transform.origin = { x: mousepos.x, y: mousepos.y }; if(delta > that.crosshair.lastdelta + sensitivity || delta < that.crosshair.lastdelta - sensitivity){ var newx,newy; if(delta > 0){ newx = parseFloat(scale.x) * 2; newy = parseFloat(scale.y) * 2; } else{ newx = parseFloat(scale.x) / 2; newy = parseFloat(scale.y) / 2; } if(newx > 0 && newy > 0){ scale.x = newx; scale.y = newy; that.setTransformation(transform); } } that.crosshair.lastdelta = delta; doingmw = false; return false; }; var onmousewheel = function(e){ if(!VismoUtils.browser.isIE){ jQuery(that.wrapper).addClass("zooming"); } if(e.preventDefault){e.preventDefault();} if (e && e.stopPropagation) { e.stopPropagation(); } e.cancelBubble=true; if(!mwactive) return false; if(!doingmw) { var f = function(){ domw(e); return false; }; window.setTimeout(f,50); doingmw = true; } return false; };
var el = e.target; //var el = e.srcElement; if(!el) return; while(el != element){ if(el == element) { onmousewheel(e); return false; } el = el.parentNode; } return; }; window.onmousewheel = document.onmousewheel; return; } else if (element.addEventListener){ element.onmousewheel = onmousewheel; //safari element.addEventListener('DOMMouseScroll', onmousewheel, false);/** DOMMouseScroll is for mozilla. */ } else if(element.attachEvent){ element.attachEvent("onmousewheel", onmousewheel); //safari } else{ //it's ie.. or something non-standardised. do nowt //window.onmousewheel = document.onmousewheel = onmousewheel; } } ,disable: function(){ //console.log("disabled"); jQuery(".vismoControls",this.wrapper).css({display:"none"}); this.enabled = false; } ,enable: function(){ //console.log("enabled"); this.enabled = true; jQuery(".vismoControls",this.wrapper).css({display:""}); } ,goodToTransform: function(e){ var t = VismoClickingUtils.resolveTarget(e); switch(t.tagName){ case "INPUT": return false; case "SELECT": return false; case "OPTION": return false; } if(t && t.getAttribute("class") == "vismoControl") return false; return true; } ,addMousePanning: function(){ this._addEnabledControl("mousepanning"); var that = this; var el = that.wrapper; var md = el.onmousedown; var mu = el.onmouseup; var mm = el.onmousemove; var panning_status = false; //alert('here'); //jQuery(document).mouseup(function(e){alert("cool");}); //doesn't work?! var intervalMS = 100; if(VismoUtils.browser.isIE6){ intervalMS = 300; } var interval; var cancelPanning = function(e){ if(interval)window.clearInterval(interval); panning_status = false; that.transform(); if(!VismoUtils.browser.isIE6){jQuery(that.wrapper).removeClass("panning");} //style.cursor= that.defaultCursor; that.wrapper.onmousemove = mm; return false; }; jQuery(that.controlDiv).mousedown(function(e){ cancelPanning(); }); var onmousemove = function(e){ if(e && e.shiftKey) {return false;} if(mm){mm(e);} if(!that.enabled) {return;} if(!panning_status) { return; } if(!VismoUtils.browser.isIE && !jQuery(that.wrapper).hasClass("panning")){ jQuery(that.wrapper).addClass("panning") } if(!that.goodToTransform(e)) {return;} var pos = VismoClickingUtils.getMouseFromEventRelativeToElement(e,panning_status.clickpos.x,panning_status.clickpos.y,panning_status.elem); if(!pos){return;} var t = that.getTransformation(); //if(this.transformation) t = this.transformation; var sc = t.scale; /* work out deltas */ var xd =parseFloat(pos.x /sc.x); var yd = parseFloat(pos.y / sc.y); t.translate.x = panning_status.translate.x + xd; t.translate.y =panning_status.translate.y +yd; if(!VismoUtils.browser.isIE6){ jQuery(that.wrapper).removeClass("zooming"); //that.transform(); } if(pos.x > 5 || pos.y > 5) panning_status.isClick = false; if(pos.x < 5|| pos.y < 5) panning_status.isClick = false; return false; }; jQuery(this.wrapper).mousedown(function(e){ if(e.button != 2)e.preventDefault(); var jqw = jQuery(that.wrapper); if(panning_status){ return; } if(md) {md(e);} if(!that.enabled) return; interval =window.setInterval(function(){that.transform();},intervalMS); if(!VismoUtils.browser.isIE6){ jqw.addClass("panning"); } var target = VismoClickingUtils.resolveTarget(e); target = el; if(!target) return; var t = that.transformation.translate; var sc =that.transformation.scale; var realpos = VismoClickingUtils.getMouseFromEvent(e); if(!realpos) return; //this.vismoController = that; var element = VismoClickingUtils.resolveTargetWithVismo(e); element = el; panning_status = {clickpos: realpos, 
translate:{x: t.x,y:t.y},elem: element,isClick:true}; that.wrapper.onmousemove = onmousemove; }); jQuery(document).mouseup(function(e){ e.preventDefault(); if(panning_status.isClick && mu){ mu(e); }; if(panning_status){ cancelPanning(e); } }); jQuery(document).mousemove(function(e){ if(panning_status){ onmousemove(e); var parent= e.target; while(parent.parentNode){ parent = parent.parentNode; if(parent == that.wrapper) return; } //if(parent != that.wrapper)cancelPanning(e); (not a good idea for tooltips) } }); }, setTransformation: function(t){ if(this.limits){ if(this.limits.scale){ if(t.scale.x > this.limits.scale.x){ t.scale.x = this.limits.scale.x;} if(t.scale.y > this.limits.scale.y){ t.scale.y = this.limits.scale.y; } if(t.scale.x < this.limits.scale.minx){ t.scale.x = this.limits.scale.minx;} if(t.scale.y < this.limits.scale.miny){ t.scale.y = this.limits.scale.miny;} } } if(!t.origin){ var w = jQuery(this.wrapper).width(); var h = jQuery(this.wrapper).height(); t.origin = {x: w/2, y: h/2}; } if(this.enabled){ if(!t.scale && !t.translate && !t.rotate) alert("bad transformation applied - any call to setTransformation must contain translate,scale and rotate"); this.transformation = t; try{this.handler(t);}catch(e){}; } //console.log("transformation set to ",t); }, createButtonLabel: function(r,type,offset){ var properties= {'shape':'path', stroke: this.options.controlStroke,lineWidth: '1','z-index':'2'}; properties.actiontype = type; var coords=[]; if(type == 'E'){ coords =[r,0,-r,0,'M',r,0,0,-r,"M",r,0,0,r]; } else if(type =='W'){ coords =[-r,0,r,0,'M',-r,0,0,r,"M",-r,0,0,-r]; } else if(type == 'S'){ coords =[0,-r,0,r,'M',0,r,-r,0,"M",0,r,r,0]; } else if(type == 'N'){ coords =[0,-r,0,r,'M',0,-r,r,0,"M",0,-r,-r,0]; } else if(type == 'in'){ coords =[-r,0,r,0,"M",0,-r,0,r]; } else if(type == 'out'){ coords = [-r,0,r,0]; } for(var i=0; i < coords.length; i+=2 ){ if(coords[i] == "M") i+=1; coords[i] += offset.x; coords[i+1] += offset.y; } return new VismoShape(properties,coords); }, createButton: function(width,direction,offset,properties) { var canvas = this.controlCanvas; if(!canvas) throw "no canvas to create on.."; if(!width) width = 120; var r = width/2; offset = { x: offset.x || 0, y: offset.y || 0 }; var coords; if(this.options.controlShape && this.options.controlShape == 'circle'){ coords = [ offset.x , offset.y, width/2 ]; properties.shape = 'circle'; } else{ coords = [ offset.x, offset.y, offset.x + width, offset.y, offset.x + width, offset.y + width, offset.x, offset.y + width ]; properties.shape = 'polygon'; } properties.fill =this.options.controlFill; properties.stroke =this.options.controlStroke; var button = new VismoShape(properties,coords); var bb = button.getBoundingBox(); buttoncenter = {x:bb.center.x,y:bb.center.y}; var label = this.createButtonLabel(r-2,properties.actiontype,buttoncenter); canvas.add(label); canvas.add(button); return button; }, addControls: function(list){ for(var i= 0; i < list.length; i++){ this.addControl(list[i]); } } ,addControl: function(controlType) { switch(controlType) { //case "zoom": case "pan": this.addPanningActions(); break; case "zoom": this.addZoomingActions(); break; case "mousepanning": this.addMousePanning(); break; case "mousewheelzooming": this.addMouseWheelZooming(); break; case "rotation": this.addRotatingActions(); break; default: break; } }, addPanningActions: function(controlDiv){ this._addEnabledControl("pan"); this.createButton(12,180,{x:-6,y:-54},{'actiontype':'N','name':'pan north','buttonType': 'narrow'}); 
this.createButton(12,270,{x:10,y:-38},{'actiontype':'E','name':'pan east','buttonType': 'earrow'}); //this.createButton(10,90,{x:16,y:16},{'actiontype':'O','name':'re-center','buttonType': ''}); this.createButton(12,90,{x:-22,y:-38},{'actiontype':'W','name':'pan west','buttonType': 'warrow'}); this.createButton(12,0,{x:-6,y:-20},{'actiontype':'S','name':'pan south','buttonType': 'sarrow'}); this.applyLayer(); }, addRotatingActions: function(){ /* var rotateCanvas = this.controlCanvas.getDomElement(); this.createButton(rotateCanvas,10,180,{x:16,y:2},{'actiontype':'rotatezup','name':'pan north','buttonType': 'narrow'}); this.createButton(rotateCanvas,10,0,{x:16,y:30},{'actiontype':'rotatezdown','name':'pan south','buttonType': 'sarrow'}); this.createButton(rotateCanvas,10,270,{x:30,y:16},{'actiontype':'rotatezright','name':'rotate to right','buttonType': 'earrow'}); this.createButton(rotateCanvas,10,90,{x:2,y:16},{'actiontype':'rotatezleft','name':'rotate to left','buttonType': 'warrow'}); rotateCanvas.onmouseup = this._panzoomClickHandler;*/ }, addZoomingActions: function(){ this._addEnabledControl("zoom"); this.createButton(12,180,{x:-6,y:12},{'actiontype':'in','name':'zoom in','buttonType': 'plus'}); this.createButton(12,180,{x:-6,y:42},{'actiontype':'out','name':'zoom out','buttonType': 'minus'}); this.applyLayer(); } ,zoom: function(x,y){ var t = this.getTransformation(); t.scale.x = x; if(!y) y= x; t.scale.y = y; this.setTransformation(t); } ,panTo: function(x,y){ //if(!this.enabled) return; var t = this.getTransformation(); var finalX = -x; var finalY = -y; var thisx,thisy; var direction = {}; var difference = {}; thisx = t.translate.x; thisy = t.translate.y; difference.x= thisx - finalX; difference.y = thisy - finalY; direction.x = -difference.x / 5; direction.y = -difference.y / 5; var change = true; var that = this; var f = function(){ change= {x: false,y:false}; if(thisx > finalX && thisx + direction.x > finalX) {thisx += direction.x;change.x=true;} else if(thisx < finalX && thisx + direction.x < finalX) {thisx += direction.x;change.x=true;} else{ t.translate.x = finalX; } if(thisy > finalY && thisy + direction.y > finalY) {thisy += direction.y;change.y=true;} else if(thisy < finalY && thisy + direction.y < finalY) {thisy += direction.y;change.y=true;} else{ change.x = true; t.translate.y =finalY; } if(change.x){ t.translate.x = thisx; } else{ t.translate.x = finalX; } if(change.y){ t.translate.y = thisy; } else{ t.translate.y = finalY; } if(t.translate.x != finalX && t.translate.y != finalY){ that.setTransformation(t); window.setTimeout(f,5); } else{ that.setTransformation(t); } }; f(); //window.setTimeout(pan,200); } ,transform: function(){ if(this.enabled){ var t = this.getTransformation(); var s = t.scale; var tr = t.translate; if(s.x <= 0) s.x = 0.1125; if(s.y <= 0) s.y = 0.1125; var ok = true; var lim = this.limits; if(lim.scale){ if(s.y < lim.scale.miny) t.scale.y = lim.scale.miny; if(s.x < lim.scale.minx) t.scale.x = lim.scale.minx; if(s.y > lim.scale.y) t.scale.y = lim.scale.y; if(s.x > lim.scale.x) t.scale.x = lim.scale.x; } this.handler(this.transformation); } }, _panzoomClickHandler: function(e,hit,controller) { if(!hit) return; var pan = {}; var t =controller.getTransformation(); if(!t.scale) t.scale = {x:1,y:1}; if(!t.translate) t.translate = {x:0,y:0}; if(!t.rotate) t.rotate = {x:0,y:0,z:0}; var scale =t.scale; pan.x = parseFloat(this.pansensitivity / scale.x); pan.y = parseFloat(this.pansensitivity / scale.y); switch(hit.getProperty("actiontype")) { case "W": 
t.translate.x += pan.x; break; case "O": t.translate.x = 0; t.translate.y = 0; break; case "E": t.translate.x -= pan.x; break; case "N": t.translate.y += pan.y; break; case "S": t.translate.y -= pan.y; break; case "in": scale.x *= this.options.zoomfactor; scale.y *= this.options.zoomfactor; break; case "out": scale.x /= this.options.zoomfactor; scale.y /= this.options.zoomfactor; break; case "rotatezright": if(!t.rotate.z) t.rotate.z = 0; //console.log("right",t.rotate.z); t.rotate.z -= 0.1; var left =6.28318531; if(t.rotate.z <0 )t.rotate.z =left; break; case "rotatezup": if(!t.rotate.y) t.rotate.y = 0; t.rotate.y += 0.1; break; case "rotatezdown": if(!t.rotate.y) t.rotate.y = 0; t.rotate.y -= 0.1; break; case "rotatezleft": if(!t.rotate.z) t.rotate.z = 0; t.rotate.z += 0.1; break; default: break; } controller.transform(); return false; } };
var element = this.wrapper; if(VismoUtils.browser.isIE) { document.onmousewheel = function(e){ if(!e)e = window.event;
random_line_split
VismoController.js
/*requires VismoShapes Adds controls such as panning and zooming to a given dom element. Mousewheel zooming currently not working as it should - should center on location where mousewheel occurs Will be changed to take a handler parameter rather than a target js object */ var VismoController = function(elem,options){ //elem must have style.width and style.height etc. if(elem.length){ //for jquery var result = []; for(var i=0; i < elem.length; i++){ var x = new VismoController(elem[i],options); result.push(x); } return result; } if(!options)options = {}; if(!options.zoomfactor)options.zoomfactor=2; if(elem.vismoController) throw "this already has a vismo controller!"; elem.vismoController = true;// this; this.enabledControls = []; if(typeof elem == 'string') elem= document.getElementById(elem); this.setLimits({}); //jQuery(elem).css() //if(!elem.style || !elem.style.position) elem.style.position = "relative"; this.wrapper = elem; //a dom element to detect mouse actions this.handler = options.handler; //a js object to run actions on (with pan and zoom functions) this.defaultCursor = ""; var md = elem.onmousedown; var mu = elem.onmouseup; var mm = elem.onmousemove; for(var i=0; i < elem.childNodes.length; i++){ var child = elem.childNodes[i]; try{ child.onmousedown = function(e){if(md)md(e);} child.onmouseup = function(e){if(mu)mu(e);} child.onmousemove = function(e){if(mm)mm(e);} } catch(e){ } } var controlDiv = document.createElement('div'); controlDiv.style.position = "absolute"; controlDiv.style.top = "0"; controlDiv.style.left = "0"; controlDiv.className = 'vismoControls'; jQuery(controlDiv).css({'z-index':10000, height:"120px",width:"60px"}); this.wrapper.appendChild(controlDiv); this.controlDiv = controlDiv; this.controlCanvas = new VismoCanvas(this.controlDiv); jQuery(this.controlDiv).mouseover(function(e){e.stopPropagation();e.preventDefault();}); //this.controlDiv.vismoController = this; var vismoController = this; var preventDef = function(e){ if(e.button == 2) return true; if (e && e.stopPropagation) //if stopPropagation method supported e.stopPropagation() else e.cancelBubble=true return false; }; var that = this; var f = function(e,s){ var vismoController = that; vismoController._panzoomClickHandler(e,s,vismoController); return preventDef(e); }; this.controlCanvas.mouse({up:preventDef,down:f,dblclick:preventDef}); //this.wrapper.vismoController = this; var start_transformation = options.transformation; if(start_transformation){ if(!start_transformation.origin) start_transformation.origin = {}; this.transformation = start_transformation; } else{ this.transformation = {'translate':{x:0,y:0}, 'scale': {x:1, y:1},'rotate': {x:0,y:0,z:0},origin:{}}; } this.transformation.origin.x = jQuery(elem).width() / 2; this.transformation.origin.y = jQuery(elem).height() / 2; var t = this.transformation; //looks for specifically named function in the target js object if(!this.handler) { alert("no transform handler function defined"); } //this.wrapper.vismoController = this; this.enabled = true; if(!options) options = {}; if(!options.controls)options.controls =['pan','zoom','mousepanning','mousewheelzooming']; this.options = options; if(!this.options.controlStroke){ this.options.controlStroke = "#000000"; } if(!this.options.controlFill){ this.options.controlFill = "rgba(150,150,150,0.7)"; } this.addControls(this.options.controls); this.limits = {scale:{}}; if(this.options.maxZoom) { this.limits.scale.x =this.options.maxZoom; this.limits.scale.y = this.options.maxZoom; } if(this.options.minZoom){ this.limits.scale.minx 
=this.options.minZoom;
        this.limits.scale.miny = this.options.minZoom;
    }
    this.pansensitivity = 100;
    if (this.options.pansensitivity) {
        this.pansensitivity = this.options.pansensitivity;
    }
    jQuery(window).unload(function(){
        that.controlCanvas = null;
        that.controlDiv = null;
    });
};

VismoController.prototype = {
    setLimits: function(transformation){
        this.limits = transformation;
    },
    getHandler: function(){
        return this.handler;
    },
    setHandler: function(handler){
        this.handler = handler;
        handler(this.transformation);
    },
    getEnabledControls: function(){
        return this.enabledControls;
    },
    _addEnabledControl: function(controlName){
        this.enabledControls.push(controlName);
    },
    applyLayer: function(){
        var that = this;
        var hidebuttons = function(){
            var shapes = that.controlCanvas.getMemory();
            for (var i = 0; i < shapes.length; i++) {
                shapes[i].setProperty("hidden", true);
            }
            that.controlCanvas.render();
        };
        this.controlCanvas.render();
        if (this.options.hidebuttons) {
            hidebuttons();
            return;
        }
        if (VismoUtils.browser.isIE6) return;
        var enabled = this.getEnabledControls();
        var pan, zoom;
        if (enabled.contains("pan")) pan = true;
        if (enabled.contains("zoom")) zoom = true;
        var callback = function(response){
            if (!response) return;
            if (!VismoUtils.svgSupport()) return;
            var shape;
            if (false == true) { // dead debug branch, kept from the original
                //return;
                shape = document.createElement("div");
                shape.innerHTML = response;
            }
            else {
                shape = document.createElement('object');
                shape.setAttribute('codebase', 'http://www.adobe.com/svg/viewer/install/');
                if (VismoUtils.browser.isIE) shape.setAttribute('classid', '15');
                shape.setAttribute('style', "overflow:hidden;position:absolute;z-index:0;width:60px;height:120px;");
                shape.setAttribute('type', "image/svg+xml");
                var dataString = 'data:image/svg+xml,' + response;
                shape.setAttribute('data', dataString); // the "<svg>...</svg>" returned from Ajax call
                jQuery(shape).css({width: 60, height: 120});
            }
            that.controlDiv.appendChild(shape);
            jQuery(that.controlDiv).css({"background-image": "none"});
            hidebuttons();
        };
        if (pan && zoom) callback(this.panzoomcontrolsSVG);
    },
    getTransformation: function(){
        return this.transformation;
    },
    translate: function(x, y){
        var t = this.getTransformation();
        t.translate.x = x;
        t.translate.y = y;
        this.transform();
    },
    addMouseWheelZooming: function(){
        /* not supported for internet explorer */
        var that = this;
        this._addEnabledControl("mousewheelzooming");
        this.crosshair = {lastdelta: false};
        this.crosshair.pos = {x: 0, y: 0};
        var t = this.getTransformation();
        var mw = this.wrapper.onmousewheel;
        var mm = this.wrapper.onmousemove;
        var doingmw = false;
        var mwactive = false;
        var cancelMouseZoomCursor = function(){
            if (VismoUtils.browser.isIE6) that.wrapper.style.cursor = "";
            else jQuery(that.wrapper).removeClass("zooming");
        };
        jQuery(this.wrapper).mousedown(function(e){
            mwactive = true;
            if (VismoUtils.browser.isIE6) this.style.cursor = "crosshair";
            else {
                if (!jQuery(that.wrapper).hasClass("panning")) {
                    jQuery(that.wrapper).addClass("zooming");
                }
            }
            window.setTimeout(cancelMouseZoomCursor, 2000);
        });
        jQuery(this.wrapper).mouseout(function(e){
            var newTarget;
            if (e.toElement) newTarget = e.toElement;
            else newTarget = e.relatedTarget;
            if (jQuery(newTarget, that.wrapper).length == 0) {
                // if not a child, turn off
                mwactive = false;
            }
            cancelMouseZoomCursor();
        });
        var domw = function(e){
            if (!that.enabled) return;
            /* thanks to http://adomas.org/javascript-mouse-wheel */
            var delta = 0;
            if (!that.goodToTransform(e)) {
                doingmw = false;
                return false;
            }
            var t = VismoClickingUtils.resolveTargetWithVismo(e);
            if (t != that.wrapper && t.parentNode != that.wrapper) return false;
            if (e.wheelDelta) {
                /* IE/Opera. */
                delta = e.wheelDelta / 120;
                /* In Opera 9, delta differs in sign as compared to IE. */
                if (window.opera) delta = -delta;
            } else if (e.detail) {
                /* Mozilla case: sign of delta differs from IE, and delta is a multiple of 3. */
                delta = -e.detail / 3;
            }
            var sensitivity = 0.4;
            var transform = that.getTransformation();
            var scale = transform.scale;
            var origin = transform.origin;
            var mousepos = VismoClickingUtils.getMouseFromEvent(e);
            var w = parseInt(that.wrapper.style.width) / 2;
            var h = parseInt(that.wrapper.style.height) / 2;
            var translation = VismoTransformations.undoTransformation(mousepos.x, mousepos.y, that.transformation);
            transform.translate = {x: -translation.x, y: -translation.y}; //{x: -mousepos.x + w, y: -mousepos.y + h};
            transform.origin = {x: mousepos.x, y: mousepos.y};
            if (delta > that.crosshair.lastdelta + sensitivity || delta < that.crosshair.lastdelta - sensitivity) {
                var newx, newy;
                if (delta > 0) {
                    newx = parseFloat(scale.x) * 2;
                    newy = parseFloat(scale.y) * 2;
                }
                else {
                    newx = parseFloat(scale.x) / 2;
                    newy = parseFloat(scale.y) / 2;
                }
                if (newx > 0 && newy > 0) {
                    scale.x = newx;
                    scale.y = newy;
                    that.setTransformation(transform);
                }
            }
            that.crosshair.lastdelta = delta;
            doingmw = false;
            return false;
        };
        var onmousewheel = function(e){
            if (!VismoUtils.browser.isIE) {
                jQuery(that.wrapper).addClass("zooming");
            }
            if (e.preventDefault) { e.preventDefault(); }
            if (e && e.stopPropagation) { e.stopPropagation(); }
            e.cancelBubble = true;
            if (!mwactive) return false;
            if (!doingmw) {
                var f = function(){
                    domw(e);
                    return false;
                };
                window.setTimeout(f, 50);
                doingmw = true;
            }
            return false;
        };
        var element = this.wrapper;
        if (VismoUtils.browser.isIE) {
            document.onmousewheel = function(e){
                if (!e) e = window.event;
                var el = e.target; //var el = e.srcElement;
                if (!el) return;
                while (el != element) {
                    if (el == element) {
                        onmousewheel(e);
                        return false;
                    }
                    el = el.parentNode;
                }
                return;
            };
            window.onmousewheel = document.onmousewheel;
            return;
        }
        else if (element.addEventListener) {
            element.onmousewheel = onmousewheel; // safari
            element.addEventListener('DOMMouseScroll', onmousewheel, false); /* DOMMouseScroll is for mozilla. */
        }
        else if (element.attachEvent) {
            element.attachEvent("onmousewheel", onmousewheel); // safari
        }
        else {
            // it's ie.. or something non-standardised. do nowt
            //window.onmousewheel = document.onmousewheel = onmousewheel;
        }
    },
    disable: function(){
        //console.log("disabled");
        jQuery(".vismoControls", this.wrapper).css({display: "none"});
        this.enabled = false;
    },
    enable: function(){
        //console.log("enabled");
        this.enabled = true;
        jQuery(".vismoControls", this.wrapper).css({display: ""});
    },
    goodToTransform: function(e){
        var t = VismoClickingUtils.resolveTarget(e);
        switch (t.tagName) {
            case "INPUT": return false;
            case "SELECT": return false;
            case "OPTION": return false;
        }
        if (t && t.getAttribute("class") == "vismoControl") return false;
        return true;
    },
    addMousePanning: function(){
        this._addEnabledControl("mousepanning");
        var that = this;
        var el = that.wrapper;
        var md = el.onmousedown;
        var mu = el.onmouseup;
        var mm = el.onmousemove;
        var panning_status = false;
        //alert('here');
        //jQuery(document).mouseup(function(e){alert("cool");}); //doesn't work?!
        var intervalMS = 100;
        if (VismoUtils.browser.isIE6) {
            intervalMS = 300;
        }
        var interval;
        var cancelPanning = function(e){
            if (interval) window.clearInterval(interval);
            panning_status = false;
            that.transform();
            if (!VismoUtils.browser.isIE6) { jQuery(that.wrapper).removeClass("panning"); } //style.cursor = that.defaultCursor;
            that.wrapper.onmousemove = mm;
            return false;
        };
        jQuery(that.controlDiv).mousedown(function(e){
            cancelPanning();
        });
        var onmousemove = function(e){
            if (e && e.shiftKey) { return false; }
            if (mm) { mm(e); }
            if (!that.enabled) { return; }
            if (!panning_status)
            if (!VismoUtils.browser.isIE && !jQuery(that.wrapper).hasClass("panning")) {
                jQuery(that.wrapper).addClass("panning");
            }
            if (!that.goodToTransform(e)) { return; }
            var pos = VismoClickingUtils.getMouseFromEventRelativeToElement(e, panning_status.clickpos.x, panning_status.clickpos.y, panning_status.elem);
            if (!pos) { return; }
            var t = that.getTransformation();
            //if(this.transformation) t = this.transformation;
            var sc = t.scale;
            /* work out deltas */
            var xd = parseFloat(pos.x / sc.x);
            var yd = parseFloat(pos.y / sc.y);
            t.translate.x = panning_status.translate.x + xd;
            t.translate.y = panning_status.translate.y + yd;
            if (!VismoUtils.browser.isIE6) {
                jQuery(that.wrapper).removeClass("zooming");
                //that.transform();
            }
            if (pos.x > 5 || pos.y > 5) panning_status.isClick = false;
            if (pos.x < 5 || pos.y < 5) panning_status.isClick = false;
            return false;
        };
        jQuery(this.wrapper).mousedown(function(e){
            if (e.button != 2) e.preventDefault();
            var jqw = jQuery(that.wrapper);
            if (panning_status) {
                return;
            }
            if (md) { md(e); }
            if (!that.enabled) return;
            interval = window.setInterval(function(){ that.transform(); }, intervalMS);
            if (!VismoUtils.browser.isIE6) {
                jqw.addClass("panning");
            }
            var target = VismoClickingUtils.resolveTarget(e);
            target = el;
            if (!target) return;
            var t = that.transformation.translate;
            var sc = that.transformation.scale;
            var realpos = VismoClickingUtils.getMouseFromEvent(e);
            if (!realpos) return;
            //this.vismoController = that;
            var element = VismoClickingUtils.resolveTargetWithVismo(e);
            element = el;
            panning_status = {clickpos: realpos, translate: {x: t.x, y: t.y}, elem: element, isClick: true};
            that.wrapper.onmousemove = onmousemove;
        });
        jQuery(document).mouseup(function(e){
            e.preventDefault();
            // guard added: panning_status may still be false here
            if (panning_status && panning_status.isClick && mu) { mu(e); }
            if (panning_status) {
                cancelPanning(e);
            }
        });
        jQuery(document).mousemove(function(e){
            if (panning_status) {
                onmousemove(e);
                var parent = e.target;
                while (parent.parentNode) {
                    parent = parent.parentNode;
                    if (parent == that.wrapper) return;
                }
                //if(parent != that.wrapper) cancelPanning(e); (not a good idea for tooltips)
            }
        });
    },
    setTransformation: function(t){
        if (this.limits) {
            if (this.limits.scale) {
                if (t.scale.x > this.limits.scale.x) { t.scale.x = this.limits.scale.x; }
                if (t.scale.y > this.limits.scale.y) { t.scale.y = this.limits.scale.y; }
                if (t.scale.x < this.limits.scale.minx) { t.scale.x = this.limits.scale.minx; }
                if (t.scale.y < this.limits.scale.miny) { t.scale.y = this.limits.scale.miny; }
            }
        }
        if (!t.origin) {
            var w = jQuery(this.wrapper).width();
            var h = jQuery(this.wrapper).height();
            t.origin = {x: w / 2, y: h / 2};
        }
        if (this.enabled) {
            if (!t.scale && !t.translate && !t.rotate) alert("bad transformation applied - any call to setTransformation must contain translate,scale and rotate");
            this.transformation = t;
            try { this.handler(t); } catch (e) {};
        }
        //console.log("transformation set to ", t);
    },
    createButtonLabel: function(r, type, offset){
        var properties = {'shape': 'path', stroke: this.options.controlStroke, lineWidth: '1', 'z-index': '2'};
        properties.actiontype = type;
        var coords = [];
        if (type == 'E') {
            coords = [r, 0, -r, 0, 'M', r, 0, 0, -r, "M", r, 0, 0, r];
        }
        else if (type == 'W') {
            coords = [-r, 0, r, 0, 'M', -r, 0, 0, r, "M", -r, 0, 0, -r];
        }
        else if (type == 'S') {
            coords = [0, -r, 0, r, 'M', 0, r, -r, 0, "M", 0, r, r, 0];
        }
        else if (type == 'N') {
            coords = [0, -r, 0, r, 'M', 0, -r, r, 0, "M", 0, -r, -r, 0];
        }
        else if (type == 'in') {
            coords = [-r, 0, r, 0, "M", 0, -r, 0, r];
        }
        else if (type == 'out') {
            coords = [-r, 0, r, 0];
        }
        for (var i = 0; i < coords.length; i += 2) {
            if (coords[i] == "M") i += 1;
            coords[i] += offset.x;
            coords[i+1] += offset.y;
        }
        return new VismoShape(properties, coords);
    },
    createButton: function(width, direction, offset, properties){
        var canvas = this.controlCanvas;
        if (!canvas) throw "no canvas to create on..";
        if (!width) width = 120;
        var r = width / 2;
        offset = {
            x: offset.x || 0,
            y: offset.y || 0
        };
        var coords;
        if (this.options.controlShape && this.options.controlShape == 'circle') {
            coords = [offset.x, offset.y, width / 2];
            properties.shape = 'circle';
        }
        else {
            coords = [
                offset.x, offset.y,
                offset.x + width, offset.y,
                offset.x + width, offset.y + width,
                offset.x, offset.y + width
            ];
            properties.shape = 'polygon';
        }
        properties.fill = this.options.controlFill;
        properties.stroke = this.options.controlStroke;
        var button = new VismoShape(properties, coords);
        var bb = button.getBoundingBox();
        var buttoncenter = {x: bb.center.x, y: bb.center.y};
        var label = this.createButtonLabel(r - 2, properties.actiontype, buttoncenter);
        canvas.add(label);
        canvas.add(button);
        return button;
    },
    addControls: function(list){
        for (var i = 0; i < list.length; i++) {
            this.addControl(list[i]);
        }
    },
    addControl: function(controlType){
        switch (controlType) {
            //case "zoom":
            case "pan":
                this.addPanningActions();
                break;
            case "zoom":
                this.addZoomingActions();
                break;
            case "mousepanning":
                this.addMousePanning();
                break;
            case "mousewheelzooming":
                this.addMouseWheelZooming();
                break;
            case "rotation":
                this.addRotatingActions();
                break;
            default:
                break;
        }
    },
    addPanningActions: function(controlDiv){
        this._addEnabledControl("pan");
        this.createButton(12, 180, {x: -6, y: -54}, {'actiontype': 'N', 'name': 'pan north', 'buttonType': 'narrow'});
        this.createButton(12, 270, {x: 10, y: -38}, {'actiontype': 'E', 'name': 'pan east', 'buttonType': 'earrow'});
        //this.createButton(10, 90, {x: 16, y: 16}, {'actiontype': 'O', 'name': 're-center', 'buttonType': ''});
        this.createButton(12, 90, {x: -22, y: -38}, {'actiontype': 'W', 'name': 'pan west', 'buttonType': 'warrow'});
        this.createButton(12, 0, {x: -6, y: -20}, {'actiontype': 'S', 'name': 'pan south', 'buttonType': 'sarrow'});
        this.applyLayer();
    },
    addRotatingActions: function(){
        /*
        var rotateCanvas = this.controlCanvas.getDomElement();
        this.createButton(rotateCanvas, 10, 180, {x: 16, y: 2}, {'actiontype': 'rotatezup', 'name': 'pan north', 'buttonType': 'narrow'});
        this.createButton(rotateCanvas, 10, 0, {x: 16, y: 30}, {'actiontype': 'rotatezdown', 'name': 'pan south', 'buttonType': 'sarrow'});
        this.createButton(rotateCanvas, 10, 270, {x: 30, y: 16}, {'actiontype': 'rotatezright', 'name': 'rotate to right', 'buttonType': 'earrow'});
        this.createButton(rotateCanvas, 10, 90, {x: 2, y: 16}, {'actiontype': 'rotatezleft', 'name': 'rotate to left', 'buttonType': 'warrow'});
        rotateCanvas.onmouseup = this._panzoomClickHandler;
        */
    },
    addZoomingActions: function(){
        this._addEnabledControl("zoom");
        this.createButton(12, 180, {x: -6, y: 12}, {'actiontype': 'in', 'name': 'zoom in', 'buttonType': 'plus'});
        this.createButton(12, 180, {x: -6, y: 42}, {'actiontype': 'out', 'name': 'zoom out', 'buttonType': 'minus'});
        this.applyLayer();
    },
    zoom: function(x, y){
        var t = this.getTransformation();
        t.scale.x = x;
        if (!y) y = x;
        t.scale.y = y;
        this.setTransformation(t);
    },
    panTo: function(x, y){
        //if(!this.enabled) return;
        var t = this.getTransformation();
        var finalX = -x;
        var finalY = -y;
        var thisx, thisy;
        var direction = {};
        var difference = {};
        thisx = t.translate.x;
        thisy = t.translate.y;
        difference.x = thisx - finalX;
        difference.y = thisy - finalY;
        direction.x = -difference.x / 5;
        direction.y = -difference.y / 5;
        var change = true;
        var that = this;
        var f = function(){
            change = {x: false, y: false};
            if (thisx > finalX && thisx + direction.x > finalX) { thisx += direction.x; change.x = true; }
            else if (thisx < finalX && thisx + direction.x < finalX) { thisx += direction.x; change.x = true; }
            else { t.translate.x = finalX; }
            if (thisy > finalY && thisy + direction.y > finalY) { thisy += direction.y; change.y = true; }
            else if (thisy < finalY && thisy + direction.y < finalY) { thisy += direction.y; change.y = true; }
            else { change.x = true; t.translate.y = finalY; }
            if (change.x) { t.translate.x = thisx; }
            else { t.translate.x = finalX; }
            if (change.y) { t.translate.y = thisy; }
            else { t.translate.y = finalY; }
            if (t.translate.x != finalX && t.translate.y != finalY) {
                that.setTransformation(t);
                window.setTimeout(f, 5);
            }
            else {
                that.setTransformation(t);
            }
        };
        f();
        //window.setTimeout(pan, 200);
    },
    transform: function(){
        if (this.enabled) {
            var t = this.getTransformation();
            var s = t.scale;
            var tr = t.translate;
            if (s.x <= 0) s.x = 0.1125;
            if (s.y <= 0) s.y = 0.1125;
            var ok = true;
            var lim = this.limits;
            if (lim.scale) {
                if (s.y < lim.scale.miny) t.scale.y = lim.scale.miny;
                if (s.x < lim.scale.minx) t.scale.x = lim.scale.minx;
                if (s.y > lim.scale.y) t.scale.y = lim.scale.y;
                if (s.x > lim.scale.x) t.scale.x = lim.scale.x;
            }
            this.handler(this.transformation);
        }
    },
    _panzoomClickHandler: function(e, hit, controller){
        if (!hit) return;
        var pan = {};
        var t = controller.getTransformation();
        if (!t.scale) t.scale = {x: 1, y: 1};
        if (!t.translate) t.translate = {x: 0, y: 0};
        if (!t.rotate) t.rotate = {x: 0, y: 0, z: 0};
        var scale = t.scale;
        pan.x = parseFloat(this.pansensitivity / scale.x);
        pan.y = parseFloat(this.pansensitivity / scale.y);
        switch (hit.getProperty("actiontype")) {
            case "W":
                t.translate.x += pan.x;
                break;
            case "O":
                t.translate.x = 0;
                t.translate.y = 0;
                break;
            case "E":
                t.translate.x -= pan.x;
                break;
            case "N":
                t.translate.y += pan.y;
                break;
            case "S":
                t.translate.y -= pan.y;
                break;
            case "in":
                scale.x *= this.options.zoomfactor;
                scale.y *= this.options.zoomfactor;
                break;
            case "out":
                scale.x /= this.options.zoomfactor;
                scale.y /= this.options.zoomfactor;
                break;
            case "rotatezright":
                if (!t.rotate.z) t.rotate.z = 0;
                //console.log("right", t.rotate.z);
                t.rotate.z -= 0.1;
                var left = 6.28318531;
                if (t.rotate.z < 0) t.rotate.z = left;
                break;
            case "rotatezup":
                if (!t.rotate.y) t.rotate.y = 0;
                t.rotate.y += 0.1;
                break;
            case "rotatezdown":
                if (!t.rotate.y) t.rotate.y = 0;
                t.rotate.y -= 0.1;
                break;
            case "rotatezleft":
                if (!t.rotate.z) t.rotate.z = 0;
                t.rotate.z += 0.1;
                break;
            default:
                break;
        }
        controller.transform();
        return false;
    }
};
{ return; }
conditional_block
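Each entry in this dump is a fill-in-the-middle example: the middle cell above ({ return; }, typed conditional_block) belongs at the prefix/suffix seam, where it completes the bare if(!panning_status) guard inside onmousemove. As a minimal sketch of how such a row is reassembled (written in Python since the dump itself is language-neutral; the field values are abbreviated and the JavaScript file name is an assumption, as it is not shown in this excerpt):

# Reassemble one FIM row into the source file it was cut from.
# The dict keys mirror the dataset columns listed at the top of this dump.
row = {
    "file_name": "controller.js",                    # assumed name; not visible in this excerpt
    "prefix": "... if(!panning_status)",             # text before the masked span (abbreviated)
    "middle": "{ return; }",                         # the masked span (fim_type: conditional_block)
    "suffix": "if(!VismoUtils.browser.isIE ...",     # text after the masked span (abbreviated)
}

def reassemble(row):
    """prefix + middle + suffix reproduces the file the row was cut from."""
    return row["prefix"] + row["middle"] + row["suffix"]

print(reassemble(row)[:80])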
model.py
""" Model is a wrapper over a set of KERAS models! implements interface for learning over a generator + dataset or statically generated data and for prediction (for all classes) """ import os import cv2 import json import keras import rasterio import numpy as np from keras.utils import generic_utils from keras.models import Model from functools import wraps import geojson from .utils import get_shape from .utils import pad_shape, unpad from .utils import overlap_split from .utils import overlap_concatenate from .config import OrthoSegmModelConfig from .standardizer import Standardizer def _create_dir(*args):
def _find_weights(weights_dir, mode='last'): """Find weights path if not provided manually during model initialization""" if mode == 'last': file_name = sorted(os.listdir(weights_dir))[-1] weights_path = os.path.join(weights_dir, file_name) elif mode == 'best': raise NotImplementedError else: raise NotImplementedError return weights_path def _find_model(model_chkp_dir, mode='last'): """Find weights path if not provided manually during model initialization""" if mode == 'last': file_name = sorted(os.listdir(model_chkp_dir))[-1] model_path = os.path.join(model_chkp_dir, file_name) elif mode == 'best': raise NotImplementedError return model_path def load_model(model_dir, mode='inference', config_path='auto', graph_path='auto', weights_path='auto', model_path='auto', custom_objects=None): if config_path == 'auto': config_path = os.path.join(model_dir, 'config.json') if graph_path == 'auto': graph_path = os.path.join(model_dir, 'graph.json') if weights_path == 'auto': weights_dir = os.path.join(model_dir, 'weights') weights_path = _find_weights(weights_dir) if model_path == 'auto': model_chkp_dir = os.path.join(model_dir, 'models') model_path = _find_model(model_chkp_dir) # load configuration file config = OrthoSegmModelConfig.load_config(config_path) # load model graph file with open(graph_path, 'r') as f: graph = json.load(f) if mode == 'train': model = keras.models.load_model(model_path, custom_objects=custom_objects, compile=True) if mode == 'inference': try: model = keras.models.load_model(model_path, custom_objects=custom_objects, compile=False) except: model = keras.models.model_from_json(json.dumps(graph)) model.load_weights(weights_path) segmentation_model = SegmentationModel(model_dir) segmentation_model.build(model, config) return segmentation_model class SegmentationModel(Model): """ """ def __init__(self, model_dir): self.config = None self.model = None self._built = False self.model_dir = _create_dir(model_dir) self.log_dir = _create_dir(model_dir, 'log') self.weights_dir = _create_dir(model_dir, 'weights') self.models_dir = _create_dir(model_dir, 'models') # be careful with model and models! dirs # input standardization pipeline function self._input_standart = None def __getattr__(self, attr): return getattr(self.model, attr) def build(self, model, config): self.model = model self.config = config # save configurations of model config_path = os.path.join(self.model_dir, 'config.json') if not os.path.exists(config_path): self.config.save(config_path, indent=2) # save graph of model graph_path = os.path.join(self.model_dir, 'graph.json') model_graph = json.loads(model.to_json()) with open(graph_path, 'w') as f: json.dump(model_graph, f, indent=2) st = Standardizer(**self.config.STANDARDISING_PARAMS) self._input_standart = st.build_pipline(self.config.STANDARDISING_FUNCTIONS) self._built = True def built(func): @wraps(func) def wrapped(self, *args, **kwargs): if self._built: return func(self, *args, **kwargs) else: raise RuntimeError('Your model is not built! 
Please provide keras model and config.') return wrapped @built def _get_gsd(self): gsd = self.config.GSD if np.isscalar(gsd): gsd = (gsd, gsd) gsd_x = gsd[0] gsd_y = gsd[1] return gsd_x, gsd_y @built def _load_image(self, path, target_size=None, return_transform=False, return_crs=True): dataset_element_name = os.path.basename(path) path = os.path.normpath(path) channels = self.config.CHANNELS target_gsd_x, target_gsd_y = self._get_gsd() # defining local variables for memorizing best of them during iterations transform = None crs = None min_gsd_x = 10e5 min_gsd_y = 10e5 gsd_x = min_gsd_x gsd_y = min_gsd_y max_h = 0 max_w = 0 image_ids = ['20170304', '20170404'] channels_list = [] for image_id in image_ids: channels_ = [os.path.join(path, image_id, '{}_channel_{}.tif'.format(dataset_element_name, ch)) for ch in channels] for channel_name in channels_: try: # open image(channel) file # use 'r+' mode to support on windows >__< # (otherwise, in 'r' mode, cv2.resize fails with python int to C int conversion overflow) with rasterio.open(channel_name, 'r+') as img_obj: # read metadata from image(channel) file tm = list(img_obj.transform) gsd_x = np.sqrt(tm[0] ** 2 + tm[3] ** 2) gsd_y = np.sqrt(tm[1] ** 2 + tm[4] ** 2) crs = img_obj.crs # remember best gsd and h and w for future resizing if gsd_x * gsd_y < min_gsd_x * min_gsd_y: transform = tm min_gsd_x = gsd_x min_gsd_y = gsd_y max_h = img_obj.height max_w = img_obj.width # read channels img = img_obj.read() img = np.squeeze(img) channels_list.append(img) except FileNotFoundError: print('No such image {}'.format(os.path.basename(channel_name))) raise Exception('No channels!') # define width and heights of our images for our model gsd w = int(max_w * gsd_x / target_gsd_x) h = int(max_h * gsd_y / target_gsd_y) if target_size: w = target_size[1] h = target_size[0] channels_list = [cv2.resize(ch, (w, h), cv2.INTER_LINEAR) for ch in channels_list] image = np.array(channels_list) image = np.rollaxis(image, 0, 3) if return_transform: if return_crs: return image, transform, crs else: return image, transform return image @built def _load_masks(self, path): path = os.path.normpath(path) classes = self.config.CLASSES mask_id = os.path.basename(path) masks = [os.path.join(path, '{}_class_{}.tif'.format(mask_id, cls)) for cls in classes] masks_list = [] for m, cls in zip(masks, classes): try: with rasterio.open(m, 'r') as mask_obj: mask = mask_obj.read() mask = np.squeeze(mask) masks_list.append(mask) except FileNotFoundError: print('No such image {}'.format(os.path.basename(m))) raise Exception('No mask for class {}!'.format(cls)) masks = np.array(masks_list) masks = np.rollaxis(masks, 0, 3) # if target_size: # cv2.resize(masks, target_size, cv2.INTER_NEAREST) return masks def _to_binary_masks(self, image, tm): gsd_x, gsd_y = self._get_gsd() target_gsd_x = np.sqrt(tm[0] ** 2 + tm[3] ** 2) target_gsd_y = np.sqrt(tm[1] ** 2 + tm[4] ** 2) # define width and heights of our masks for our model gsd w = int(image.shape[1] * gsd_x / target_gsd_x) h = int(image.shape[0] * gsd_y / target_gsd_y) image = cv2.resize(image, (w, h), cv2.INTER_LINEAR) if image.ndim == 2: image = np.expand_dims(image, axis=-1) return np.rollaxis(image, 2, 0), (w, h) @built def _save_raster_masks(self, image, path, save_postfix='pred', transform_matrix=None, crs=None): image, shape = self._to_binary_masks(image, transform_matrix) path = os.path.normpath(path) # delete '\' or '//' in the end of filepath if not os.path.exists(path): os.makedirs(path) w, h = shape image_basename = 
os.path.basename(path) saved_images_names = [] for i, cls in enumerate(self.config.CLASSES): # save each mask to separate file image_name = image_basename + '_class_{}_{}.tif'.format(cls, save_postfix) saved_images_names.append(image_name) image_path = os.path.join(path, image_name) with rasterio.open(image_path, 'w', width=w, height=h, driver='GTiff', count=1, dtype='uint8', NBITS=1, transform=transform_matrix[:6], crs=crs) as dst: dst.write(image[i].astype(rasterio.uint8), 1) return saved_images_names def get_vector_markup(self, mask, geotransform, trg_crs='epsg:3857'): """ Saves vector mask from raw model output as .geojson :param raw_mask_path: :param transform: geotransform of initial dataset :param filename: output location absolute path :param trg_crs: target coordinate reference system :param threshold: a threshold for raw mask low-pass filtering :return: """ # plt.imsave(os.path.join(time_series_path, time_frame, '_'.join([dataset_element_name, mask_name, time_frame, self.get_timestamp()])+'.png'), raw) shapes = rasterio.features.shapes(mask, transform=geotransform) # the last shape contains all geometry shapes = list(shapes)[:-1] polygons = [geojson.Feature(geometry=geojson.Polygon(shape[0]['coordinates'])) for shape in shapes] crs = { "type": "name", "properties": { "name": trg_crs}} gs = geojson.FeatureCollection(polygons, crs=crs) return geojson.dumps(gs) @built def _save_vector_masks(self, image, path, save_postfix='pred', geotransform=None, trg_crs='epsg:3857', threshold=170): image, shape = self._to_binary_masks(image, geotransform) path = os.path.normpath(path) # delete '\' or '//' in the end of filepath if not os.path.exists(path): os.makedirs(path) image_basename = os.path.basename(path) saved_geojson_names = [] for i, cls in enumerate(self.config.CLASSES): # save each mask to separate file image_name = image_basename + '_class_{}_{}.geojson'.format(cls, save_postfix) saved_geojson_names.append(image_name) image_path = os.path.join(path, image_name) image_mask = np.array(image[i] > threshold, np.uint8) gs = self.get_vector_markup(image_mask, geotransform, trg_crs) with open(image_path, 'w') as file: file.write(gs) return saved_geojson_names @built def _standardize(self, image): return self._input_standart(image) @built def predict_orthophoto(self, path_to_object, split_size=1024, overlap=64, save=False, save_dir='same', verbose=0): image, transform_matrix, crs = self._load_image(path_to_object, return_transform=True, return_crs=True) h, w = image.shape[:2] h = get_shape(h, split_size, overlap) w = get_shape(w, split_size, overlap) image, paddings = pad_shape(image, (h, w)) images, n_rows, n_cols = overlap_split(image, split_size, overlap) predictions = [] progbar = generic_utils.Progbar(len(images), verbose=verbose) if verbose: print('Predicting image pieces...') for image in images: image = self._standardize(image) image = np.expand_dims(image, axis=0) # input image have to be 4d tensor pred = self.model.predict(image) pred = np.squeeze(pred) # delete useless 0 axis predictions.append(pred) progbar.add(1) if len(predictions) > 1: prediction = overlap_concatenate(predictions, n_rows, n_cols, overlap) else: prediction = np.squeeze(predictions) prediction = unpad(prediction, paddings) if save: if save_dir == 'same': save_dir = path_to_object round_prediction = np.round(prediction) self._save_masks(round_prediction, save_dir, transform_matrix=transform_matrix, crs=crs) return prediction, transform_matrix
path = os.path.join(*args)
if not os.path.exists(path):
    os.makedirs(path)
return path
identifier_body
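The identifier_body middle above is the body of _create_dir. For orientation, a hypothetical usage sketch of this file's entry points, based only on the signatures visible in the row; the model directory and orthophoto path are placeholders, and importing it as a top-level `model` module is an assumption (the file itself uses relative imports, so it lives inside a package):

# Hypothetical driver for the SegmentationModel defined in model.py.
# 'my_model_dir' and 'path/to/orthophoto' are placeholders, not dataset values.
from model import load_model  # assumption: module importable as `model`

seg_model = load_model('my_model_dir', mode='inference')  # locates config.json, graph.json, weights/
prediction, transform = seg_model.predict_orthophoto(
    'path/to/orthophoto',  # directory layout expected by SegmentationModel._load_image
    split_size=1024,       # tile edge fed to the network
    overlap=64,            # overlap used when stitching tile predictions back together
    save=True,             # writes per-class GeoTIFF masks next to the input
    verbose=1,
)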
run_ERDCA.py
import sys,os import data_processing as dp import ecc_tools as tools import timeit # import pydca-ER module import matplotlib #matplotlib.use('agg') import matplotlib.pyplot as plt from pydca.erdca import erdca from pydca.sequence_backmapper import sequence_backmapper from pydca.msa_trimmer import msa_trimmer from pydca.msa_trimmer.msa_trimmer import MSATrimmerException from pydca.dca_utilities import dca_utilities import numpy as np import pickle from gen_ROC_jobID_df import add_ROC # Import Bio data processing features import Bio.PDB, warnings from Bio.PDB import * pdb_list = Bio.PDB.PDBList() pdb_parser = Bio.PDB.PDBParser() from scipy.spatial import distance_matrix from Bio import BiopythonWarning from pydca.sequence_backmapper import sequence_backmapper from pydca.msa_trimmer import msa_trimmer from pydca.contact_visualizer import contact_visualizer from pydca.dca_utilities import dca_utilities warnings.filterwarnings("error") warnings.simplefilter('ignore', BiopythonWarning) warnings.simplefilter('ignore', DeprecationWarning) warnings.simplefilter('ignore', FutureWarning) warnings.simplefilter('ignore', ResourceWarning) warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") #======================================================================================== data_path = '/data/cresswellclayec/hoangd2_data/Pfam-A.full' preprocess_path = '/data/cresswellclayec/DCA_ER/biowulf/pfam_ecc/' data_path = '/home/eclay/Pfam-A.full' preprocess_path = '/home/eclay/DCA_ER/biowulf/pfam_ecc/' #pfam_id = 'PF00025' pfam_id = sys.argv[1] cpus_per_job = int(sys.argv[2]) job_id = sys.argv[3] print("Calculating DI for %s using %d (of %d) threads (JOBID: %s)"%(pfam_id,cpus_per_job-4,cpus_per_job,job_id)) # Read in Reference Protein Structure pdb = np.load('%s/%s/pdb_refs.npy'%(data_path,pfam_id)) # convert bytes to str (python 2 to python 3) pdb = np.array([pdb[t,i].decode('UTF-8') for t in range(pdb.shape[0]) for i in range(pdb.shape[1])]).reshape(pdb.shape[0],pdb.shape[1]) ipdb = 0 tpdb = int(pdb[ipdb,1]) print('Ref Sequence # should be : ',tpdb-1) # Load Multiple Sequence Alignment s = dp.load_msa(data_path,pfam_id) # Load Polypeptide Sequence from PDB as reference sequence print(pdb[ipdb,:]) pdb_id = pdb[ipdb,5] pdb_chain = pdb[ipdb,6] pdb_start,pdb_end = int(pdb[ipdb,7]),int(pdb[ipdb,8]) pdb_range = [pdb_start-1, pdb_end] #print('pdb id, chain, start, end, length:',pdb_id,pdb_chain,pdb_start,pdb_end,pdb_end-pdb_start+1) #print('download pdb file') pdb_file = pdb_list.retrieve_pdb_file(str(pdb_id),file_format='pdb') #pdb_file = pdb_list.retrieve_pdb_file(pdb_id) pfam_dict = {} #---------------------------------------------------------------------------------------------------------------------# #--------------------------------------- Create PDB-PP Reference Sequence --------------------------------------------# #---------------------------------------------------------------------------------------------------------------------# msa_file, ref_file = tools.write_FASTA(s[tpdb], s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/',nickname='orig') erdca_visualizer = contact_visualizer.DCAVisualizer('protein', pdb[ipdb,6], pdb[ipdb,5],refseq_file=ref_file) biomol_info,er_pdb_seq = erdca_visualizer.pdb_content.pdb_chain_sequences[erdca_visualizer.pdb_chain_id] print('\n\nERDCA-Visualizer pdb seq') print(er_pdb_seq) erdca_msa_file, erdca_ref_file = tools.write_FASTA(er_pdb_seq, s, pfam_id, 
number_form=False,processed=False,path='./pfam_ecc/') #---------------------------------------------------------------------------------------------------------------------# if 1: # DCA read in # Load Multiple Sequence Alignment s = dp.load_msa(data_path,pfam_id) # Load Polypeptide Sequence from PDB as reference sequence print(pdb[ipdb,:]) pdb_id = pdb[ipdb,5] pdb_chain = pdb[ipdb,6] pdb_start,pdb_end = int(pdb[ipdb,7]),int(pdb[ipdb,8]) pdb_range = [pdb_start-1, pdb_end] #print('pdb id, chain, start, end, length:',pdb_id,pdb_chain,pdb_start,pdb_end,pdb_end-pdb_start+1) #print('download pdb file') pdb_file = pdb_list.retrieve_pdb_file(str(pdb_id),file_format='pdb') #pdb_file = pdb_list.retrieve_pdb_file(pdb_id) pfam_dict = {} #---------------------------------------------------------------------------------------------------------------------# chain = pdb_parser.get_structure(str(pdb_id),pdb_file)[0][pdb_chain] ppb = PPBuilder().build_peptides(chain) # print(pp.get_sequence()) print('peptide build of chain produced %d elements'%(len(ppb))) matching_seq_dict = {} poly_seq = list() for i,pp in enumerate(ppb): for char in str(pp.get_sequence()): poly_seq.append(char) print('PDB Polypeptide Sequence: \n',poly_seq) #check that poly_seq matches up with given MSA poly_seq_range = poly_seq[pdb_range[0]:pdb_range[1]] print('PDB Polypeptide Sequence (In Protein\'s PDB range len=%d): \n'%len(poly_seq_range),poly_seq_range) if len(poly_seq_range) < 10: print('PP sequence overlap with PDB range is too small.\nWe will find a match\nBAD PDB-RANGE') poly_seq_range = poly_seq else: pp_msa_file_range, pp_ref_file_range = tools.write_FASTA(poly_seq_range, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/',nickname='range') erdca_msa_file, erdca_ref_file = tools.write_FASTA(poly_seq_range, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/') pp_msa_file, pp_ref_file = tools.write_FASTA(poly_seq, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/') erdca_msa_file, erdca_ref_file = tools.write_FASTA(poly_seq, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/') #---------------------------------------------------------------------------------------------------------------------# #---------------------------------------------------------------------------------------------------------------------# #---------------------------------- PreProcess FASTA Alignment -------------------------------------------------------# #---------------------------------------------------------------------------------------------------------------------# preprocessed_data_outfile = preprocess_path+'MSA_%s_PreProcessed.fa'%pfam_id print(preprocessed_data_outfile) print('\n\nPre-Processing MSA with Range PP Seq\n\n') trimmer = msa_trimmer.MSATrimmer( erdca_msa_file, biomolecule='PROTEIN', refseq_file = erdca_ref_file ) pfam_dict['ref_file'] = erdca_ref_file try: preprocessed_data,s_index, cols_removed,s_ipdb,s = trimmer.get_preprocessed_msa(printing=True, saving = False) except(MSATrimmerException): ERR = 'PPseq-MSA' print('Error with MSA trims\n%s\n'%ERR) sys.exit() print('\n\n\n',s[s_ipdb]) #write trimmed msa to file in FASTA format with open(preprocessed_data_outfile, 'w') as fh: for seqid, seq in preprocessed_data: fh.write('>{}\n{}\n'.format(seqid, seq)) #---------------------------------------------------------------------------------------------------------------------#
#---------------------------------------------------------------------------------------------------------------------# #----------------------------------------- Run Simulation ERDCA ------------------------------------------------------# #---------------------------------------------------------------------------------------------------------------------# try: print('Initializing ER instance\n\n') # Compute DI scores using Expectation Reflection algorithm erdca_inst = erdca.ERDCA( preprocessed_data_outfile, 'PROTEIN', s_index = s_index, pseudocount = 0.5, num_threads = cpus_per_job-4, seqid = 0.8) except: ref_seq = s[tpdb,:] print('Using PDB defined reference sequence from MSA:\n',ref_seq) msa_file, ref_file = tools.write_FASTA(ref_seq, s, pfam_id, number_form=False,processed=False,path=preprocess_path) pfam_dict['ref_file'] = ref_file print('Re-trimming MSA with pdb index defined ref_seq') # create MSATrimmer instance trimmer = msa_trimmer.MSATrimmer( msa_file, biomolecule='protein', refseq_file=ref_file ) preprocessed_data,s_index, cols_removed,s_ipdb,s = trimmer.get_preprocessed_msa(printing=True, saving = False) #write trimmed msa to file in FASTA format with open(preprocessed_data_outfile, 'w') as fh: for seqid, seq in preprocessed_data: fh.write('>{}\n{}\n'.format(seqid, seq)) erdca_inst = erdca.ERDCA( preprocessed_data_outfile, 'PROTEIN', s_index = s_index, pseudocount = 0.5, num_threads = cpus_per_job-4, seqid = 0.8) # Save processed data dictionary and FASTA file pfam_dict['processed_msa'] = preprocessed_data pfam_dict['msa'] = s pfam_dict['s_index'] = s_index pfam_dict['s_ipdb'] = s_ipdb pfam_dict['cols_removed'] = cols_removed input_data_file = preprocess_path+"%s_DP_ER.pickle"%(pfam_id) with open(input_data_file,"wb") as f: pickle.dump(pfam_dict, f) f.close() print('Running ER simulation\n\n') # Compute average product corrected Frobenius norm of the couplings start_time = timeit.default_timer() erdca_DI = erdca_inst.compute_sorted_DI() run_time = timeit.default_timer() - start_time print('ER run time:',run_time) for site_pair, score in erdca_DI[:5]:
with open('DI/ER/er_DI_%s.pickle'%(pfam_id), 'wb') as f: pickle.dump(erdca_DI, f) f.close() #---------------------------------------------------------------------------------------------------------------------# plotting = False if plotting: # Print details of protein PDB structure info for contact visualization print('Using chain ',pdb_chain) print('PDB ID: ', pdb_id) from pydca.contact_visualizer import contact_visualizer visualizer = contact_visualizer.DCAVisualizer('protein', pdb_chain, pdb_id, refseq_file = pp_ref_file, sorted_dca_scores = erdca_DI, linear_dist = 4, contact_dist = 8.) contact_map_data = visualizer.plot_contact_map() #plt.show() #plt.close() tp_rate_data = visualizer.plot_true_positive_rates() #plt.show() #plt.close() #print('Contact Map: \n',contact_map_data[:10]) #print('TP Rates: \n',tp_rate_data[:10]) with open(preprocess_path+'ER_%s_contact_map_data.pickle'%(pfam_id), 'wb') as f: pickle.dump(contact_map_data, f) f.close() with open(preprocess_path+'ER_%s_tp_rate_data.pickle'%(pfam_id), 'wb') as f: pickle.dump(tp_rate_data, f) f.close()
print(site_pair, score)
conditional_block
run_ERDCA.py
import sys,os import data_processing as dp import ecc_tools as tools import timeit # import pydca-ER module import matplotlib #matplotlib.use('agg') import matplotlib.pyplot as plt from pydca.erdca import erdca from pydca.sequence_backmapper import sequence_backmapper from pydca.msa_trimmer import msa_trimmer from pydca.msa_trimmer.msa_trimmer import MSATrimmerException from pydca.dca_utilities import dca_utilities import numpy as np import pickle from gen_ROC_jobID_df import add_ROC # Import Bio data processing features import Bio.PDB, warnings from Bio.PDB import * pdb_list = Bio.PDB.PDBList() pdb_parser = Bio.PDB.PDBParser() from scipy.spatial import distance_matrix from Bio import BiopythonWarning from pydca.sequence_backmapper import sequence_backmapper from pydca.msa_trimmer import msa_trimmer from pydca.contact_visualizer import contact_visualizer from pydca.dca_utilities import dca_utilities warnings.filterwarnings("error") warnings.simplefilter('ignore', BiopythonWarning) warnings.simplefilter('ignore', DeprecationWarning) warnings.simplefilter('ignore', FutureWarning) warnings.simplefilter('ignore', ResourceWarning) warnings.filterwarnings("ignore", message="numpy.dtype size changed") warnings.filterwarnings("ignore", message="numpy.ufunc size changed") #======================================================================================== data_path = '/data/cresswellclayec/hoangd2_data/Pfam-A.full' preprocess_path = '/data/cresswellclayec/DCA_ER/biowulf/pfam_ecc/' data_path = '/home/eclay/Pfam-A.full' preprocess_path = '/home/eclay/DCA_ER/biowulf/pfam_ecc/' #pfam_id = 'PF00025' pfam_id = sys.argv[1] cpus_per_job = int(sys.argv[2]) job_id = sys.argv[3] print("Calculating DI for %s using %d (of %d) threads (JOBID: %s)"%(pfam_id,cpus_per_job-4,cpus_per_job,job_id)) # Read in Reference Protein Structure pdb = np.load('%s/%s/pdb_refs.npy'%(data_path,pfam_id)) # convert bytes to str (python 2 to python 3) pdb = np.array([pdb[t,i].decode('UTF-8') for t in range(pdb.shape[0]) for i in range(pdb.shape[1])]).reshape(pdb.shape[0],pdb.shape[1]) ipdb = 0 tpdb = int(pdb[ipdb,1]) print('Ref Sequence # should be : ',tpdb-1) # Load Multiple Sequence Alignment s = dp.load_msa(data_path,pfam_id) # Load Polypeptide Sequence from PDB as reference sequence print(pdb[ipdb,:]) pdb_id = pdb[ipdb,5] pdb_chain = pdb[ipdb,6] pdb_start,pdb_end = int(pdb[ipdb,7]),int(pdb[ipdb,8]) pdb_range = [pdb_start-1, pdb_end] #print('pdb id, chain, start, end, length:',pdb_id,pdb_chain,pdb_start,pdb_end,pdb_end-pdb_start+1) #print('download pdb file') pdb_file = pdb_list.retrieve_pdb_file(str(pdb_id),file_format='pdb') #pdb_file = pdb_list.retrieve_pdb_file(pdb_id) pfam_dict = {} #---------------------------------------------------------------------------------------------------------------------# #--------------------------------------- Create PDB-PP Reference Sequence --------------------------------------------# #---------------------------------------------------------------------------------------------------------------------# msa_file, ref_file = tools.write_FASTA(s[tpdb], s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/',nickname='orig') erdca_visualizer = contact_visualizer.DCAVisualizer('protein', pdb[ipdb,6], pdb[ipdb,5],refseq_file=ref_file) biomol_info,er_pdb_seq = erdca_visualizer.pdb_content.pdb_chain_sequences[erdca_visualizer.pdb_chain_id] print('\n\nERDCA-Visualizer pdb seq') print(er_pdb_seq) erdca_msa_file, erdca_ref_file = tools.write_FASTA(er_pdb_seq, s, pfam_id, 
number_form=False,processed=False,path='./pfam_ecc/') #---------------------------------------------------------------------------------------------------------------------# if 1: # DCA read in # Load Multiple Sequence Alignment s = dp.load_msa(data_path,pfam_id) # Load Polypeptide Sequence from PDB as reference sequence print(pdb[ipdb,:]) pdb_id = pdb[ipdb,5] pdb_chain = pdb[ipdb,6] pdb_start,pdb_end = int(pdb[ipdb,7]),int(pdb[ipdb,8]) pdb_range = [pdb_start-1, pdb_end] #print('pdb id, chain, start, end, length:',pdb_id,pdb_chain,pdb_start,pdb_end,pdb_end-pdb_start+1) #print('download pdb file') pdb_file = pdb_list.retrieve_pdb_file(str(pdb_id),file_format='pdb') #pdb_file = pdb_list.retrieve_pdb_file(pdb_id) pfam_dict = {} #---------------------------------------------------------------------------------------------------------------------# chain = pdb_parser.get_structure(str(pdb_id),pdb_file)[0][pdb_chain] ppb = PPBuilder().build_peptides(chain) # print(pp.get_sequence()) print('peptide build of chain produced %d elements'%(len(ppb))) matching_seq_dict = {} poly_seq = list() for i,pp in enumerate(ppb): for char in str(pp.get_sequence()): poly_seq.append(char) print('PDB Polypeptide Sequence: \n',poly_seq) #check that poly_seq matches up with given MSA poly_seq_range = poly_seq[pdb_range[0]:pdb_range[1]] print('PDB Polypeptide Sequence (In Proteins PDB range len=%d): \n'%len(poly_seq_range),poly_seq_range) if len(poly_seq_range) < 10: print('PP sequence overlap with PDB range is too small.\nWe will find a match\nBAD PDB-RANGE') poly_seq_range = poly_seq else: pp_msa_file_range, pp_ref_file_range = tools.write_FASTA(poly_seq_range, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/',nickname='range') erdca_msa_file, erdca_ref_file = tools.write_FASTA(poly_seq_range, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/') pp_msa_file, pp_ref_file = tools.write_FASTA(poly_seq, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/') erdca_msa_file, erdca_ref_file = tools.write_FASTA(poly_seq, s, pfam_id, number_form=False,processed=False,path='./pfam_ecc/') #---------------------------------------------------------------------------------------------------------------------# #---------------------------------------------------------------------------------------------------------------------#
print(preprocessed_data_outfile) print('\n\nPre-Processing MSA with Range PP Seq\n\n') trimmer = msa_trimmer.MSATrimmer( erdca_msa_file, biomolecule='PROTEIN', refseq_file = erdca_ref_file ) pfam_dict['ref_file'] = erdca_ref_file try: preprocessed_data,s_index, cols_removed,s_ipdb,s = trimmer.get_preprocessed_msa(printing=True, saving = False) except(MSATrimmerException): ERR = 'PPseq-MSA' print('Error with MSA trims\n%s\n'%ERR) sys.exit() print('\n\n\n',s[s_ipdb]) #write trimmed msa to file in FASTA format with open(preprocessed_data_outfile, 'w') as fh: for seqid, seq in preprocessed_data: fh.write('>{}\n{}\n'.format(seqid, seq)) #---------------------------------------------------------------------------------------------------------------------# #---------------------------------------------------------------------------------------------------------------------# #----------------------------------------- Run Simulation ERDCA ------------------------------------------------------# #---------------------------------------------------------------------------------------------------------------------# try: print('Initializing ER instance\n\n') # Compute DI scores using Expectation Reflection algorithm erdca_inst = erdca.ERDCA( preprocessed_data_outfile, 'PROTEIN', s_index = s_index, pseudocount = 0.5, num_threads = cpus_per_job-4, seqid = 0.8) except: ref_seq = s[tpdb,:] print('Using PDB defined reference sequence from MSA:\n',ref_seq) msa_file, ref_file = tools.write_FASTA(ref_seq, s, pfam_id, number_form=False,processed=False,path=preprocess_path) pfam_dict['ref_file'] = ref_file print('Re-trimming MSA with pdb index defined ref_seq') # create MSATrimmer instance trimmer = msa_trimmer.MSATrimmer( msa_file, biomolecule='protein', refseq_file=ref_file ) preprocessed_data,s_index, cols_removed,s_ipdb,s = trimmer.get_preprocessed_msa(printing=True, saving = False) #write trimmed msa to file in FASTA format with open(preprocessed_data_outfile, 'w') as fh: for seqid, seq in preprocessed_data: fh.write('>{}\n{}\n'.format(seqid, seq)) erdca_inst = erdca.ERDCA( preprocessed_data_outfile, 'PROTEIN', s_index = s_index, pseudocount = 0.5, num_threads = cpus_per_job-4, seqid = 0.8) # Save processed data dictionary and FASTA file pfam_dict['processed_msa'] = preprocessed_data pfam_dict['msa'] = s pfam_dict['s_index'] = s_index pfam_dict['s_ipdb'] = s_ipdb pfam_dict['cols_removed'] = cols_removed input_data_file = preprocess_path+"%s_DP_ER.pickle"%(pfam_id) with open(input_data_file,"wb") as f: pickle.dump(pfam_dict, f) f.close() print('Running ER simulation\n\n') # Compute average product corrected Frobenius norm of the couplings start_time = timeit.default_timer() erdca_DI = erdca_inst.compute_sorted_DI() run_time = timeit.default_timer() - start_time print('ER run time:',run_time) for site_pair, score in erdca_DI[:5]: print(site_pair, score) with open('DI/ER/er_DI_%s.pickle'%(pfam_id), 'wb') as f: pickle.dump(erdca_DI, f) f.close() #---------------------------------------------------------------------------------------------------------------------# plotting = False if plotting: # Print details of protein PDB structure info for contact visualization print('Using chain ',pdb_chain) print('PDB ID: ', pdb_id) from pydca.contact_visualizer import contact_visualizer visualizer = contact_visualizer.DCAVisualizer('protein', pdb_chain, pdb_id, refseq_file = pp_ref_file, sorted_dca_scores = erdca_DI, linear_dist = 4, contact_dist = 8.)
contact_map_data = visualizer.plot_contact_map() #plt.show() #plt.close() tp_rate_data = visualizer.plot_true_positive_rates() #plt.show() #plt.close() #print('Contact Map: \n',contact_map_data[:10]) #print('TP Rates: \n',tp_rate_data[:10]) with open(preprocess_path+'ER_%s_contact_map_data.pickle'%(pfam_id), 'wb') as f: pickle.dump(contact_map_data, f) f.close() with open(preprocess_path+'ER_%s_tp_rate_data.pickle'%(pfam_id), 'wb') as f: pickle.dump(tp_rate_data, f) f.close()
#---------------------------------- PreProcess FASTA Alignment -------------------------------------------------------# #---------------------------------------------------------------------------------------------------------------------# preprocessed_data_outfile = preprocess_path+'MSA_%s_PreProcessed.fa'%pfam_id
random_line_split
buffer.rs
//! Buffer implementation like Bytes / BytesMut. //! //! It is simpler and contains less unsafe code. use std::default::Default; use std::fmt; use std::io::{self, Read, Write}; use std::marker::Unpin; use std::mem; use std::ops::{Deref, DerefMut}; use std::pin::Pin; use std::slice; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, ReadBuf}; /// A buffer structure, like Bytes/BytesMut. /// /// It is not much more than a wrapper around Vec. pub struct Buffer { start_offset: usize, rd_pos: usize, data: Vec<u8>, } impl Buffer { /// Create new Buffer. pub fn new() -> Buffer { Buffer { start_offset: 0, rd_pos: 0, data: Vec::new(), } } /// Create new Buffer. pub fn with_capacity(cap: usize) -> Buffer { Buffer { start_offset: 0, rd_pos: 0, data: Vec::with_capacity(Self::round_size_up(cap)), } } /// Clear this buffer. pub fn clear(&mut self) { self.start_offset = 0; self.rd_pos = 0; self.data.truncate(0); } /// Truncate this buffer. pub fn truncate(&mut self, size: usize) { if size == 0 { self.clear(); return; } if size > self.len() { panic!("Buffer::truncate(size): size > self.len()"); } if self.rd_pos > size { self.rd_pos = size; } self.data.truncate(size + self.start_offset); } pub fn bytes(&self) -> &[u8] { if self.rd_pos >= self.len() { return &[][..]; } &self.data[self.start_offset + self.rd_pos..] } /// Split this Buffer in two parts. /// /// The first part remains in this buffer. The second part is /// returned as a new Buffer. pub fn split_off(&mut self, at: usize) -> Buffer { if at > self.len() { panic!("Buffer:split_off(size): size > self.len()"); } if self.rd_pos > at { self.rd_pos = at; } // If "header" < 32K and "body" >= 32K, use a start_offset // for "body" and copy "header". if self.start_offset == 0 && at < 32000 && self.len() - at >= 32000 { let mut bnew = Buffer::with_capacity(at); mem::swap(self, &mut bnew); self.extend_from_slice(&bnew[0..at]); bnew.start_offset = at; return bnew; } let mut bnew = Buffer::new(); let bytes = self.bytes(); bnew.extend_from_slice(&bytes[at..]); self.truncate(at); bnew } /// Add data to this buffer. #[inline] pub fn extend_from_slice(&mut self, extend: &[u8]) { self.reserve(extend.len()); self.data.extend_from_slice(extend); } #[inline] fn round_size_up(size: usize) -> usize { if size < 128 { 128 } else if size < 4096 { 4096 } else if size < 65536 { 65536 } else if size < 2097152 { size.next_power_of_two() } else { (1 + size / 1048576) * 1048576 } } /// Make sure at least `size` bytes are available. #[inline] pub fn reserve(&mut self, size: usize) { let end = self.data.len() + size; if end < self.data.capacity() { return; } self.data.reserve_exact(Self::round_size_up(end) - self.data.len()); } /// total length of all data in this Buffer. #[inline] pub fn len(&self) -> usize { self.data.len() - self.start_offset } /// Split this Buffer in two parts. /// /// The second part remains in this buffer. The first part is /// returned to the caller. pub fn split_to(&mut self, size: usize) -> Buffer { let mut other = self.split_off(size); mem::swap(self, &mut other); other } /// Write all data in this `Buffer` to a file. pub fn write_all(&mut self, mut file: impl Write) -> io::Result<()> { while self.rd_pos < self.len() { let bytes = self.bytes(); let size = bytes.len(); file.write_all(bytes)?; self.rd_pos += size; } Ok(()) } /// Add text data to this buffer. #[inline] pub fn push_str(&mut self, s: &str)
/// Add a string to the buffer. #[inline] pub fn put_str(&mut self, s: impl AsRef<str>) { self.extend_from_slice(s.as_ref().as_bytes()); } /// Return a reference to this Buffer as an UTF-8 string. #[inline] pub fn as_utf8_str(&self) -> Result<&str, std::str::Utf8Error> { std::str::from_utf8(self.bytes()) } /// Convert this buffer into a Vec<u8>. pub fn into_bytes(self) -> Vec<u8> { if self.start_offset > 0 { let mut v = Vec::with_capacity(Self::round_size_up(self.len())); v.extend_from_slice(self.bytes()); v } else { self.data } } // // ===== Begin unsafe code ===== // /// Read an exact number of bytes. pub fn read_exact(&mut self, reader: &mut std::fs::File, len: usize) -> io::Result<()> { self.reserve(len); // Safety: it is safe for a std::fs::File to read into uninitialized memory. unsafe { let buf = self.spare_capacity_mut(); reader.read_exact(&mut buf[..len])?; self.advance_mut(len); } Ok(()) } unsafe fn spare_capacity_mut<T>(&mut self) -> &mut [T] { let len = self.data.len(); let spare = self.data.capacity() - len; let ptr = self.data.as_mut_ptr().add(len) as *mut T; &mut slice::from_raw_parts_mut(ptr, spare)[..] } unsafe fn advance_mut(&mut self, cnt: usize) { if self.data.len() + cnt > self.data.capacity() { panic!("Buffer::advance_mut(cnt): would advance past end of Buffer"); } self.data.set_len(self.data.len() + cnt); } pub fn poll_read<R>(&mut self, reader: Pin<&mut R>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> where R: AsyncRead + Unpin + ?Sized { // Safety: ReadBuf::uninit takes a MaybeUninit. let mut buf = ReadBuf::uninit(unsafe { self.spare_capacity_mut() }); futures::ready!(reader.poll_read(cx, &mut buf))?; let len = buf.filled().len(); // Safety: len = buf.filled().len() is guaranteed to be correct. unsafe { self.advance_mut(len); } Poll::Ready(Ok(len)) } // // ===== End unsafe code ===== // } impl bytes::Buf for Buffer { fn advance(&mut self, cnt: usize) { // advance buffer read pointer. self.rd_pos += cnt; if self.rd_pos > self.len() { // "It is recommended for implementations of advance to // panic if cnt > self.remaining()" panic!("read position advanced beyond end of buffer"); } } #[inline] fn chunk(&self) -> &[u8] { self.bytes() } #[inline] fn remaining(&self) -> usize { self.len() - self.rd_pos } } impl Deref for Buffer { type Target = [u8]; #[inline] fn deref(&self) -> &[u8] { self.bytes() } } impl DerefMut for Buffer { #[inline] fn deref_mut(&mut self) -> &mut [u8] { &mut self.data[self.start_offset + self.rd_pos..] 
} } impl fmt::Write for Buffer { fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { self.push_str(s); Ok(()) } } impl From<&[u8]> for Buffer { fn from(src: &[u8]) -> Self { let mut buffer = Buffer::new(); buffer.extend_from_slice(src); buffer } } impl From<Vec<u8>> for Buffer { fn from(src: Vec<u8>) -> Self { Buffer { start_offset: 0, rd_pos: 0, data: src, } } } impl From<&str> for Buffer { fn from(src: &str) -> Self { Buffer::from(src.as_bytes()) } } impl From<String> for Buffer { fn from(src: String) -> Self { Buffer::from(src.into_bytes()) } } impl From<bytes::Bytes> for Buffer { fn from(src: bytes::Bytes) -> Self { Buffer::from(&src[..]) } } impl Default for Buffer { fn default() -> Self { Buffer::new() } } impl fmt::Debug for Buffer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let cap = self.data.capacity(); let len = self.len(); f.debug_struct("Buffer") .field("start_offset", &self.start_offset) .field("rd_pos", &self.rd_pos) .field("len", &len) .field("capacity", &cap) .field("data", &"[data]") .finish() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_buffer() { let mut b = Buffer::new(); b.reserve(4096); b.start_offset = 23; b.data.resize(b.start_offset, 0); for _ in 0..50000 { b.put_str("xyzzyxyzzy"); } assert!(b.len() == 500000); assert!(&b[1000..1010] == &b"xyzzyxyzzy"[..]); } #[test] fn test_split() { let mut b = Buffer::new(); for _ in 0..5000 { b.put_str("xyzzyxyzzyz"); } assert!(b.len() == 55000); let mut n = b.split_off(4918); assert!(b.len() == 4918); assert!(n.len() == 50082); println!("1. {}", std::str::from_utf8(&b[1100..1110]).unwrap()); println!("2. {}", std::str::from_utf8(&n[1100..1110]).unwrap()); assert!(&b[1100..1110] == &b"xyzzyxyzzy"[..]); assert!(&n[1100..1110] == &b"yzzyxyzzyz"[..]); n.start_offset += 13; let x = n.split_to(20000); println!("3. n.len() {}", n.len()); println!("4. x.len() {}", x.len()); println!("5. {}", std::str::from_utf8(&n[1000..1010]).unwrap()); println!("6. {}", std::str::from_utf8(&x[1000..1010]).unwrap()); assert!(n.len() == 30069); assert!(x.len() == 20000); assert!(&n[1000..1010] == &b"yxyzzyzxyz"[..]); assert!(&x[1000..1010] == &b"zzyxyzzyzx"[..]); } #[test] fn test_spare() { let mut b = Buffer::with_capacity(194); assert!(b.data.capacity() == 4096); b.extend_from_slice(b"0123456789"); let buf: &mut [u8] = unsafe { b.spare_capacity_mut() }; assert!(buf.len() == 4086); } }
{ self.extend_from_slice(s.as_bytes()); }
identifier_body
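
The Buffer above behaves like a simplified BytesMut. A minimal usage sketch, assuming buffer.rs sits next to this file and is pulled in as `mod buffer` (the module layout is an assumption for illustration, not part of the original):

mod buffer; // assumes buffer.rs (above) is a sibling file
use crate::buffer::Buffer;

fn main() {
    let mut b = Buffer::new();
    b.put_str("header: 12 bytes"); // appended via extend_from_slice
    b.put_str(" and a body");

    // split_off leaves the first `at` bytes here and returns the rest.
    let body = b.split_off(16);
    assert_eq!(b.as_utf8_str().unwrap(), "header: 12 bytes");
    assert_eq!(body.as_utf8_str().unwrap(), " and a body");

    // split_to is the mirror image: it returns the first part.
    let mut rest = Buffer::from("abcdef");
    let head = rest.split_to(3);
    assert_eq!(head.as_utf8_str().unwrap(), "abc");
    assert_eq!(rest.as_utf8_str().unwrap(), "def");
}

Note the design choice inside split_off: when a small head precedes a large (>= 32K) tail, the tail is handed off without copying; only the head bytes are copied and the returned Buffer records the skip via start_offset.
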
buffer.rs
//! Buffer implementation like Bytes / BytesMut. //! //! It is simpler and contains less unsafe code. use std::default::Default; use std::fmt; use std::io::{self, Read, Write}; use std::marker::Unpin; use std::mem; use std::ops::{Deref, DerefMut}; use std::pin::Pin; use std::slice; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, ReadBuf}; /// A buffer structure, like Bytes/BytesMut. /// /// It is not much more than a wrapper around Vec. pub struct Buffer { start_offset: usize, rd_pos: usize, data: Vec<u8>, } impl Buffer { /// Create new Buffer. pub fn new() -> Buffer { Buffer { start_offset: 0, rd_pos: 0, data: Vec::new(), } } /// Create new Buffer. pub fn with_capacity(cap: usize) -> Buffer { Buffer { start_offset: 0, rd_pos: 0, data: Vec::with_capacity(Self::round_size_up(cap)), } } /// Clear this buffer. pub fn clear(&mut self) { self.start_offset = 0; self.rd_pos = 0; self.data.truncate(0); } /// Truncate this buffer. pub fn truncate(&mut self, size: usize) { if size == 0 { self.clear(); return; } if size > self.len() { panic!("Buffer::truncate(size): size > self.len()"); } if self.rd_pos > size { self.rd_pos = size; } self.data.truncate(size + self.start_offset); } pub fn bytes(&self) -> &[u8] { if self.rd_pos >= self.len() { return &[][..]; } &self.data[self.start_offset + self.rd_pos..] } /// Split this Buffer in two parts. /// /// The first part remains in this buffer. The second part is /// returned as a new Buffer. pub fn split_off(&mut self, at: usize) -> Buffer { if at > self.len() { panic!("Buffer:split_off(size): size > self.len()"); } if self.rd_pos > at { self.rd_pos = at; } // If "header" < 32K and "body" >= 32K, use a start_offset // for "body" and copy "header". if self.start_offset == 0 && at < 32000 && self.len() - at >= 32000 { let mut bnew = Buffer::with_capacity(at); mem::swap(self, &mut bnew); self.extend_from_slice(&bnew[0..at]); bnew.start_offset = at; return bnew; } let mut bnew = Buffer::new(); let bytes = self.bytes(); bnew.extend_from_slice(&bytes[at..]); self.truncate(at); bnew } /// Add data to this buffer. #[inline] pub fn extend_from_slice(&mut self, extend: &[u8]) { self.reserve(extend.len()); self.data.extend_from_slice(extend); } #[inline] fn round_size_up(size: usize) -> usize { if size < 128 { 128 } else if size < 4096 { 4096 } else if size < 65536 { 65536 } else if size < 2097152 { size.next_power_of_two() } else { (1 + size / 1048576) * 1048576 } } /// Make sure at least `size` bytes are available. #[inline] pub fn reserve(&mut self, size: usize) { let end = self.data.len() + size; if end < self.data.capacity() { return; } self.data.reserve_exact(Self::round_size_up(end) - self.data.len()); } /// total length of all data in this Buffer. #[inline] pub fn len(&self) -> usize { self.data.len() - self.start_offset } /// Split this Buffer in two parts. /// /// The second part remains in this buffer. The first part is /// returned to the caller. pub fn split_to(&mut self, size: usize) -> Buffer { let mut other = self.split_off(size); mem::swap(self, &mut other); other } /// Write all data in this `Buffer` to a file. pub fn write_all(&mut self, mut file: impl Write) -> io::Result<()> { while self.rd_pos < self.len() { let bytes = self.bytes(); let size = bytes.len(); file.write_all(bytes)?; self.rd_pos += size; } Ok(()) } /// Add text data to this buffer. #[inline] pub fn push_str(&mut self, s: &str) { self.extend_from_slice(s.as_bytes()); } /// Add a string to the buffer. 
#[inline] pub fn put_str(&mut self, s: impl AsRef<str>) { self.extend_from_slice(s.as_ref().as_bytes()); } /// Return a reference to this Buffer as an UTF-8 string. #[inline] pub fn as_utf8_str(&self) -> Result<&str, std::str::Utf8Error> { std::str::from_utf8(self.bytes()) } /// Convert this buffer into a Vec<u8>. pub fn into_bytes(self) -> Vec<u8> { if self.start_offset > 0 { let mut v = Vec::with_capacity(Self::round_size_up(self.len())); v.extend_from_slice(self.bytes()); v } else { self.data } } // // ===== Begin unsafe code ===== // /// Read an exact number of bytes. pub fn read_exact(&mut self, reader: &mut std::fs::File, len: usize) -> io::Result<()> { self.reserve(len); // Safety: it is safe for a std::fs::File to read into uninitialized memory. unsafe { let buf = self.spare_capacity_mut(); reader.read_exact(&mut buf[..len])?; self.advance_mut(len); } Ok(()) } unsafe fn spare_capacity_mut<T>(&mut self) -> &mut [T] { let len = self.data.len(); let spare = self.data.capacity() - len; let ptr = self.data.as_mut_ptr().add(len) as *mut T; &mut slice::from_raw_parts_mut(ptr, spare)[..] } unsafe fn advance_mut(&mut self, cnt: usize) { if self.data.len() + cnt > self.data.capacity() { panic!("Buffer::advance_mut(cnt): would advance past end of Buffer"); } self.data.set_len(self.data.len() + cnt); } pub fn poll_read<R>(&mut self, reader: Pin<&mut R>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> where R: AsyncRead + Unpin + ?Sized { // Safety: ReadBuf::uninit takes a MaybeUninit. let mut buf = ReadBuf::uninit(unsafe { self.spare_capacity_mut() }); futures::ready!(reader.poll_read(cx, &mut buf))?; let len = buf.filled().len(); // Safety: len = buf.filled().len() is guaranteed to be correct. unsafe { self.advance_mut(len); } Poll::Ready(Ok(len)) } // // ===== End unsafe code ===== // } impl bytes::Buf for Buffer { fn advance(&mut self, cnt: usize) { // advance buffer read pointer. self.rd_pos += cnt; if self.rd_pos > self.len() { // "It is recommended for implementations of advance to // panic if cnt > self.remaining()" panic!("read position advanced beyond end of buffer"); } } #[inline] fn chunk(&self) -> &[u8] { self.bytes() } #[inline] fn remaining(&self) -> usize { self.len() - self.rd_pos } } impl Deref for Buffer { type Target = [u8]; #[inline] fn deref(&self) -> &[u8] { self.bytes() } } impl DerefMut for Buffer { #[inline] fn deref_mut(&mut self) -> &mut [u8] { &mut self.data[self.start_offset + self.rd_pos..] } } impl fmt::Write for Buffer { fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { self.push_str(s); Ok(()) } } impl From<&[u8]> for Buffer { fn from(src: &[u8]) -> Self { let mut buffer = Buffer::new(); buffer.extend_from_slice(src); buffer } } impl From<Vec<u8>> for Buffer { fn from(src: Vec<u8>) -> Self { Buffer { start_offset: 0, rd_pos: 0, data: src, } } } impl From<&str> for Buffer { fn from(src: &str) -> Self { Buffer::from(src.as_bytes()) } } impl From<String> for Buffer { fn from(src: String) -> Self { Buffer::from(src.into_bytes()) } } impl From<bytes::Bytes> for Buffer { fn from(src: bytes::Bytes) -> Self { Buffer::from(&src[..]) } } impl Default for Buffer { fn default() -> Self { Buffer::new() } } impl fmt::Debug for Buffer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let cap = self.data.capacity(); let len = self.len(); f.debug_struct("Buffer") .field("start_offset", &self.start_offset) .field("rd_pos", &self.rd_pos) .field("len", &len) .field("capacity", &cap) .field("data", &"[data]") .finish() } }
#[test] fn test_buffer() { let mut b = Buffer::new(); b.reserve(4096); b.start_offset = 23; b.data.resize(b.start_offset, 0); for _ in 0..50000 { b.put_str("xyzzyxyzzy"); } assert!(b.len() == 500000); assert!(&b[1000..1010] == &b"xyzzyxyzzy"[..]); } #[test] fn test_split() { let mut b = Buffer::new(); for _ in 0..5000 { b.put_str("xyzzyxyzzyz"); } assert!(b.len() == 55000); let mut n = b.split_off(4918); assert!(b.len() == 4918); assert!(n.len() == 50082); println!("1. {}", std::str::from_utf8(&b[1100..1110]).unwrap()); println!("2. {}", std::str::from_utf8(&n[1100..1110]).unwrap()); assert!(&b[1100..1110] == &b"xyzzyxyzzy"[..]); assert!(&n[1100..1110] == &b"yzzyxyzzyz"[..]); n.start_offset += 13; let x = n.split_to(20000); println!("3. n.len() {}", n.len()); println!("4. x.len() {}", x.len()); println!("5. {}", std::str::from_utf8(&n[1000..1010]).unwrap()); println!("6. {}", std::str::from_utf8(&x[1000..1010]).unwrap()); assert!(n.len() == 30069); assert!(x.len() == 20000); assert!(&n[1000..1010] == &b"yxyzzyzxyz"[..]); assert!(&x[1000..1010] == &b"zzyxyzzyzx"[..]); } #[test] fn test_spare() { let mut b = Buffer::with_capacity(194); assert!(b.data.capacity() == 4096); b.extend_from_slice(b"0123456789"); let buf: &mut [u8] = unsafe { b.spare_capacity_mut() }; assert!(buf.len() == 4086); } }
#[cfg(test)] mod tests { use super::*;
random_line_split
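
Buffer::read_exact above deliberately reads into uninitialized spare capacity to skip a memset. For contrast, a safe std-only sketch of the same operation (the name read_exact_safe is illustrative, not part of the original) zero-fills first and rolls back on error:

use std::io::{self, Read};

// Safe alternative to Buffer::read_exact: zero-init the tail, then read into it.
// Costs one memset of `len` bytes, which is exactly what the unsafe path avoids.
fn read_exact_safe(reader: &mut impl Read, v: &mut Vec<u8>, len: usize) -> io::Result<()> {
    let old_len = v.len();
    v.resize(old_len + len, 0); // zero-initialize the new tail
    match reader.read_exact(&mut v[old_len..]) {
        Ok(()) => Ok(()),
        Err(e) => {
            v.truncate(old_len); // roll back so the buffer is unchanged on error
            Err(e)
        }
    }
}
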
buffer.rs
//! Buffer implementation like Bytes / BytesMut. //! //! It is simpler and contains less unsafe code. use std::default::Default; use std::fmt; use std::io::{self, Read, Write}; use std::marker::Unpin; use std::mem; use std::ops::{Deref, DerefMut}; use std::pin::Pin; use std::slice; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, ReadBuf}; /// A buffer structure, like Bytes/BytesMut. /// /// It is not much more than a wrapper around Vec. pub struct Buffer { start_offset: usize, rd_pos: usize, data: Vec<u8>, } impl Buffer { /// Create new Buffer. pub fn new() -> Buffer { Buffer { start_offset: 0, rd_pos: 0, data: Vec::new(), } } /// Create new Buffer. pub fn with_capacity(cap: usize) -> Buffer { Buffer { start_offset: 0, rd_pos: 0, data: Vec::with_capacity(Self::round_size_up(cap)), } } /// Clear this buffer. pub fn clear(&mut self) { self.start_offset = 0; self.rd_pos = 0; self.data.truncate(0); } /// Truncate this buffer. pub fn
(&mut self, size: usize) { if size == 0 { self.clear(); return; } if size > self.len() { panic!("Buffer::truncate(size): size > self.len()"); } if self.rd_pos > size { self.rd_pos = size; } self.data.truncate(size + self.start_offset); } pub fn bytes(&self) -> &[u8] { if self.rd_pos >= self.len() { return &[][..]; } &self.data[self.start_offset + self.rd_pos..] } /// Split this Buffer in two parts. /// /// The first part remains in this buffer. The second part is /// returned as a new Buffer. pub fn split_off(&mut self, at: usize) -> Buffer { if at > self.len() { panic!("Buffer:split_off(size): size > self.len()"); } if self.rd_pos > at { self.rd_pos = at; } // If "header" < 32K and "body" >= 32K, use a start_offset // for "body" and copy "header". if self.start_offset == 0 && at < 32000 && self.len() - at >= 32000 { let mut bnew = Buffer::with_capacity(at); mem::swap(self, &mut bnew); self.extend_from_slice(&bnew[0..at]); bnew.start_offset = at; return bnew; } let mut bnew = Buffer::new(); let bytes = self.bytes(); bnew.extend_from_slice(&bytes[at..]); self.truncate(at); bnew } /// Add data to this buffer. #[inline] pub fn extend_from_slice(&mut self, extend: &[u8]) { self.reserve(extend.len()); self.data.extend_from_slice(extend); } #[inline] fn round_size_up(size: usize) -> usize { if size < 128 { 128 } else if size < 4096 { 4096 } else if size < 65536 { 65536 } else if size < 2097152 { size.next_power_of_two() } else { (1 + size / 1048576) * 1048576 } } /// Make sure at least `size` bytes are available. #[inline] pub fn reserve(&mut self, size: usize) { let end = self.data.len() + size; if end < self.data.capacity() { return; } self.data.reserve_exact(Self::round_size_up(end) - self.data.len()); } /// total length of all data in this Buffer. #[inline] pub fn len(&self) -> usize { self.data.len() - self.start_offset } /// Split this Buffer in two parts. /// /// The second part remains in this buffer. The first part is /// returned to the caller. pub fn split_to(&mut self, size: usize) -> Buffer { let mut other = self.split_off(size); mem::swap(self, &mut other); other } /// Write all data in this `Buffer` to a file. pub fn write_all(&mut self, mut file: impl Write) -> io::Result<()> { while self.rd_pos < self.len() { let bytes = self.bytes(); let size = bytes.len(); file.write_all(bytes)?; self.rd_pos += size; } Ok(()) } /// Add text data to this buffer. #[inline] pub fn push_str(&mut self, s: &str) { self.extend_from_slice(s.as_bytes()); } /// Add a string to the buffer. #[inline] pub fn put_str(&mut self, s: impl AsRef<str>) { self.extend_from_slice(s.as_ref().as_bytes()); } /// Return a reference to this Buffer as an UTF-8 string. #[inline] pub fn as_utf8_str(&self) -> Result<&str, std::str::Utf8Error> { std::str::from_utf8(self.bytes()) } /// Convert this buffer into a Vec<u8>. pub fn into_bytes(self) -> Vec<u8> { if self.start_offset > 0 { let mut v = Vec::with_capacity(Self::round_size_up(self.len())); v.extend_from_slice(self.bytes()); v } else { self.data } } // // ===== Begin unsafe code ===== // /// Read an exact number of bytes. pub fn read_exact(&mut self, reader: &mut std::fs::File, len: usize) -> io::Result<()> { self.reserve(len); // Safety: it is safe for a std::fs::File to read into uninitialized memory. 
unsafe { let buf = self.spare_capacity_mut(); reader.read_exact(&mut buf[..len])?; self.advance_mut(len); } Ok(()) } unsafe fn spare_capacity_mut<T>(&mut self) -> &mut [T] { let len = self.data.len(); let spare = self.data.capacity() - len; let ptr = self.data.as_mut_ptr().add(len) as *mut T; &mut slice::from_raw_parts_mut(ptr, spare)[..] } unsafe fn advance_mut(&mut self, cnt: usize) { if self.data.len() + cnt > self.data.capacity() { panic!("Buffer::advance_mut(cnt): would advance past end of Buffer"); } self.data.set_len(self.data.len() + cnt); } pub fn poll_read<R>(&mut self, reader: Pin<&mut R>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> where R: AsyncRead + Unpin + ?Sized { // Safety: ReadBuf::uninit takes a MaybeUninit. let mut buf = ReadBuf::uninit(unsafe { self.spare_capacity_mut() }); futures::ready!(reader.poll_read(cx, &mut buf))?; let len = buf.filled().len(); // Safety: len = buf.filled().len() is guaranteed to be correct. unsafe { self.advance_mut(len); } Poll::Ready(Ok(len)) } // // ===== End unsafe code ===== // } impl bytes::Buf for Buffer { fn advance(&mut self, cnt: usize) { // advance buffer read pointer. self.rd_pos += cnt; if self.rd_pos > self.len() { // "It is recommended for implementations of advance to // panic if cnt > self.remaining()" panic!("read position advanced beyond end of buffer"); } } #[inline] fn chunk(&self) -> &[u8] { self.bytes() } #[inline] fn remaining(&self) -> usize { self.len() - self.rd_pos } } impl Deref for Buffer { type Target = [u8]; #[inline] fn deref(&self) -> &[u8] { self.bytes() } } impl DerefMut for Buffer { #[inline] fn deref_mut(&mut self) -> &mut [u8] { &mut self.data[self.start_offset + self.rd_pos..] } } impl fmt::Write for Buffer { fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { self.push_str(s); Ok(()) } } impl From<&[u8]> for Buffer { fn from(src: &[u8]) -> Self { let mut buffer = Buffer::new(); buffer.extend_from_slice(src); buffer } } impl From<Vec<u8>> for Buffer { fn from(src: Vec<u8>) -> Self { Buffer { start_offset: 0, rd_pos: 0, data: src, } } } impl From<&str> for Buffer { fn from(src: &str) -> Self { Buffer::from(src.as_bytes()) } } impl From<String> for Buffer { fn from(src: String) -> Self { Buffer::from(src.into_bytes()) } } impl From<bytes::Bytes> for Buffer { fn from(src: bytes::Bytes) -> Self { Buffer::from(&src[..]) } } impl Default for Buffer { fn default() -> Self { Buffer::new() } } impl fmt::Debug for Buffer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let cap = self.data.capacity(); let len = self.len(); f.debug_struct("Buffer") .field("start_offset", &self.start_offset) .field("rd_pos", &self.rd_pos) .field("len", &len) .field("capacity", &cap) .field("data", &"[data]") .finish() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_buffer() { let mut b = Buffer::new(); b.reserve(4096); b.start_offset = 23; b.data.resize(b.start_offset, 0); for _ in 0..50000 { b.put_str("xyzzyxyzzy"); } assert!(b.len() == 500000); assert!(&b[1000..1010] == &b"xyzzyxyzzy"[..]); } #[test] fn test_split() { let mut b = Buffer::new(); for _ in 0..5000 { b.put_str("xyzzyxyzzyz"); } assert!(b.len() == 55000); let mut n = b.split_off(4918); assert!(b.len() == 4918); assert!(n.len() == 50082); println!("1. {}", std::str::from_utf8(&b[1100..1110]).unwrap()); println!("2. {}", std::str::from_utf8(&n[1100..1110]).unwrap()); assert!(&b[1100..1110] == &b"xyzzyxyzzy"[..]); assert!(&n[1100..1110] == &b"yzzyxyzzyz"[..]); n.start_offset += 13; let x = n.split_to(20000); println!("3. 
n.len() {}", n.len()); println!("4. x.len() {}", x.len()); println!("5. {}", std::str::from_utf8(&n[1000..1010]).unwrap()); println!("6. {}", std::str::from_utf8(&x[1000..1010]).unwrap()); assert!(n.len() == 30069); assert!(x.len() == 20000); assert!(&n[1000..1010] == &b"yxyzzyzxyz"[..]); assert!(&x[1000..1010] == &b"zzyxyzzyzx"[..]); } #[test] fn test_spare() { let mut b = Buffer::with_capacity(194); assert!(b.data.capacity() == 4096); b.extend_from_slice(b"0123456789"); let buf: &mut [u8] = unsafe { b.spare_capacity_mut() }; assert!(buf.len() == 4086); } }
truncate
identifier_name
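
round_size_up is the allocation policy behind reserve: small buffers snap to fixed tiers (128 B, 4 KiB, 64 KiB), mid-sized ones round to the next power of two, and anything at or above 2 MiB rounds to whole mebibytes. A standalone copy with spot checks, duplicated here purely for illustration:

fn round_size_up(size: usize) -> usize {
    if size < 128 {
        128
    } else if size < 4096 {
        4096
    } else if size < 65536 {
        65536
    } else if size < 2097152 {
        size.next_power_of_two()
    } else {
        (1 + size / 1048576) * 1048576
    }
}

fn main() {
    assert_eq!(round_size_up(1), 128);               // tiny -> 128-byte tier
    assert_eq!(round_size_up(200), 4096);            // small -> one 4 KiB page
    assert_eq!(round_size_up(70_000), 131_072);      // mid -> next power of two
    assert_eq!(round_size_up(3_000_000), 3_145_728); // large -> next whole MiB
}
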
buffer.rs
//! Buffer implementation like Bytes / BytesMut. //! //! It is simpler and contains less unsafe code. use std::default::Default; use std::fmt; use std::io::{self, Read, Write}; use std::marker::Unpin; use std::mem; use std::ops::{Deref, DerefMut}; use std::pin::Pin; use std::slice; use std::task::{Context, Poll}; use tokio::io::{AsyncRead, ReadBuf}; /// A buffer structure, like Bytes/BytesMut. /// /// It is not much more than a wrapper around Vec. pub struct Buffer { start_offset: usize, rd_pos: usize, data: Vec<u8>, } impl Buffer { /// Create new Buffer. pub fn new() -> Buffer { Buffer { start_offset: 0, rd_pos: 0, data: Vec::new(), } } /// Create new Buffer. pub fn with_capacity(cap: usize) -> Buffer { Buffer { start_offset: 0, rd_pos: 0, data: Vec::with_capacity(Self::round_size_up(cap)), } } /// Clear this buffer. pub fn clear(&mut self) { self.start_offset = 0; self.rd_pos = 0; self.data.truncate(0); } /// Truncate this buffer. pub fn truncate(&mut self, size: usize) { if size == 0 { self.clear(); return; } if size > self.len() { panic!("Buffer::truncate(size): size > self.len()"); } if self.rd_pos > size { self.rd_pos = size; } self.data.truncate(size + self.start_offset); } pub fn bytes(&self) -> &[u8] { if self.rd_pos >= self.len() { return &[][..]; } &self.data[self.start_offset + self.rd_pos..] } /// Split this Buffer in two parts. /// /// The first part remains in this buffer. The second part is /// returned as a new Buffer. pub fn split_off(&mut self, at: usize) -> Buffer { if at > self.len() { panic!("Buffer:split_off(size): size > self.len()"); } if self.rd_pos > at { self.rd_pos = at; } // If "header" < 32K and "body" >= 32K, use a start_offset // for "body" and copy "header". if self.start_offset == 0 && at < 32000 && self.len() - at >= 32000 { let mut bnew = Buffer::with_capacity(at); mem::swap(self, &mut bnew); self.extend_from_slice(&bnew[0..at]); bnew.start_offset = at; return bnew; } let mut bnew = Buffer::new(); let bytes = self.bytes(); bnew.extend_from_slice(&bytes[at..]); self.truncate(at); bnew } /// Add data to this buffer. #[inline] pub fn extend_from_slice(&mut self, extend: &[u8]) { self.reserve(extend.len()); self.data.extend_from_slice(extend); } #[inline] fn round_size_up(size: usize) -> usize { if size < 128 { 128 } else if size < 4096 { 4096 } else if size < 65536 { 65536 } else if size < 2097152 { size.next_power_of_two() } else { (1 + size / 1048576) * 1048576 } } /// Make sure at least `size` bytes are available. #[inline] pub fn reserve(&mut self, size: usize) { let end = self.data.len() + size; if end < self.data.capacity() { return; } self.data.reserve_exact(Self::round_size_up(end) - self.data.len()); } /// total length of all data in this Buffer. #[inline] pub fn len(&self) -> usize { self.data.len() - self.start_offset } /// Split this Buffer in two parts. /// /// The second part remains in this buffer. The first part is /// returned to the caller. pub fn split_to(&mut self, size: usize) -> Buffer { let mut other = self.split_off(size); mem::swap(self, &mut other); other } /// Write all data in this `Buffer` to a file. pub fn write_all(&mut self, mut file: impl Write) -> io::Result<()> { while self.rd_pos < self.len() { let bytes = self.bytes(); let size = bytes.len(); file.write_all(bytes)?; self.rd_pos += size; } Ok(()) } /// Add text data to this buffer. #[inline] pub fn push_str(&mut self, s: &str) { self.extend_from_slice(s.as_bytes()); } /// Add a string to the buffer. 
#[inline] pub fn put_str(&mut self, s: impl AsRef<str>) { self.extend_from_slice(s.as_ref().as_bytes()); } /// Return a reference to this Buffer as an UTF-8 string. #[inline] pub fn as_utf8_str(&self) -> Result<&str, std::str::Utf8Error> { std::str::from_utf8(self.bytes()) } /// Convert this buffer into a Vec<u8>. pub fn into_bytes(self) -> Vec<u8> { if self.start_offset > 0 { let mut v = Vec::with_capacity(Self::round_size_up(self.len())); v.extend_from_slice(self.bytes()); v } else { self.data } } // // ===== Begin unsafe code ===== // /// Read an exact number of bytes. pub fn read_exact(&mut self, reader: &mut std::fs::File, len: usize) -> io::Result<()> { self.reserve(len); // Safety: it is safe for a std::fs::File to read into uninitialized memory. unsafe { let buf = self.spare_capacity_mut(); reader.read_exact(&mut buf[..len])?; self.advance_mut(len); } Ok(()) } unsafe fn spare_capacity_mut<T>(&mut self) -> &mut [T] { let len = self.data.len(); let spare = self.data.capacity() - len; let ptr = self.data.as_mut_ptr().add(len) as *mut T; &mut slice::from_raw_parts_mut(ptr, spare)[..] } unsafe fn advance_mut(&mut self, cnt: usize) { if self.data.len() + cnt > self.data.capacity() { panic!("Buffer::advance_mut(cnt): would advance past end of Buffer"); } self.data.set_len(self.data.len() + cnt); } pub fn poll_read<R>(&mut self, reader: Pin<&mut R>, cx: &mut Context<'_>) -> Poll<io::Result<usize>> where R: AsyncRead + Unpin + ?Sized { // Safety: ReadBuf::uninit takes a MaybeUninit. let mut buf = ReadBuf::uninit(unsafe { self.spare_capacity_mut() }); futures::ready!(reader.poll_read(cx, &mut buf))?; let len = buf.filled().len(); // Safety: len = buf.filled().len() is guaranteed to be correct. unsafe { self.advance_mut(len); } Poll::Ready(Ok(len)) } // // ===== End unsafe code ===== // } impl bytes::Buf for Buffer { fn advance(&mut self, cnt: usize) { // advance buffer read pointer. self.rd_pos += cnt; if self.rd_pos > self.len()
} #[inline] fn chunk(&self) -> &[u8] { self.bytes() } #[inline] fn remaining(&self) -> usize { self.len() - self.rd_pos } } impl Deref for Buffer { type Target = [u8]; #[inline] fn deref(&self) -> &[u8] { self.bytes() } } impl DerefMut for Buffer { #[inline] fn deref_mut(&mut self) -> &mut [u8] { &mut self.data[self.start_offset + self.rd_pos..] } } impl fmt::Write for Buffer { fn write_str(&mut self, s: &str) -> Result<(), fmt::Error> { self.push_str(s); Ok(()) } } impl From<&[u8]> for Buffer { fn from(src: &[u8]) -> Self { let mut buffer = Buffer::new(); buffer.extend_from_slice(src); buffer } } impl From<Vec<u8>> for Buffer { fn from(src: Vec<u8>) -> Self { Buffer { start_offset: 0, rd_pos: 0, data: src, } } } impl From<&str> for Buffer { fn from(src: &str) -> Self { Buffer::from(src.as_bytes()) } } impl From<String> for Buffer { fn from(src: String) -> Self { Buffer::from(src.into_bytes()) } } impl From<bytes::Bytes> for Buffer { fn from(src: bytes::Bytes) -> Self { Buffer::from(&src[..]) } } impl Default for Buffer { fn default() -> Self { Buffer::new() } } impl fmt::Debug for Buffer { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let cap = self.data.capacity(); let len = self.len(); f.debug_struct("Buffer") .field("start_offset", &self.start_offset) .field("rd_pos", &self.rd_pos) .field("len", &len) .field("capacity", &cap) .field("data", &"[data]") .finish() } } #[cfg(test)] mod tests { use super::*; #[test] fn test_buffer() { let mut b = Buffer::new(); b.reserve(4096); b.start_offset = 23; b.data.resize(b.start_offset, 0); for _ in 0..50000 { b.put_str("xyzzyxyzzy"); } assert!(b.len() == 500000); assert!(&b[1000..1010] == &b"xyzzyxyzzy"[..]); } #[test] fn test_split() { let mut b = Buffer::new(); for _ in 0..5000 { b.put_str("xyzzyxyzzyz"); } assert!(b.len() == 55000); let mut n = b.split_off(4918); assert!(b.len() == 4918); assert!(n.len() == 50082); println!("1. {}", std::str::from_utf8(&b[1100..1110]).unwrap()); println!("2. {}", std::str::from_utf8(&n[1100..1110]).unwrap()); assert!(&b[1100..1110] == &b"xyzzyxyzzy"[..]); assert!(&n[1100..1110] == &b"yzzyxyzzyz"[..]); n.start_offset += 13; let x = n.split_to(20000); println!("3. n.len() {}", n.len()); println!("4. x.len() {}", x.len()); println!("5. {}", std::str::from_utf8(&n[1000..1010]).unwrap()); println!("6. {}", std::str::from_utf8(&x[1000..1010]).unwrap()); assert!(n.len() == 30069); assert!(x.len() == 20000); assert!(&n[1000..1010] == &b"yxyzzyzxyz"[..]); assert!(&x[1000..1010] == &b"zzyxyzzyzx"[..]); } #[test] fn test_spare() { let mut b = Buffer::with_capacity(194); assert!(b.data.capacity() == 4096); b.extend_from_slice(b"0123456789"); let buf: &mut [u8] = unsafe { b.spare_capacity_mut() }; assert!(buf.len() == 4086); } }
{ // "It is recommended for implementations of advance to // panic if cnt > self.remaining()" panic!("read position advanced beyond end of buffer"); }
conditional_block
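
Because Buffer implements bytes::Buf, the crate's cursor-style consumption works against rd_pos. A small sketch, assuming the bytes crate as a dependency (which the impl above already requires) and buffer.rs as `mod buffer`:

mod buffer; // assumes buffer.rs (above) is a sibling file
use bytes::Buf; // brings advance/chunk/remaining into scope

fn main() {
    let mut b = buffer::Buffer::from("hello world");
    assert_eq!(b.remaining(), 11);
    assert_eq!(b.chunk(), &b"hello world"[..]);

    b.advance(6); // consume "hello "
    assert_eq!(b.remaining(), 5);
    assert_eq!(b.chunk(), &b"world"[..]);
    // Advancing past the end would panic, per the Buf contract quoted above.
}
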
udp.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. use { crate::{fastboot::InterfaceFactory, target::Target}, anyhow::{anyhow, bail, Context as _, Result}, async_io::Async, async_net::UdpSocket, async_trait::async_trait, byteorder::{BigEndian, ByteOrder}, futures::{ io::{AsyncRead, AsyncWrite}, task::{Context, Poll}, Future, }, std::io::ErrorKind, std::net::SocketAddr, std::num::Wrapping, std::pin::Pin, std::time::Duration, timeout::timeout, zerocopy::{byteorder::big_endian::U16, ByteSlice, FromBytes, LayoutVerified, Unaligned}, }; const HOST_PORT: u16 = 5554; const REPLY_TIMEOUT: Duration = Duration::from_millis(500); const MAX_SIZE: u16 = 2048; // Maybe handle larger? enum PacketType { Error, Query, Init, Fastboot, } #[derive(FromBytes, Unaligned)] #[repr(C)] struct Header { id: u8, flags: u8, sequence: U16, } struct Packet<B: ByteSlice> { header: LayoutVerified<B, Header>, data: B, } impl<B: ByteSlice> Packet<B> { fn parse(bytes: B) -> Option<Packet<B>> { let (header, data) = LayoutVerified::new_from_prefix(bytes)?; Some(Self { header, data }) } #[allow(dead_code)] fn is_continuation(&self) -> bool { self.header.flags & 0x001 != 0 } fn packet_type(&self) -> Result<PacketType> { match self.header.id { 0x00 => Ok(PacketType::Error), 0x01 => Ok(PacketType::Query), 0x02 => Ok(PacketType::Init), 0x03 => Ok(PacketType::Fastboot), _ => bail!("Unknown packet type"), } } } pub struct UdpNetworkInterface { maximum_size: u16, sequence: Wrapping<u16>, socket: UdpSocket, read_task: Option<Pin<Box<dyn Future<Output = std::io::Result<(usize, Vec<u8>)>>>>>, write_task: Option<Pin<Box<dyn Future<Output = std::io::Result<usize>>>>>, } impl UdpNetworkInterface { fn create_fastboot_packets(&mut self, buf: &[u8]) -> Result<Vec<Vec<u8>>> { // Leave four bytes for the header. let header_size = std::mem::size_of::<Header>() as u16; let max_chunk_size = self.maximum_size - header_size; let mut seq = self.sequence; let mut result = Vec::new(); let mut iter = buf.chunks(max_chunk_size.into()).peekable(); while let Some(chunk) = iter.next() { let mut packet: Vec<u8> = Vec::with_capacity(chunk.len() + header_size as usize); packet.push(0x03); if iter.peek().is_none() { packet.push(0x00); } else { packet.push(0x01); // Mark as continuation. 
} for _ in 0..2 { packet.push(0); } BigEndian::write_u16(&mut packet[2..4], seq.0); seq += Wrapping(1u16); packet.extend_from_slice(chunk); result.push(packet); } Ok(result) } } impl AsyncRead for UdpNetworkInterface { fn poll_read( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &mut [u8], ) -> Poll<std::io::Result<usize>> { if self.read_task.is_none() { let socket = self.socket.clone(); let seq = self.sequence; self.read_task.replace(Box::pin(async move { let (out_buf, sz) = send_to_device(&make_empty_fastboot_packet(seq.0), &socket) .await .map_err(|e| { std::io::Error::new( ErrorKind::Other, format!("Could not send empty fastboot packet to device: {}", e), ) })?; let packet = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new( ErrorKind::Other, format!("Could not parse response packet"), ))?; let mut buf_inner = Vec::new(); match packet.packet_type() { Ok(PacketType::Fastboot) => { let size = packet.data.len(); buf_inner.extend(packet.data); Ok((size, buf_inner)) } _ => Err(std::io::Error::new( ErrorKind::Other, format!("Unexpected reply from device"), )), } })); } if let Some(ref mut task) = self.read_task { match task.as_mut().poll(cx) { Poll::Ready(Ok((sz, out_buf))) => { self.read_task = None; for i in 0..sz { buf[i] = out_buf[i]; } self.sequence += Wrapping(1u16); Poll::Ready(Ok(sz)) } Poll::Ready(Err(e)) => { self.read_task = None; Poll::Ready(Err(e)) } Poll::Pending => Poll::Pending, } } else { // Really shouldn't get here Poll::Ready(Err(std::io::Error::new( ErrorKind::Other, format!("Could not add async task to read"), ))) } } } impl AsyncWrite for UdpNetworkInterface { fn poll_write( mut self: Pin<&mut Self>, cx: &mut Context<'_>, buf: &[u8], ) -> Poll<std::io::Result<usize>> { if self.write_task.is_none()
if let Some(ref mut task) = self.write_task { match task.as_mut().poll(cx) { Poll::Ready(Ok(s)) => { self.write_task = None; for _i in 0..s { self.sequence += Wrapping(1u16); } Poll::Ready(Ok(buf.len())) } Poll::Ready(Err(e)) => { self.write_task = None; Poll::Ready(Err(e)) } Poll::Pending => Poll::Pending, } } else { // Really shouldn't get here Poll::Ready(Err(std::io::Error::new( ErrorKind::Other, format!("Could not add async task to write"), ))) } } fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> { unimplemented!(); } fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> { unimplemented!(); } } async fn send_to_device(buf: &[u8], socket: &UdpSocket) -> Result<([u8; 1500], usize)> { // Try sending twice socket.send(buf).await?; match wait_for_response(socket).await { Ok(r) => Ok(r), Err(e) => { tracing::error!("Could not get reply from Fastboot device - trying again: {}", e); socket.send(buf).await?; wait_for_response(socket) .await .or_else(|e| bail!("Did not get reply from Fastboot device: {}", e)) } } } async fn wait_for_response(socket: &UdpSocket) -> Result<([u8; 1500], usize)> { let mut buf = [0u8; 1500]; // Responses should never get this big. timeout(REPLY_TIMEOUT, Box::pin(socket.recv(&mut buf[..]))) .await .map_err(|_| anyhow!("Timed out waiting for reply"))? .map_err(|e| anyhow!("Recv error: {}", e)) .map(|size| (buf, size)) } async fn make_sender_socket(addr: SocketAddr) -> Result<UdpSocket> { let socket: std::net::UdpSocket = match addr { SocketAddr::V4(ref _saddr) => socket2::Socket::new( socket2::Domain::IPV4, socket2::Type::DGRAM, Some(socket2::Protocol::UDP), ) .context("construct datagram socket")?, SocketAddr::V6(ref _saddr) => socket2::Socket::new( socket2::Domain::IPV6, socket2::Type::DGRAM, Some(socket2::Protocol::UDP), ) .context("construct datagram socket")?, } .into(); let result: UdpSocket = Async::new(socket)?.into(); result.connect(addr).await.context("connect to remote address")?; Ok(result) } fn make_query_packet() -> [u8; 4] { let mut packet = [0u8; 4]; packet[0] = 0x01; packet } fn make_init_packet(sequence: u16) -> [u8; 8] { let mut packet = [0u8; 8]; packet[0] = 0x02; packet[1] = 0x00; BigEndian::write_u16(&mut packet[2..4], sequence); BigEndian::write_u16(&mut packet[4..6], 1); BigEndian::write_u16(&mut packet[6..8], MAX_SIZE); packet } fn make_empty_fastboot_packet(sequence: u16) -> [u8; 4] { let mut packet = [0u8; 4]; packet[0] = 0x03; packet[1] = 0x00; BigEndian::write_u16(&mut packet[2..4], sequence); packet } pub struct UdpNetworkFactory {} impl UdpNetworkFactory { pub fn new() -> Self { Self {} } } #[async_trait(?Send)] impl InterfaceFactory<UdpNetworkInterface> for UdpNetworkFactory { async fn open(&mut self, target: &Target) -> Result<UdpNetworkInterface> { let addr = target.fastboot_address().ok_or(anyhow!("No network address for fastboot"))?.0; let mut to_sock: SocketAddr = addr.into(); // TODO(fxb/78977): get the port from the mdns packet to_sock.set_port(HOST_PORT); let socket = make_sender_socket(to_sock).await?; let (buf, sz) = send_to_device(&make_query_packet(), &socket) .await .map_err(|e| anyhow!("Sending error: {}", e))?; let packet = Packet::parse(&buf[..sz]).ok_or(anyhow!("Could not parse response packet."))?; let sequence = match packet.packet_type() { Ok(PacketType::Query) => BigEndian::read_u16(&packet.data), _ => bail!("Unexpected response to query packet"), }; let (buf, sz) = send_to_device(&make_init_packet(sequence), &socket) .await .map_err(|e| 
anyhow!("Sending error: {}", e))?; let packet = Packet::parse(&buf[..sz]).ok_or(anyhow!("Could not parse response packet."))?; let (version, max) = match packet.packet_type() { Ok(PacketType::Init) => { (BigEndian::read_u16(&packet.data[..2]), BigEndian::read_u16(&packet.data[2..4])) } _ => bail!("Unexpected response to init packet"), }; let maximum_size = std::cmp::min(max, MAX_SIZE); tracing::debug!( "Fastboot over UDP connection established. Version {}. Max Size: {}", version, maximum_size ); Ok(UdpNetworkInterface { socket, maximum_size, sequence: Wrapping(sequence + 1), read_task: None, write_task: None, }) } async fn close(&self) {} }
{ // TODO(fxb/78975): unfortunately the Task requires the 'static lifetime so we have to // copy the bytes and move them into the async block. let packets = self.create_fastboot_packets(buf).map_err(|e| { std::io::Error::new( ErrorKind::Other, format!("Could not create fastboot packets: {}", e), ) })?; let socket = self.socket.clone(); self.write_task.replace(Box::pin(async move { for packet in &packets { let (out_buf, sz) = send_to_device(&packet, &socket).await.map_err(|e| { std::io::Error::new( ErrorKind::Other, format!("Could not send emtpy fastboot packet to device: {}", e), ) })?; let response = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new( ErrorKind::Other, format!("Could not parse response packet"), ))?; match response.packet_type() { Ok(PacketType::Fastboot) => (), _ => { return Err(std::io::Error::new( ErrorKind::Other, format!("Unexpected Response packet"), )) } } } Ok(packets.len()) })); }
conditional_block
udp.rs
// Copyright 2021 The Fuchsia Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

use {
    crate::{fastboot::InterfaceFactory, target::Target},
    anyhow::{anyhow, bail, Context as _, Result},
    async_io::Async,
    async_net::UdpSocket,
    async_trait::async_trait,
    byteorder::{BigEndian, ByteOrder},
    futures::{
        io::{AsyncRead, AsyncWrite},
        task::{Context, Poll},
        Future,
    },
    std::io::ErrorKind,
    std::net::SocketAddr,
    std::num::Wrapping,
    std::pin::Pin,
    std::time::Duration,
    timeout::timeout,
    zerocopy::{byteorder::big_endian::U16, ByteSlice, FromBytes, LayoutVerified, Unaligned},
};

const HOST_PORT: u16 = 5554;
const REPLY_TIMEOUT: Duration = Duration::from_millis(500);
const MAX_SIZE: u16 = 2048; // Maybe handle larger?

enum PacketType {
    Error,
    Query,
    Init,
    Fastboot,
}

// Four-byte wire header: packet id, flags, and a big-endian sequence number.
#[derive(FromBytes, Unaligned)]
#[repr(C)]
struct Header {
    id: u8,
    flags: u8,
    sequence: U16,
}

struct Packet<B: ByteSlice> {
    header: LayoutVerified<B, Header>,
    data: B,
}

impl<B: ByteSlice> Packet<B> {
    fn parse(bytes: B) -> Option<Packet<B>> {
        let (header, data) = LayoutVerified::new_from_prefix(bytes)?;
        Some(Self { header, data })
    }

    #[allow(dead_code)]
    fn is_continuation(&self) -> bool {
        self.header.flags & 0x001 != 0
    }

    fn packet_type(&self) -> Result<PacketType> {
        match self.header.id {
            0x00 => Ok(PacketType::Error),
            0x01 => Ok(PacketType::Query),
            0x02 => Ok(PacketType::Init),
            0x03 => Ok(PacketType::Fastboot),
            _ => bail!("Unknown packet type"),
        }
    }
}

pub struct UdpNetworkInterface {
    maximum_size: u16,
    sequence: Wrapping<u16>,
    socket: UdpSocket,
    read_task: Option<Pin<Box<dyn Future<Output = std::io::Result<(usize, Vec<u8>)>>>>>,
    write_task: Option<Pin<Box<dyn Future<Output = std::io::Result<usize>>>>>,
}

impl UdpNetworkInterface {
    fn create_fastboot_packets(&mut self, buf: &[u8]) -> Result<Vec<Vec<u8>>> {
        // Leave four bytes for the header.
        let header_size = std::mem::size_of::<Header>() as u16;
        let max_chunk_size = self.maximum_size - header_size;
        let mut seq = self.sequence;
        let mut result = Vec::new();
        let mut iter = buf.chunks(max_chunk_size.into()).peekable();
        while let Some(chunk) = iter.next() {
            let mut packet: Vec<u8> = Vec::with_capacity(chunk.len() + header_size as usize);
            packet.push(0x03);
            if iter.peek().is_none() {
                packet.push(0x00);
            } else {
                packet.push(0x01); // Mark as continuation.
            }
            for _ in 0..2 {
                packet.push(0);
            }
            BigEndian::write_u16(&mut packet[2..4], seq.0);
            seq += Wrapping(1u16);
            packet.extend_from_slice(chunk);
            result.push(packet);
        }
        Ok(result)
    }
}

impl AsyncRead for UdpNetworkInterface {
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut [u8],
    ) -> Poll<std::io::Result<usize>> {
        if self.read_task.is_none() {
            let socket = self.socket.clone();
            let seq = self.sequence;
            self.read_task.replace(Box::pin(async move {
                let (out_buf, sz) = send_to_device(&make_empty_fastboot_packet(seq.0), &socket)
                    .await
                    .map_err(|e| {
                        std::io::Error::new(
                            ErrorKind::Other,
                            format!("Could not send empty fastboot packet to device: {}", e),
                        )
                    })?;
                let packet = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
                    ErrorKind::Other,
                    "Could not parse response packet",
                ))?;
                let mut buf_inner = Vec::new();
                match packet.packet_type() {
                    Ok(PacketType::Fastboot) => {
                        let size = packet.data.len();
                        buf_inner.extend(packet.data);
                        Ok((size, buf_inner))
                    }
                    _ => Err(std::io::Error::new(
                        ErrorKind::Other,
                        "Unexpected reply from device",
                    )),
                }
            }));
        }
        if let Some(ref mut task) = self.read_task {
            match task.as_mut().poll(cx) {
                Poll::Ready(Ok((sz, out_buf))) => {
                    self.read_task = None;
                    buf[..sz].copy_from_slice(&out_buf[..sz]);
                    self.sequence += Wrapping(1u16);
                    Poll::Ready(Ok(sz))
                }
                Poll::Ready(Err(e)) => {
                    self.read_task = None;
                    Poll::Ready(Err(e))
                }
                Poll::Pending => Poll::Pending,
            }
        } else {
            // Really shouldn't get here.
            Poll::Ready(Err(std::io::Error::new(
                ErrorKind::Other,
                "Could not add async task to read",
            )))
        }
    }
}

impl AsyncWrite for UdpNetworkInterface {
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<std::io::Result<usize>> {
        if self.write_task.is_none() {
            // TODO(fxb/78975): unfortunately the Task requires the 'static lifetime so we have to
            // copy the bytes and move them into the async block.
            let packets = self.create_fastboot_packets(buf).map_err(|e| {
                std::io::Error::new(
                    ErrorKind::Other,
                    format!("Could not create fastboot packets: {}", e),
                )
            })?;
            let socket = self.socket.clone();
            self.write_task.replace(Box::pin(async move {
                for packet in &packets {
                    let (out_buf, sz) = send_to_device(&packet, &socket).await.map_err(|e| {
                        std::io::Error::new(
                            ErrorKind::Other,
                            format!("Could not send fastboot packet to device: {}", e),
                        )
                    })?;
                    let response = Packet::parse(&out_buf[..sz]).ok_or(std::io::Error::new(
                        ErrorKind::Other,
                        "Could not parse response packet",
                    ))?;
                    match response.packet_type() {
                        Ok(PacketType::Fastboot) => (),
                        _ => {
                            return Err(std::io::Error::new(
                                ErrorKind::Other,
                                "Unexpected response packet",
                            ))
                        }
                    }
                }
                Ok(packets.len())
            }));
        }
        if let Some(ref mut task) = self.write_task {
            match task.as_mut().poll(cx) {
                Poll::Ready(Ok(s)) => {
                    self.write_task = None;
                    // Advance the sequence number once per packet acknowledged.
                    for _ in 0..s {
                        self.sequence += Wrapping(1u16);
                    }
                    Poll::Ready(Ok(buf.len()))
                }
                Poll::Ready(Err(e)) => {
                    self.write_task = None;
                    Poll::Ready(Err(e))
                }
                Poll::Pending => Poll::Pending,
            }
        } else {
            // Really shouldn't get here.
            Poll::Ready(Err(std::io::Error::new(
                ErrorKind::Other,
                "Could not add async task to write",
            )))
        }
    }

    fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        unimplemented!();
    }

    fn poll_close(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        unimplemented!();
    }
}

async fn send_to_device(buf: &[u8], socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
    // Try sending twice.
    socket.send(buf).await?;
    match wait_for_response(socket).await {
        Ok(r) => Ok(r),
        Err(e) => {
            tracing::error!("Could not get reply from Fastboot device - trying again: {}", e);
            socket.send(buf).await?;
            wait_for_response(socket)
                .await
                .or_else(|e| bail!("Did not get reply from Fastboot device: {}", e))
        }
    }
}

async fn wait_for_response(socket: &UdpSocket) -> Result<([u8; 1500], usize)> {
    let mut buf = [0u8; 1500]; // Responses should never get this big.
    timeout(REPLY_TIMEOUT, Box::pin(socket.recv(&mut buf[..])))
        .await
        .map_err(|_| anyhow!("Timed out waiting for reply"))?
        .map_err(|e| anyhow!("Recv error: {}", e))
        .map(|size| (buf, size))
}

async fn make_sender_socket(addr: SocketAddr) -> Result<UdpSocket> {
    let socket: std::net::UdpSocket = match addr {
        SocketAddr::V4(ref _saddr) => socket2::Socket::new(
            socket2::Domain::IPV4,
            socket2::Type::DGRAM,
            Some(socket2::Protocol::UDP),
        )
        .context("construct datagram socket")?,
        SocketAddr::V6(ref _saddr) => socket2::Socket::new(
            socket2::Domain::IPV6,
            socket2::Type::DGRAM,
            Some(socket2::Protocol::UDP),
        )
        .context("construct datagram socket")?,
    }
    .into();
    let result: UdpSocket = Async::new(socket)?.into();
    result.connect(addr).await.context("connect to remote address")?;
    Ok(result)
}

fn make_query_packet() -> [u8; 4] {
    let mut packet = [0u8; 4];
    packet[0] = 0x01;
    packet
}

fn make_init_packet(sequence: u16) -> [u8; 8] {
    let mut packet = [0u8; 8];
    packet[0] = 0x02;
    packet[1] = 0x00;
    BigEndian::write_u16(&mut packet[2..4], sequence);
    BigEndian::write_u16(&mut packet[4..6], 1);
    BigEndian::write_u16(&mut packet[6..8], MAX_SIZE);
    packet
}

fn make_empty_fastboot_packet(sequence: u16) -> [u8; 4] {
    let mut packet = [0u8; 4];
    packet[0] = 0x03;
    packet[1] = 0x00;
    BigEndian::write_u16(&mut packet[2..4], sequence);
    packet
}

pub struct UdpNetworkFactory {}

impl UdpNetworkFactory {
    pub fn new() -> Self {
        Self {}
    }
}

#[async_trait(?Send)]
impl InterfaceFactory<UdpNetworkInterface> for UdpNetworkFactory {
    async fn open(&mut self, target: &Target) -> Result<UdpNetworkInterface> {
        let addr = target.fastboot_address().ok_or(anyhow!("No network address for fastboot"))?.0;
        let mut to_sock: SocketAddr = addr.into();
        // TODO(fxb/78977): get the port from the mdns packet.
        to_sock.set_port(HOST_PORT);
        let socket = make_sender_socket(to_sock).await?;
        let (buf, sz) = send_to_device(&make_query_packet(), &socket)
            .await
            .map_err(|e| anyhow!("Sending error: {}", e))?;
        let packet =
            Packet::parse(&buf[..sz]).ok_or(anyhow!("Could not parse response packet."))?;
        let sequence = match packet.packet_type() {
            Ok(PacketType::Query) => BigEndian::read_u16(&packet.data),
            _ => bail!("Unexpected response to query packet"),
        };
        let (buf, sz) = send_to_device(&make_init_packet(sequence), &socket)
            .await
            .map_err(|e| anyhow!("Sending error: {}", e))?;
        let packet =
            Packet::parse(&buf[..sz]).ok_or(anyhow!("Could not parse response packet."))?;
        let (version, max) = match packet.packet_type() {
            Ok(PacketType::Init) => {
                (BigEndian::read_u16(&packet.data[..2]), BigEndian::read_u16(&packet.data[2..4]))
            }
            _ => bail!("Unexpected response to init packet"),
        };
        let maximum_size = std::cmp::min(max, MAX_SIZE);
        tracing::debug!(
            "Fastboot over UDP connection established. Version {}. Max Size: {}",
            version,
            maximum_size
        );
        Ok(UdpNetworkInterface {
            socket,
            maximum_size,
            sequence: Wrapping(sequence + 1),
            read_task: None,
            write_task: None,
        })
    }

    async fn close(&self) {}
}
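The four-byte framing above is easiest to see in a test. A minimal test-module sketch, added for illustration (assuming it lives at the bottom of udp.rs so it can reach the private Packet, Header, and packet-builder items):

#[cfg(test)]
mod framing_sketch {
    use super::*;

    #[test]
    fn init_packet_round_trips_through_parse() {
        let bytes = make_init_packet(7);
        let packet = Packet::parse(&bytes[..]).expect("4-byte header should parse");
        assert_eq!(packet.header.id, 0x02); // Init packet type.
        assert_eq!(packet.header.sequence.get(), 7); // Sequence is big-endian on the wire.
        // Payload carries the protocol version (1) and the advertised max packet size.
        assert_eq!(BigEndian::read_u16(&packet.data[..2]), 1);
        assert_eq!(BigEndian::read_u16(&packet.data[2..4]), MAX_SIZE);
    }
}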
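A hedged usage sketch (a hypothetical helper, not part of the original file) showing how the factory and the AsyncRead/AsyncWrite impls fit together; it assumes an async executor, an already-discovered Target, and uses the AsyncReadExt/AsyncWriteExt extension traits from the futures crate already imported above:

#[allow(dead_code)]
async fn getvar_sketch(target: &Target) -> Result<String> {
    use futures::{AsyncReadExt as _, AsyncWriteExt as _};
    let mut factory = UdpNetworkFactory::new();
    // open() performs the query/init handshake and negotiates the max packet size.
    let mut interface = factory.open(target).await?;
    // poll_write chunks the payload into header-framed fastboot packets and waits
    // for the device to acknowledge each one.
    interface.write_all(b"getvar:version").await?;
    // poll_read sends an empty fastboot packet and yields whatever payload comes back.
    let mut reply = [0u8; 2048];
    let n = interface.read(&mut reply).await?;
    Ok(String::from_utf8_lossy(&reply[..n]).into_owned())
}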
plate.ts
import * as THREE from "three"; import getGrid from "./grid"; import config from "../config"; import PlateBase from "./plate-base"; import Subplate, { ISerializedSubplate } from "./subplate"; import Field, { IFieldOptions, ISerializedField } from "./field"; import { IMatrix3Array, IQuaternionArray, IVec3Array } from "../types"; import PlateGroup, { ISerializedPlateGroup } from "./plate-group"; // The stronger initial plate force, the sooner it should be decreased. const HOT_SPOT_TORQUE_DECREASE = config.constantHotSpots ? 0 : 0.2 * config.userForce; const MIN_PLATE_SIZE = 100000; // km, roughly the size of a plate label interface IOptions { id: number; density?: number; hue?: number; } // See: https://app.zeplin.io/project/60c9c0d5060353bd2bb10172/screen/62768bda825a8d13749065fc // When these values get updated, remember to update arrows colors in boundary-config-dialog.less. // Other colors (plate labels, 3D arrows) will automatically pick up colors from this array. export const plateHues = [ 29, 186, 277, 47, 330, // up to 5 initial plates used by basic presets 71, 205, 166, 258, 359 // 5 extra colors for plates that might be created during simulation (plate division) ]; export interface ISerializedPlate { id: number; quaternion: IQuaternionArray; angularVelocity: IVec3Array; hue: number; density: number; mass: number; momentOfInertia: IMatrix3Array; invMomentOfInertia: IMatrix3Array; center: null | IVec3Array; hotSpot: { position: IVec3Array; force: IVec3Array; }; fields: ISerializedField[]; adjacentFields: ISerializedField[]; subplate: ISerializedSubplate; plateGroup: ISerializedPlateGroup | null; } export default class Plate extends PlateBase<Field> { id: number; density: number; hue: number; adjacentFields: Map<number, Field>; center: null | THREE.Vector3; invMomentOfInertia: THREE.Matrix3; momentOfInertia: THREE.Matrix3; mass: number; subplate: Subplate; quaternion: THREE.Quaternion; angularVelocity: THREE.Vector3; fields: Map<number, Field>; isSubplate = false; plateGroup: PlateGroup | null; hotSpot: { position: THREE.Vector3; force: THREE.Vector3; }; constructor({ id, density, hue }: IOptions) { super(); this.id = id; // Decides whether plate goes under or above another plate while subducting (ocean-ocean). this.density = density || 0; // Base color / hue of the plate used to visually identify it. this.hue = hue || 0; this.quaternion = new THREE.Quaternion(); this.angularVelocity = new THREE.Vector3(); this.fields = new Map(); this.adjacentFields = new Map(); // Physics properties: this.mass = 0; this.invMomentOfInertia = new THREE.Matrix3(); this.momentOfInertia = new THREE.Matrix3(); this.center = null; // Torque / force that is pushing plate. It might be constant or decrease with time (). this.hotSpot = { position: new THREE.Vector3(0, 0, 0), force: new THREE.Vector3(0, 0, 0) }; // Subplate is a container for some additional fields attached to this plate. // At this point mostly fields that were subducting under and were detached from the original plate. 
this.subplate = new Subplate(this); this.plateGroup = null; } serialize(): ISerializedPlate { return { id: this.id, quaternion: this.quaternion.toArray(), angularVelocity: this.angularVelocity.toArray(), hue: this.hue, density: this.density, mass: this.mass, momentOfInertia: this.momentOfInertia.toArray(), invMomentOfInertia: this.invMomentOfInertia.toArray(), center: this.center?.toArray() || null, hotSpot: { force: this.hotSpot.force.toArray(), position: this.hotSpot.position.toArray() }, fields: Array.from(this.fields.values()).map(field => field.serialize()), adjacentFields: Array.from(this.adjacentFields.values()).map(field => field.serialize()), subplate: this.subplate.serialize(), plateGroup: this.plateGroup?.serialize() || null }; } static deserialize(props: ISerializedPlate) { const plate = new Plate({ id: props.id }); plate.quaternion = (new THREE.Quaternion()).fromArray(props.quaternion); plate.angularVelocity = (new THREE.Vector3()).fromArray(props.angularVelocity); plate.hue = props.hue; plate.density = props.density; plate.mass = props.mass; plate.momentOfInertia = (new THREE.Matrix3()).fromArray(props.momentOfInertia); plate.invMomentOfInertia = (new THREE.Matrix3()).fromArray(props.invMomentOfInertia); plate.center = props.center && (new THREE.Vector3()).fromArray(props.center); plate.hotSpot.force = (new THREE.Vector3()).fromArray(props.hotSpot.force); plate.hotSpot.position = (new THREE.Vector3()).fromArray(props.hotSpot.position); props.fields.forEach((serializedField: ISerializedField) => { const field = Field.deserialize(serializedField, plate); plate.fields.set(field.id, field); }); props.adjacentFields.forEach((serializedField: ISerializedField) => { const field = Field.deserialize(serializedField, plate); plate.adjacentFields.set(field.id, field); }); plate.subplate = props.subplate && Subplate.deserialize(props.subplate, plate); plate.plateGroup = null; // this needs to be deserialized by parent (model) that has access to all the plates return plate; } // It depends on current angular velocity and velocities of other, colliding plates. // Note that this is pretty expensive to calculate, so if used much, the current value should be cached. 
get totalTorque() { const totalTorque = this.hotSpot.position.clone().cross(this.hotSpot.force); this.fields.forEach((field: Field) => { totalTorque.add(field.torque); }); return totalTorque; } get angularAcceleration() { if (this.plateGroup) { return this.plateGroup.angularAcceleration; } return this.totalTorque.applyMatrix3(this.invMomentOfInertia); } mergedWith(anotherPlate: Plate) { if (!this.plateGroup || !anotherPlate.plateGroup) { return false; } return this.plateGroup === anotherPlate.plateGroup; } updateCenter() { const safeFields: Record<string, Field> = {}; const safeSum = new THREE.Vector3(); let safeArea = 0; this.fields.forEach((field: Field) => { if (!field.subduction) { let safe = true; // Some subducting fields do not get marked because they move so slowly // Ignore fields adjacent to subducting fields just to be safe field.forEachNeighbor((neighbor: Field) => { if (neighbor.subduction) { safe = false; } }); if (safe) { safeFields[field.id] = field; safeSum.add(field.absolutePos); safeArea += field.area; } } }); if (safeArea < MIN_PLATE_SIZE) { // If the visible area of a plate is too small, don't bother labelling this.center = new THREE.Vector3(); } else { // Otherwise, use the field nearest the center const geographicCenter = safeSum.normalize(); let closestPoint = new THREE.Vector3(0, 0, 0); let minDist = Number.MAX_VALUE; for (const id in safeFields) { const field = safeFields[id]; const dist = field.absolutePos.distanceTo(geographicCenter); if (dist < minDist) { closestPoint = field.absolutePos; minDist = dist; } } this.center = closestPoint; } } updateInertiaTensor() { this.mass = 0; let ixx = 0; let iyy = 0; let izz = 0; let ixy = 0; let ixz = 0; let iyz = 0; this.fields.forEach((field: Field) => { const mass = field.mass; const p = field.absolutePos; ixx += mass * (p.y * p.y + p.z * p.z); iyy += mass * (p.x * p.x + p.z * p.z); izz += mass * (p.x * p.x + p.y * p.y); ixy -= mass * p.x * p.y; ixz -= mass * p.x * p.z; iyz -= mass * p.y * p.z; this.mass += mass; }); this.momentOfInertia = new THREE.Matrix3(); this.momentOfInertia.set(ixx, ixy, ixz, ixy, iyy, iyz, ixz, iyz, izz); this.invMomentOfInertia = new THREE.Matrix3(); this.invMomentOfInertia.copy(this.momentOfInertia).invert(); } updateHotSpot(timestep: number) { const len = this.hotSpot.force.length(); if (len > 0) { this.hotSpot.force.setLength(Math.max(0, len - timestep * HOT_SPOT_TORQUE_DECREASE)); } } setHotSpot(position: THREE.Vector3, force: THREE.Vector3) { this.hotSpot = { position, force }; } setDensity(density: number) { this.density = density; } removeUnnecessaryFields() { this.fields.forEach((f: Field) => { if (!f.alive) { this.deleteField(f.id); } }); } addField(props: Omit<IFieldOptions, "plate">)
addFieldAt(props: Omit<IFieldOptions, "id" | "plate">, absolutePos: THREE.Vector3) { const localPos = this.localPosition(absolutePos); const id = getGrid().nearestFieldId(localPos); if (!this.fields.has(id)) { return this.addField({ ...props, id }); } } addExistingField(field: Field) { const id = field.id; field.plate = this; this.fields.set(id, field); if (this.adjacentFields.has(id)) { this.adjacentFields.delete(id); } field.adjacentFields.forEach((adjFieldId: number) => { if (!this.fields.has(adjFieldId)) { this.addAdjacentField(adjFieldId); } else { const adjField = this.fields.get(adjFieldId); if (adjField) { adjField.boundary = adjField.isBoundary(); } } }); field.boundary = field.isBoundary(); return field; } deleteField(id: number) { const field = this.fields.get(id); if (!field) { return; } this.fields.delete(id); this.subplate.deleteField(id); this.addAdjacentField(id); field.adjacentFields.forEach((adjFieldId: number) => { let adjField = this.adjacentFields.get(adjFieldId); if (adjField && !adjField.isAdjacentField()) { this.adjacentFields.delete(adjFieldId); } adjField = this.fields.get(adjFieldId); if (adjField) { adjField.boundary = true; } }); } addAdjacentField(id: number) { if (!this.adjacentFields.has(id)) { const newField = new Field({ id, plate: this, adjacent: true }); if (newField.isAdjacentField()) { this.adjacentFields.set(id, newField); } } } neighborsCount(absolutePos: THREE.Vector3) { const localPos = this.localPosition(absolutePos); const id = getGrid().nearestFieldId(localPos); let count = 0; getGrid().fields[id].adjacentFields.forEach((adjId: number) => { if (this.fields.has(adjId)) { count += 1; } }); return count; } calculateContinentBuffers() { const grid = getGrid(); const queue: Field[] = []; const dist: Record<string, number> = {}; const getDist = (field: Field) => { const id = field.id; if (dist[id] !== undefined) { return dist[id]; } return Infinity; }; this.forEachField((field: Field) => { field.isContinentBuffer = false; if (field.continentalCrust) { field.forEachNeighbor((adjField: Field) => { if (adjField.oceanicCrust && getDist(adjField) > grid.fieldDiameterInKm) { dist[adjField.id] = grid.fieldDiameterInKm; queue.push(adjField); } }); } }); while (queue.length > 0) { const field = queue.shift() as Field; field.isContinentBuffer = true; const newDist = getDist(field) + grid.fieldDiameterInKm; if (newDist < config.continentBufferWidth) { field.forEachNeighbor((adjField: Field) => { if (adjField.oceanicCrust && getDist(adjField) > newDist) { dist[adjField.id] = newDist; queue.push(adjField); } }); } } } addToSubplate(field: Field) { field.alive = false; this.subplate.addField(field); } // Returns fields adjacent to the whole plate which will be probably added to it soon (around divergent boundaries). getVisibleAdjacentFields() { const result: Field[] = []; this.adjacentFields.forEach((field: Field) => { if (field.noCollisionDist > 0) { result.push(field); } }); return result; } sortFields() { // Sort fields by ID. Map traversal follows insertion order. // This is not necessary, but it lets us test model better. Quaternion and physical properties are often calculated // by traversing all the fields. Order of this traverse might influence micro numerical errors that can create // visible differences in a longer run. 
Example of a place where it matters: plate-division-merge.test.ts this.fields = new Map<number, Field>(Array.from(this.fields.entries()).sort((a, b) => a[0] - b[0])); this.adjacentFields = new Map<number, Field>(Array.from(this.adjacentFields.entries()).sort((a, b) => a[0] - b[0])); } }
{ const field = new Field({ ...props, plate: this }); this.addExistingField(field); return field; }
identifier_body
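A reference note (standard rigid-body identities, added for clarity and not stated in the source): updateInertiaTensor accumulates the point-mass inertia tensor over all fields, and angularAcceleration applies its inverse to the net torque from totalTorque:

\[
I = \sum_k m_k
\begin{pmatrix}
y_k^2 + z_k^2 & -x_k y_k & -x_k z_k \\
-x_k y_k & x_k^2 + z_k^2 & -y_k z_k \\
-x_k z_k & -y_k z_k & x_k^2 + y_k^2
\end{pmatrix},
\qquad
\alpha = I^{-1} \tau,
\]

where \(p_k = (x_k, y_k, z_k)\) is the absolute position of field \(k\), \(m_k\) its mass, and \(\tau\) the total torque.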
field; }
(id: number) { const field = this.fields.get(id); if (!field) { return; } this.fields.delete(id); this.subplate.deleteField(id); this.addAdjacentField(id); field.adjacentFields.forEach((adjFieldId: number) => { let adjField = this.adjacentFields.get(adjFieldId); if (adjField && !adjField.isAdjacentField()) { this.adjacentFields.delete(adjFieldId); } adjField = this.fields.get(adjFieldId); if (adjField) { adjField.boundary = true; } }); } addAdjacentField(id: number) { if (!this.adjacentFields.has(id)) { const newField = new Field({ id, plate: this, adjacent: true }); if (newField.isAdjacentField()) { this.adjacentFields.set(id, newField); } } } neighborsCount(absolutePos: THREE.Vector3) { const localPos = this.localPosition(absolutePos); const id = getGrid().nearestFieldId(localPos); let count = 0; getGrid().fields[id].adjacentFields.forEach((adjId: number) => { if (this.fields.has(adjId)) { count += 1; } }); return count; } calculateContinentBuffers() { const grid = getGrid(); const queue: Field[] = []; const dist: Record<string, number> = {}; const getDist = (field: Field) => { const id = field.id; if (dist[id] !== undefined) { return dist[id]; } return Infinity; }; this.forEachField((field: Field) => { field.isContinentBuffer = false; if (field.continentalCrust) { field.forEachNeighbor((adjField: Field) => { if (adjField.oceanicCrust && getDist(adjField) > grid.fieldDiameterInKm) { dist[adjField.id] = grid.fieldDiameterInKm; queue.push(adjField); } }); } }); while (queue.length > 0) { const field = queue.shift() as Field; field.isContinentBuffer = true; const newDist = getDist(field) + grid.fieldDiameterInKm; if (newDist < config.continentBufferWidth) { field.forEachNeighbor((adjField: Field) => { if (adjField.oceanicCrust && getDist(adjField) > newDist) { dist[adjField.id] = newDist; queue.push(adjField); } }); } } } addToSubplate(field: Field) { field.alive = false; this.subplate.addField(field); } // Returns fields adjacent to the whole plate which will be probably added to it soon (around divergent boundaries). getVisibleAdjacentFields() { const result: Field[] = []; this.adjacentFields.forEach((field: Field) => { if (field.noCollisionDist > 0) { result.push(field); } }); return result; } sortFields() { // Sort fields by ID. Map traversal follows insertion order. // This is not necessary, but it lets us test model better. Quaternion and physical properties are often calculated // by traversing all the fields. Order of this traverse might influence micro numerical errors that can create // visible differences in a longer run. Example of a place where it matters: plate-division-merge.test.ts this.fields = new Map<number, Field>(Array.from(this.fields.entries()).sort((a, b) => a[0] - b[0])); this.adjacentFields = new Map<number, Field>(Array.from(this.adjacentFields.entries()).sort((a, b) => a[0] - b[0])); } }
deleteField
identifier_name
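The updateInertiaTensor method in the plate.ts record above accumulates a standard point-mass inertia tensor over all fields. A minimal Python sketch of the same accumulation, with numpy and (mass, position) tuples as illustrative stand-ins for the Field objects (neither is part of the original source):

# Sketch of the point-mass inertia tensor built by Plate.updateInertiaTensor.
import numpy as np

def inertia_tensor(fields):
    """fields: iterable of (mass, (x, y, z)) tuples."""
    I = np.zeros((3, 3))
    for mass, (x, y, z) in fields:
        # Same entries as the ixx..iyz accumulators in plate.ts.
        I += mass * np.array([
            [y * y + z * z, -x * y,         -x * z],
            [-x * y,        x * x + z * z,  -y * z],
            [-x * z,        -y * z,         x * x + y * y],
        ])
    return I

# Two unit masses at (+-1, 0, 0): Ixx = 0, Iyy = Izz = 2.
print(inertia_tensor([(1.0, (1, 0, 0)), (1.0, (-1, 0, 0))]))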
plate.ts
import * as THREE from "three"; import getGrid from "./grid"; import config from "../config"; import PlateBase from "./plate-base"; import Subplate, { ISerializedSubplate } from "./subplate"; import Field, { IFieldOptions, ISerializedField } from "./field"; import { IMatrix3Array, IQuaternionArray, IVec3Array } from "../types"; import PlateGroup, { ISerializedPlateGroup } from "./plate-group"; // The stronger initial plate force, the sooner it should be decreased. const HOT_SPOT_TORQUE_DECREASE = config.constantHotSpots ? 0 : 0.2 * config.userForce; const MIN_PLATE_SIZE = 100000; // km, roughly the size of a plate label interface IOptions { id: number; density?: number; hue?: number; } // See: https://app.zeplin.io/project/60c9c0d5060353bd2bb10172/screen/62768bda825a8d13749065fc // When these values get updated, remember to update arrows colors in boundary-config-dialog.less. // Other colors (plate labels, 3D arrows) will automatically pick up colors from this array. export const plateHues = [ 29, 186, 277, 47, 330, // up to 5 initial plates used by basic presets 71, 205, 166, 258, 359 // 5 extra colors for plates that might be created during simulation (plate division) ]; export interface ISerializedPlate { id: number; quaternion: IQuaternionArray; angularVelocity: IVec3Array; hue: number; density: number; mass: number; momentOfInertia: IMatrix3Array; invMomentOfInertia: IMatrix3Array; center: null | IVec3Array; hotSpot: { position: IVec3Array; force: IVec3Array; }; fields: ISerializedField[]; adjacentFields: ISerializedField[]; subplate: ISerializedSubplate; plateGroup: ISerializedPlateGroup | null; } export default class Plate extends PlateBase<Field> { id: number; density: number; hue: number; adjacentFields: Map<number, Field>; center: null | THREE.Vector3; invMomentOfInertia: THREE.Matrix3; momentOfInertia: THREE.Matrix3; mass: number; subplate: Subplate; quaternion: THREE.Quaternion; angularVelocity: THREE.Vector3; fields: Map<number, Field>; isSubplate = false; plateGroup: PlateGroup | null; hotSpot: { position: THREE.Vector3; force: THREE.Vector3; }; constructor({ id, density, hue }: IOptions) { super(); this.id = id; // Decides whether plate goes under or above another plate while subducting (ocean-ocean). this.density = density || 0; // Base color / hue of the plate used to visually identify it. this.hue = hue || 0; this.quaternion = new THREE.Quaternion(); this.angularVelocity = new THREE.Vector3(); this.fields = new Map(); this.adjacentFields = new Map(); // Physics properties: this.mass = 0; this.invMomentOfInertia = new THREE.Matrix3(); this.momentOfInertia = new THREE.Matrix3(); this.center = null; // Torque / force that is pushing plate. It might be constant or decrease with time (). this.hotSpot = { position: new THREE.Vector3(0, 0, 0), force: new THREE.Vector3(0, 0, 0) }; // Subplate is a container for some additional fields attached to this plate. // At this point mostly fields that were subducting under and were detached from the original plate. 
this.subplate = new Subplate(this); this.plateGroup = null; } serialize(): ISerializedPlate { return { id: this.id, quaternion: this.quaternion.toArray(), angularVelocity: this.angularVelocity.toArray(), hue: this.hue, density: this.density, mass: this.mass, momentOfInertia: this.momentOfInertia.toArray(), invMomentOfInertia: this.invMomentOfInertia.toArray(), center: this.center?.toArray() || null, hotSpot: { force: this.hotSpot.force.toArray(), position: this.hotSpot.position.toArray() }, fields: Array.from(this.fields.values()).map(field => field.serialize()), adjacentFields: Array.from(this.adjacentFields.values()).map(field => field.serialize()), subplate: this.subplate.serialize(), plateGroup: this.plateGroup?.serialize() || null }; } static deserialize(props: ISerializedPlate) { const plate = new Plate({ id: props.id }); plate.quaternion = (new THREE.Quaternion()).fromArray(props.quaternion); plate.angularVelocity = (new THREE.Vector3()).fromArray(props.angularVelocity); plate.hue = props.hue; plate.density = props.density; plate.mass = props.mass; plate.momentOfInertia = (new THREE.Matrix3()).fromArray(props.momentOfInertia); plate.invMomentOfInertia = (new THREE.Matrix3()).fromArray(props.invMomentOfInertia); plate.center = props.center && (new THREE.Vector3()).fromArray(props.center); plate.hotSpot.force = (new THREE.Vector3()).fromArray(props.hotSpot.force); plate.hotSpot.position = (new THREE.Vector3()).fromArray(props.hotSpot.position); props.fields.forEach((serializedField: ISerializedField) => { const field = Field.deserialize(serializedField, plate); plate.fields.set(field.id, field); }); props.adjacentFields.forEach((serializedField: ISerializedField) => { const field = Field.deserialize(serializedField, plate); plate.adjacentFields.set(field.id, field); }); plate.subplate = props.subplate && Subplate.deserialize(props.subplate, plate); plate.plateGroup = null; // this needs to be deserialized by parent (model) that has access to all the plates return plate; } // It depends on current angular velocity and velocities of other, colliding plates. // Note that this is pretty expensive to calculate, so if used much, the current value should be cached. 
get totalTorque() { const totalTorque = this.hotSpot.position.clone().cross(this.hotSpot.force); this.fields.forEach((field: Field) => { totalTorque.add(field.torque); }); return totalTorque; } get angularAcceleration() { if (this.plateGroup) { return this.plateGroup.angularAcceleration; } return this.totalTorque.applyMatrix3(this.invMomentOfInertia); } mergedWith(anotherPlate: Plate) { if (!this.plateGroup || !anotherPlate.plateGroup) { return false; } return this.plateGroup === anotherPlate.plateGroup; } updateCenter() { const safeFields: Record<string, Field> = {}; const safeSum = new THREE.Vector3(); let safeArea = 0; this.fields.forEach((field: Field) => { if (!field.subduction) { let safe = true; // Some subducting fields do not get marked because they move so slowly // Ignore fields adjacent to subducting fields just to be safe field.forEachNeighbor((neighbor: Field) => { if (neighbor.subduction) { safe = false; } }); if (safe) { safeFields[field.id] = field; safeSum.add(field.absolutePos); safeArea += field.area; } } }); if (safeArea < MIN_PLATE_SIZE) { // If the visible area of a plate is too small, don't bother labelling this.center = new THREE.Vector3(); } else { // Otherwise, use the field nearest the center const geographicCenter = safeSum.normalize(); let closestPoint = new THREE.Vector3(0, 0, 0); let minDist = Number.MAX_VALUE; for (const id in safeFields) { const field = safeFields[id]; const dist = field.absolutePos.distanceTo(geographicCenter); if (dist < minDist) { closestPoint = field.absolutePos; minDist = dist; } } this.center = closestPoint; } } updateInertiaTensor() { this.mass = 0; let ixx = 0; let iyy = 0; let izz = 0; let ixy = 0; let ixz = 0; let iyz = 0; this.fields.forEach((field: Field) => { const mass = field.mass; const p = field.absolutePos; ixx += mass * (p.y * p.y + p.z * p.z); iyy += mass * (p.x * p.x + p.z * p.z); izz += mass * (p.x * p.x + p.y * p.y); ixy -= mass * p.x * p.y; ixz -= mass * p.x * p.z; iyz -= mass * p.y * p.z; this.mass += mass; }); this.momentOfInertia = new THREE.Matrix3(); this.momentOfInertia.set(ixx, ixy, ixz, ixy, iyy, iyz, ixz, iyz, izz); this.invMomentOfInertia = new THREE.Matrix3(); this.invMomentOfInertia.copy(this.momentOfInertia).invert(); } updateHotSpot(timestep: number) { const len = this.hotSpot.force.length(); if (len > 0) { this.hotSpot.force.setLength(Math.max(0, len - timestep * HOT_SPOT_TORQUE_DECREASE)); } } setHotSpot(position: THREE.Vector3, force: THREE.Vector3) { this.hotSpot = { position, force }; } setDensity(density: number) { this.density = density; } removeUnnecessaryFields() { this.fields.forEach((f: Field) => { if (!f.alive) { this.deleteField(f.id); } }); } addField(props: Omit<IFieldOptions, "plate">) { const field = new Field({ ...props, plate: this }); this.addExistingField(field); return field; } addFieldAt(props: Omit<IFieldOptions, "id" | "plate">, absolutePos: THREE.Vector3) { const localPos = this.localPosition(absolutePos); const id = getGrid().nearestFieldId(localPos); if (!this.fields.has(id)) { return this.addField({ ...props, id }); } } addExistingField(field: Field) { const id = field.id; field.plate = this; this.fields.set(id, field); if (this.adjacentFields.has(id)) { this.adjacentFields.delete(id); } field.adjacentFields.forEach((adjFieldId: number) => { if (!this.fields.has(adjFieldId)) { this.addAdjacentField(adjFieldId); } else { const adjField = this.fields.get(adjFieldId); if (adjField) { adjField.boundary = adjField.isBoundary(); } } }); field.boundary = field.isBoundary(); return 
field; } deleteField(id: number) { const field = this.fields.get(id); if (!field) { return; } this.fields.delete(id); this.subplate.deleteField(id); this.addAdjacentField(id); field.adjacentFields.forEach((adjFieldId: number) => { let adjField = this.adjacentFields.get(adjFieldId); if (adjField && !adjField.isAdjacentField()) { this.adjacentFields.delete(adjFieldId); } adjField = this.fields.get(adjFieldId); if (adjField) { adjField.boundary = true; } }); } addAdjacentField(id: number) { if (!this.adjacentFields.has(id)) { const newField = new Field({ id, plate: this, adjacent: true }); if (newField.isAdjacentField()) { this.adjacentFields.set(id, newField); } } } neighborsCount(absolutePos: THREE.Vector3) { const localPos = this.localPosition(absolutePos); const id = getGrid().nearestFieldId(localPos); let count = 0; getGrid().fields[id].adjacentFields.forEach((adjId: number) => { if (this.fields.has(adjId)) { count += 1; } }); return count; } calculateContinentBuffers() { const grid = getGrid(); const queue: Field[] = []; const dist: Record<string, number> = {}; const getDist = (field: Field) => { const id = field.id; if (dist[id] !== undefined) { return dist[id]; } return Infinity; }; this.forEachField((field: Field) => { field.isContinentBuffer = false; if (field.continentalCrust) { field.forEachNeighbor((adjField: Field) => { if (adjField.oceanicCrust && getDist(adjField) > grid.fieldDiameterInKm)
}); } }); while (queue.length > 0) { const field = queue.shift() as Field; field.isContinentBuffer = true; const newDist = getDist(field) + grid.fieldDiameterInKm; if (newDist < config.continentBufferWidth) { field.forEachNeighbor((adjField: Field) => { if (adjField.oceanicCrust && getDist(adjField) > newDist) { dist[adjField.id] = newDist; queue.push(adjField); } }); } } } addToSubplate(field: Field) { field.alive = false; this.subplate.addField(field); } // Returns fields adjacent to the whole plate which will be probably added to it soon (around divergent boundaries). getVisibleAdjacentFields() { const result: Field[] = []; this.adjacentFields.forEach((field: Field) => { if (field.noCollisionDist > 0) { result.push(field); } }); return result; } sortFields() { // Sort fields by ID. Map traversal follows insertion order. // This is not necessary, but it lets us test model better. Quaternion and physical properties are often calculated // by traversing all the fields. Order of this traverse might influence micro numerical errors that can create // visible differences in a longer run. Example of a place where it matters: plate-division-merge.test.ts this.fields = new Map<number, Field>(Array.from(this.fields.entries()).sort((a, b) => a[0] - b[0])); this.adjacentFields = new Map<number, Field>(Array.from(this.adjacentFields.entries()).sort((a, b) => a[0] - b[0])); } }
{ dist[adjField.id] = grid.fieldDiameterInKm; queue.push(adjField); }
conditional_block
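The conditional_block middle above is the seeding step of Plate.calculateContinentBuffers: oceanic fields adjacent to continental crust get an initial distance of one field diameter and enter a queue, after which the loop flood-fills outward until continentBufferWidth is reached. A stripped-down Python sketch of that breadth-first expansion; the adjacency dict and field sets are illustrative assumptions:

# Flood fill matching calculateContinentBuffers: flag oceanic fields
# within max_width of continental crust.
from collections import deque

def continent_buffers(neighbors, continental, oceanic, step, max_width):
    dist = {}
    queue = deque()
    for f in continental:                      # seed at continent edges
        for adj in neighbors[f]:
            if adj in oceanic and dist.get(adj, float("inf")) > step:
                dist[adj] = step
                queue.append(adj)
    buffer = set()
    while queue:                               # expand one step at a time
        f = queue.popleft()
        buffer.add(f)
        new_dist = dist[f] + step
        if new_dist < max_width:
            for adj in neighbors[f]:
                if adj in oceanic and dist.get(adj, float("inf")) > new_dist:
                    dist[adj] = new_dist
                    queue.append(adj)
    return buffer

# Chain c - o1 - o2 - o3 with one continental field: only o1, o2 qualify.
nbrs = {'c': ['o1'], 'o1': ['c', 'o2'], 'o2': ['o1', 'o3'], 'o3': ['o2']}
print(continent_buffers(nbrs, {'c'}, {'o1', 'o2', 'o3'}, step=70, max_width=150))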
utils.py
"""Utilities. Mostly periodic checks. Everything that is neither core nor gui contents (for use): - run() -- call once on startup. takes care of all automatic tasks - send_email() -- send an email - get_name() -- get a pretty name - get_book_data() -- attempt to get data about a book based on the ISBN (first local DB, then DNB). to add late handlers, append them to late_handlers. they will receive arguments as specified in late_books() """ import base64 import tempfile import email import smtplib import ssl from datetime import datetime, timedelta, date import time import threading import shutil import os import ftplib import ftputil import requests import logging import re import bs4 import string try: from cryptography import fernet except ImportError: fernet = None from buchschloss import core, config class FormattedDate(date): """print a datetime.date as specified in config.core.date_format""" def __str__(self): return self.strftime(config.core.date_format) @classmethod def fromdate(cls, date_: date): """Create a FormattedDate from a datetime.date""" if date_ is None: return None else: return cls(date_.year, date_.month, date_.day) def todate(self): """transform self to a datetime.date""" return date(self.year, self.month, self.day) def run_checks(): """Run stuff to do as specified by times set in config""" while True: if datetime.now() > core.misc_data.check_date+timedelta(minutes=45): for stuff in stuff_to_do: threading.Thread(target=stuff).start() core.misc_data.check_date = datetime.now() + config.utils.tasks.repeat_every time.sleep(5*60*60) def late_books(): """Check for late and nearly late books. Call the functions in late_handlers with arguments (late, warn). late and warn are sequences of core.Borrow instances. """ late = [] warn = [] today = date.today() for b in core.Borrow.search(( ('is_back', 'eq', False), 'and', ('return_date', 'gt', today+config.utils.late_books_warn_time))): if b.return_date < today: late.append(b) else: warn.append(b) for h in late_handlers: h(late, warn) def backup(): """Local backups. Run backup_shift and copy "name" db to "name.1", encrypting if a key is given in config """ backup_shift(os, config.utils.tasks.backup_depth) if config.utils.tasks.secret_key is None: shutil.copyfile(config.core.database_name, config.core.database_name+'.1') else: data = get_encrypted_database() with open(config.core.database_name+'.1', 'wb') as f: f.write(data) def get_encrypted_database(): """get the encrypted contents of the database file""" if fernet is None: raise RuntimeError('encryption requested, but no cryptography available') with open(config.core.database_name, 'rb') as f: plain = f.read() key = base64.urlsafe_b64encode(config.utils.tasks.secret_key) cipher = fernet.Fernet(key).encrypt(plain) return base64.urlsafe_b64decode(cipher) def web_backup(): """Remote backups. 
Run backup_shift and upload "name" DB as "name.1", encrypted if a key is given in config """ conf = config.utils if conf.tasks.secret_key is None: upload_path = config.core.database_name file = None else: file = tempfile.NamedTemporaryFile(delete=False) file.write(get_encrypted_database()) file.close() upload_path = file.name factory = ftplib.FTP_TLS if conf.tls else ftplib.FTP # noinspection PyDeprecation with ftputil.FTPHost(conf.ftp.host, conf.ftp.username, conf.ftp.password, session_factory=factory, use_list_a_option=False) as host: backup_shift(host, conf.tasks.web_backup_depth) host.upload(upload_path, config.core.database_name+'.1') if file is not None: os.unlink(file.name) def backup_shift(fs, depth): """shift all name.number up one number to the given depth in the given filesystem (os or remote FTP host)""" number_name = lambda n: '.'.join((config.core.database_name, str(n))) try: fs.remove(number_name(depth)) except FileNotFoundError: pass for f in range(depth, 1, -1): try: fs.rename(number_name(f-1), number_name(f)) except FileNotFoundError: pass def send_email(subject, text): """Send an email to the recipient specified in config""" cfg = config.utils.email msg = email.message.Message() msg['From'] = cfg['from'] msg['To'] = cfg.recipient msg['Subject'] = subject msg.set_payload(text) try: with smtplib.SMTP(cfg.smtp.host, cfg.smtp.port) as conn: if cfg.smtp.tls: conn.starttls(context=ssl.create_default_context()) if cfg.smtp.username is not None: conn.login(cfg.smtp.username, cfg.smtp.password) conn.send_message(msg) except smtplib.SMTPException as e: logging.error('error while sending email: {}: {}'.format(type(e).__name__, e)) def get_name(internal: str): """Get an end-user suitable name. Try lookup in config.utils.names. "__" is replaced by ": " with components looked up individually If a name isn't found, a warning is logged and the internal name returned, potentially modified "<namespace>::<name>" may specify a namespace in which lookups are performed first, falling back to the global names if nothing is found "__" takes precedence over "::" """ if '__' in internal: return ': '.join(get_name(s) for s in internal.split('__')) *path, name = internal.split('::') current = config.utils.names look_in = [current] try: for k in path: current = current[k] look_in.append(current) except KeyError: # noinspection PyUnboundLocalVariable logging.warning('invalid namespace {!r} of {!r}'.format(k, internal)) look_in.reverse() for ns in look_in: try: val = ns[name] if isinstance(val, str): return val elif isinstance(val, dict): return val['*this*'] else: raise TypeError('{!r} is neither dict nor str'.format(val)) except KeyError: pass logging.warning('Name "{}" was not found in the namefile'.format('::'.join(path+[name]))) return '::'.join(path+[name]) def break_string(text, size, break_char=string.punctuation, cut_char=string.whitespace): """Insert newlines every `size` characters. Insert '\n' before the given amount of characters if a character in `break_char` is encountered. If the character is in `cut_char`, it is replaced by the newline. """ # TODO: move to misc break_char += cut_char r = [] while len(text) > size: i = size cut = False while i: if text[i] in break_char: cut = text[i] in cut_char break i -= 1 else: i = size-1 i += 1 r.append(text[:i-cut]) text = text[i:] r.append(text) return '\n'.join(r)
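# Hypothetical doctest-style example for break_string above (added here
# as illustration, not part of the original module):
#
#     >>> break_string('the quick brown fox jumps over the lazy dog', 20)
#     'the quick brown fox\njumps over the lazy\ndog'
#
# The break prefers the last break_char at or before the size limit;
# whitespace break characters (cut_char) are consumed by the newline,
# while punctuation is kept.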
def get_book_data(isbn: int): """Attempt to get book data via the ISBN from the DB, if that fails, try the DNB (https://portal.dnb.de)""" try: book = next(iter(core.Book.search(('isbn', 'eq', isbn)))) except StopIteration: pass # actually, I could put the whole rest of the function here else: data = core.Book.view_str(book.id) del data['id'], data['status'], data['return_date'], data['borrowed_by'] del data['borrowed_by_id'], data['__str__'] return data try: r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D' + str(isbn) + '&method=simpleSearch&cqlMode=true') r.raise_for_status() except requests.exceptions.RequestException: raise core.BuchSchlossError('no_connection', 'no_connection') person_re = re.compile(r'(\w*, \w*) \((\w*)\)') results = {'concerned_people': []} page = bs4.BeautifulSoup(r.text) table = page.select_one('#fullRecordTable') if table is None: # see if we got multiple results link_to_first = page.select_one('#recordLink_0') if link_to_first is None: raise core.BuchSchlossError( 'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn) r = requests.get('https://portal.dnb.de'+link_to_first['href']) page = bs4.BeautifulSoup(r.text) table = page.select_one('#fullRecordTable') for tr in table.select('tr'): td = [x.get_text('\n').strip() for x in tr.select('td')] if len(td) == 2: if td[0] == 'Titel': results['title'] = td[1].split('/')[0].strip() elif td[0] == 'Person(en)': for p in td[1].split('\n'): g = person_re.search(p) if g is None: continue g = g.groups() if g[1] == 'Verfasser': results['author'] = g[0] else: results['concerned_people'].append(g[1]+': '+g[0]) elif td[0] == 'Verlag': results['publisher'] = td[1].split(':')[1].strip() elif td[0] == 'Zeitliche Einordnung': results['year'] = td[1].split(':')[1].strip() elif td[0] == 'Sprache(n)': results['language'] = td[1].split(',')[0].split()[0].strip() results['concerned_people'] = '; '.join(results['concerned_people']) return results def run(): """handling function.""" for k in config.utils.tasks.startup: threading.Thread(target=globals()[k], daemon=True).start() threading.Thread(target=run_checks, daemon=True).start() def _default_late_handler(late, warn): head = datetime.now().strftime(config.core.date_format).join(('\n\n',)) with open('late.txt', 'w') as f: f.write(head) f.write('\n'.join(str(L) for L in late)) with open('warn.txt', 'w') as f: f.write(head) f.write('\n'.join(str(w) for w in warn)) late_handlers = [_default_late_handler] stuff_to_do = [globals()[k] for k in config.utils.tasks.recurring]
random_line_split
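backup_shift in the utils.py record above rotates numbered backups (name.1 becomes name.2, and so on) so that a fresh name.1 can be written. A small standalone sketch of the same rotation against a plain local filesystem; the function and file names here are illustrative, not the module's own:

# Rotate numbered backups the way backup_shift does.
import os

def rotate_backups(base, depth):
    """Shift base.1 .. base.(depth-1) up one number, dropping base.depth."""
    def name(n):
        return f"{base}.{n}"
    try:
        os.remove(name(depth))        # oldest backup falls off the end
    except FileNotFoundError:
        pass
    for n in range(depth, 1, -1):     # depth..2: move n-1 -> n
        try:
            os.rename(name(n - 1), name(n))
        except FileNotFoundError:
            pass                      # gap in the series, skip it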
utils.py
"""Utilities. Mostly periodic checks. Everything that is neither core nor gui contents (for use): - run() -- call once on startup. takes care of all automatic tasks - send_email() -- send an email - get_name() -- get a pretty name - get_book_data() -- attempt to get data about a book based on the ISBN (first local DB, then DNB). to add late handlers, append them to late_handlers. they will receive arguments as specified in late_books() """ import base64 import tempfile import email import smtplib import ssl from datetime import datetime, timedelta, date import time import threading import shutil import os import ftplib import ftputil import requests import logging import re import bs4 import string try: from cryptography import fernet except ImportError: fernet = None from buchschloss import core, config class FormattedDate(date): """print a datetime.date as specified in config.core.date_format""" def __str__(self): return self.strftime(config.core.date_format) @classmethod def fromdate(cls, date_: date): """Create a FormattedDate from a datetime.date""" if date_ is None: return None else: return cls(date_.year, date_.month, date_.day) def todate(self): """transform self to a datetime.date""" return date(self.year, self.month, self.day) def run_checks(): """Run stuff to do as specified by times set in config""" while True: if datetime.now() > core.misc_data.check_date+timedelta(minutes=45): for stuff in stuff_to_do: threading.Thread(target=stuff).start() core.misc_data.check_date = datetime.now() + config.utils.tasks.repeat_every time.sleep(5*60*60) def late_books(): """Check for late and nearly late books. Call the functions in late_handlers with arguments (late, warn). late and warn are sequences of core.Borrow instances. """ late = [] warn = [] today = date.today() for b in core.Borrow.search(( ('is_back', 'eq', False), 'and', ('return_date', 'gt', today+config.utils.late_books_warn_time))): if b.return_date < today: late.append(b) else: warn.append(b) for h in late_handlers: h(late, warn) def backup(): """Local backups. Run backup_shift and copy "name" db to "name.1", encrypting if a key is given in config """ backup_shift(os, config.utils.tasks.backup_depth) if config.utils.tasks.secret_key is None: shutil.copyfile(config.core.database_name, config.core.database_name+'.1') else: data = get_encrypted_database() with open(config.core.database_name+'.1', 'wb') as f: f.write(data) def
(): """get the encrypted contents of the database file""" if fernet is None: raise RuntimeError('encryption requested, but no cryptography available') with open(config.core.database_name, 'rb') as f: plain = f.read() key = base64.urlsafe_b64encode(config.utils.tasks.secret_key) cipher = fernet.Fernet(key).encrypt(plain) return base64.urlsafe_b64decode(cipher) def web_backup(): """Remote backups. Run backup_shift and upload "name" DB as "name.1", encrypted if a key is given in config """ conf = config.utils if conf.tasks.secret_key is None: upload_path = config.core.database_name file = None else: file = tempfile.NamedTemporaryFile(delete=False) file.write(get_encrypted_database()) file.close() upload_path = file.name factory = ftplib.FTP_TLS if conf.tls else ftplib.FTP # noinspection PyDeprecation with ftputil.FTPHost(conf.ftp.host, conf.ftp.username, conf.ftp.password, session_factory=factory, use_list_a_option=False) as host: backup_shift(host, conf.tasks.web_backup_depth) host.upload(upload_path, config.core.database_name+'.1') if file is not None: os.unlink(file.name) def backup_shift(fs, depth): """shift all name.number up one number to the given depth in the given filesystem (os or remote FTP host)""" number_name = lambda n: '.'.join((config.core.database_name, str(n))) try: fs.remove(number_name(depth)) except FileNotFoundError: pass for f in range(depth, 1, -1): try: fs.rename(number_name(f-1), number_name(f)) except FileNotFoundError: pass def send_email(subject, text): """Send an email to the recipient specified in config""" cfg = config.utils.email msg = email.message.Message() msg['From'] = cfg['from'] msg['To'] = cfg.recipient msg['Subject'] = subject msg.set_payload(text) try: with smtplib.SMTP(cfg.smtp.host, cfg.smtp.port) as conn: if cfg.smtp.tls: conn.starttls(context=ssl.create_default_context()) if cfg.smtp.username is not None: conn.login(cfg.smtp.username, cfg.smtp.password) conn.send_message(msg) except smtplib.SMTPException as e: logging.error('error while sending email: {}: {}'.format(type(e).__name__, e)) def get_name(internal: str): """Get an end-user suitable name. Try lookup in config.utils.names. "__" is replaced by ": " with components looked up individually If a name isn't found, a warning is logged and the internal name returned, potentially modified "<namespace>::<name>" may specify a namespace in which lookups are performed first, falling back to the global names if nothing is found "__" takes precedence over "::" """ if '__' in internal: return ': '.join(get_name(s) for s in internal.split('__')) *path, name = internal.split('::') current = config.utils.names look_in = [current] try: for k in path: current = current[k] look_in.append(current) except KeyError: # noinspection PyUnboundLocalVariable logging.warning('invalid namespace {!r} of {!r}'.format(k, internal)) look_in.reverse() for ns in look_in: try: val = ns[name] if isinstance(val, str): return val elif isinstance(val, dict): return val['*this*'] else: raise TypeError('{!r} is neither dict nor str'.format(val)) except KeyError: pass logging.warning('Name "{}" was not found in the namefile'.format('::'.join(path+[name]))) return '::'.join(path+[name]) def break_string(text, size, break_char=string.punctuation, cut_char=string.whitespace): """Insert newlines every `size` characters. Insert '\n' before the given amount of characters if a character in `break_char` is encountered. If the character is in `cut_char`, it is replaced by the newline. 
""" # TODO: move to misc break_char += cut_char r = [] while len(text) > size: i = size cut = False while i: if text[i] in break_char: cut = text[i] in cut_char break i -= 1 else: i = size-1 i += 1 r.append(text[:i-cut]) text = text[i:] r.append(text) return '\n'.join(r) def get_book_data(isbn: int): """Attempt to get book data via the ISBN from the DB, if that fails, try the DNB (https://portal.dnb.de)""" try: book = next(iter(core.Book.search(('isbn', 'eq', isbn)))) except StopIteration: pass # actually, I could put the whole rest of the function here else: data = core.Book.view_str(book.id) del data['id'], data['status'], data['return_date'], data['borrowed_by'] del data['borrowed_by_id'], data['__str__'] return data try: r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D' + str(isbn) + '&method=simpleSearch&cqlMode=true') r.raise_for_status() except requests.exceptions.RequestException: raise core.BuchSchlossError('no_connection', 'no_connection') person_re = re.compile(r'(\w*, \w*) \((\w*)\)') results = {'concerned_people': []} page = bs4.BeautifulSoup(r.text) table = page.select_one('#fullRecordTable') if table is None: # see if we got multiple results link_to_first = page.select_one('#recordLink_0') if link_to_first is None: raise core.BuchSchlossError( 'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn) r = requests.get('https://portal.dnb.de'+link_to_first['href']) page = bs4.BeautifulSoup(r.text) table = page.select_one('#fullRecordTable') for tr in table.select('tr'): td = [x.get_text('\n').strip() for x in tr.select('td')] if len(td) == 2: if td[0] == 'Titel': results['title'] = td[1].split('/')[0].strip() elif td[0] == 'Person(en)': for p in td[1].split('\n'): g = person_re.search(p) if g is None: continue g = g.groups() if g[1] == 'Verfasser': results['author'] = g[0] else: results['concerned_people'].append(g[1]+': '+g[0]) elif td[0] == 'Verlag': results['publisher'] = td[1].split(':')[1].strip() elif td[0] == 'Zeitliche Einordnung': results['year'] = td[1].split(':')[1].strip() elif td[0] == 'Sprache(n)': results['language'] = td[1].split(',')[0].split()[0].strip() results['concerned_people'] = '; '.join(results['concerned_people']) return results def run(): """handling function.""" for k in config.utils.tasks.startup: threading.Thread(target=globals()[k], daemon=True).start() threading.Thread(target=run_checks, daemon=True).start() def _default_late_handler(late, warn): head = datetime.now().strftime(config.core.date_format).join(('\n\n',)) with open('late.txt', 'w') as f: f.write(head) f.write('\n'.join(str(L) for L in late)) with open('warn.txt', 'w') as f: f.write(head) f.write('\n'.join(str(w) for w in warn)) late_handlers = [_default_late_handler] stuff_to_do = [globals()[k] for k in config.utils.tasks.recurring]
get_encrypted_database
identifier_name
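The get_encrypted_database middle above base64-decodes the Fernet token before storage, which works because Fernet output is itself urlsafe base64 (decoding it just stores the raw token bytes more compactly). A hedged sketch of the matching restore path, assuming the same 32-byte secret key; the function name and file handling are illustrative:

# Reverse of get_encrypted_database: re-encode to a Fernet token, decrypt.
import base64
from cryptography.fernet import Fernet

def decrypt_database(path, secret_key: bytes) -> bytes:
    with open(path, 'rb') as f:
        raw = f.read()
    token = base64.urlsafe_b64encode(raw)       # back to token form
    key = base64.urlsafe_b64encode(secret_key)  # Fernet expects a b64 key
    return Fernet(key).decrypt(token)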
utils.py
"""Utilities. Mostly periodic checks. Everything that is neither core nor gui contents (for use): - run() -- call once on startup. takes care of all automatic tasks - send_email() -- send an email - get_name() -- get a pretty name - get_book_data() -- attempt to get data about a book based on the ISBN (first local DB, then DNB). to add late handlers, append them to late_handlers. they will receive arguments as specified in late_books() """ import base64 import tempfile import email import smtplib import ssl from datetime import datetime, timedelta, date import time import threading import shutil import os import ftplib import ftputil import requests import logging import re import bs4 import string try: from cryptography import fernet except ImportError: fernet = None from buchschloss import core, config class FormattedDate(date): """print a datetime.date as specified in config.core.date_format""" def __str__(self): return self.strftime(config.core.date_format) @classmethod def fromdate(cls, date_: date): """Create a FormattedDate from a datetime.date""" if date_ is None: return None else: return cls(date_.year, date_.month, date_.day) def todate(self): """transform self to a datetime.date""" return date(self.year, self.month, self.day) def run_checks():
def late_books(): """Check for late and nearly late books. Call the functions in late_handlers with arguments (late, warn). late and warn are sequences of core.Borrow instances. """ late = [] warn = [] today = date.today() for b in core.Borrow.search(( ('is_back', 'eq', False), 'and', ('return_date', 'gt', today+config.utils.late_books_warn_time))): if b.return_date < today: late.append(b) else: warn.append(b) for h in late_handlers: h(late, warn) def backup(): """Local backups. Run backup_shift and copy "name" db to "name.1", encrypting if a key is given in config """ backup_shift(os, config.utils.tasks.backup_depth) if config.utils.tasks.secret_key is None: shutil.copyfile(config.core.database_name, config.core.database_name+'.1') else: data = get_encrypted_database() with open(config.core.database_name+'.1', 'wb') as f: f.write(data) def get_encrypted_database(): """get the encrypted contents of the database file""" if fernet is None: raise RuntimeError('encryption requested, but no cryptography available') with open(config.core.database_name, 'rb') as f: plain = f.read() key = base64.urlsafe_b64encode(config.utils.tasks.secret_key) cipher = fernet.Fernet(key).encrypt(plain) return base64.urlsafe_b64decode(cipher) def web_backup(): """Remote backups. Run backup_shift and upload "name" DB as "name.1", encrypted if a key is given in config """ conf = config.utils if conf.tasks.secret_key is None: upload_path = config.core.database_name file = None else: file = tempfile.NamedTemporaryFile(delete=False) file.write(get_encrypted_database()) file.close() upload_path = file.name factory = ftplib.FTP_TLS if conf.tls else ftplib.FTP # noinspection PyDeprecation with ftputil.FTPHost(conf.ftp.host, conf.ftp.username, conf.ftp.password, session_factory=factory, use_list_a_option=False) as host: backup_shift(host, conf.tasks.web_backup_depth) host.upload(upload_path, config.core.database_name+'.1') if file is not None: os.unlink(file.name) def backup_shift(fs, depth): """shift all name.number up one number to the given depth in the given filesystem (os or remote FTP host)""" number_name = lambda n: '.'.join((config.core.database_name, str(n))) try: fs.remove(number_name(depth)) except FileNotFoundError: pass for f in range(depth, 1, -1): try: fs.rename(number_name(f-1), number_name(f)) except FileNotFoundError: pass def send_email(subject, text): """Send an email to the recipient specified in config""" cfg = config.utils.email msg = email.message.Message() msg['From'] = cfg['from'] msg['To'] = cfg.recipient msg['Subject'] = subject msg.set_payload(text) try: with smtplib.SMTP(cfg.smtp.host, cfg.smtp.port) as conn: if cfg.smtp.tls: conn.starttls(context=ssl.create_default_context()) if cfg.smtp.username is not None: conn.login(cfg.smtp.username, cfg.smtp.password) conn.send_message(msg) except smtplib.SMTPException as e: logging.error('error while sending email: {}: {}'.format(type(e).__name__, e)) def get_name(internal: str): """Get an end-user suitable name. Try lookup in config.utils.names. 
"__" is replaced by ": " with components looked up individually If a name isn't found, a warning is logged and the internal name returned, potentially modified "<namespace>::<name>" may specify a namespace in which lookups are performed first, falling back to the global names if nothing is found "__" takes precedence over "::" """ if '__' in internal: return ': '.join(get_name(s) for s in internal.split('__')) *path, name = internal.split('::') current = config.utils.names look_in = [current] try: for k in path: current = current[k] look_in.append(current) except KeyError: # noinspection PyUnboundLocalVariable logging.warning('invalid namespace {!r} of {!r}'.format(k, internal)) look_in.reverse() for ns in look_in: try: val = ns[name] if isinstance(val, str): return val elif isinstance(val, dict): return val['*this*'] else: raise TypeError('{!r} is neither dict nor str'.format(val)) except KeyError: pass logging.warning('Name "{}" was not found in the namefile'.format('::'.join(path+[name]))) return '::'.join(path+[name]) def break_string(text, size, break_char=string.punctuation, cut_char=string.whitespace): """Insert newlines every `size` characters. Insert '\n' before the given amount of characters if a character in `break_char` is encountered. If the character is in `cut_char`, it is replaced by the newline. """ # TODO: move to misc break_char += cut_char r = [] while len(text) > size: i = size cut = False while i: if text[i] in break_char: cut = text[i] in cut_char break i -= 1 else: i = size-1 i += 1 r.append(text[:i-cut]) text = text[i:] r.append(text) return '\n'.join(r) def get_book_data(isbn: int): """Attempt to get book data via the ISBN from the DB, if that fails, try the DNB (https://portal.dnb.de)""" try: book = next(iter(core.Book.search(('isbn', 'eq', isbn)))) except StopIteration: pass # actually, I could put the whole rest of the function here else: data = core.Book.view_str(book.id) del data['id'], data['status'], data['return_date'], data['borrowed_by'] del data['borrowed_by_id'], data['__str__'] return data try: r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D' + str(isbn) + '&method=simpleSearch&cqlMode=true') r.raise_for_status() except requests.exceptions.RequestException: raise core.BuchSchlossError('no_connection', 'no_connection') person_re = re.compile(r'(\w*, \w*) \((\w*)\)') results = {'concerned_people': []} page = bs4.BeautifulSoup(r.text) table = page.select_one('#fullRecordTable') if table is None: # see if we got multiple results link_to_first = page.select_one('#recordLink_0') if link_to_first is None: raise core.BuchSchlossError( 'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn) r = requests.get('https://portal.dnb.de'+link_to_first['href']) page = bs4.BeautifulSoup(r.text) table = page.select_one('#fullRecordTable') for tr in table.select('tr'): td = [x.get_text('\n').strip() for x in tr.select('td')] if len(td) == 2: if td[0] == 'Titel': results['title'] = td[1].split('/')[0].strip() elif td[0] == 'Person(en)': for p in td[1].split('\n'): g = person_re.search(p) if g is None: continue g = g.groups() if g[1] == 'Verfasser': results['author'] = g[0] else: results['concerned_people'].append(g[1]+': '+g[0]) elif td[0] == 'Verlag': results['publisher'] = td[1].split(':')[1].strip() elif td[0] == 'Zeitliche Einordnung': results['year'] = td[1].split(':')[1].strip() elif td[0] == 'Sprache(n)': results['language'] = td[1].split(',')[0].split()[0].strip() results['concerned_people'] = '; '.join(results['concerned_people']) return results 
def run(): """handling function.""" for k in config.utils.tasks.startup: threading.Thread(target=globals()[k], daemon=True).start() threading.Thread(target=run_checks, daemon=True).start() def _default_late_handler(late, warn): head = datetime.now().strftime(config.core.date_format).join(('\n\n',)) with open('late.txt', 'w') as f: f.write(head) f.write('\n'.join(str(L) for L in late)) with open('warn.txt', 'w') as f: f.write(head) f.write('\n'.join(str(w) for w in warn)) late_handlers = [_default_late_handler] stuff_to_do = [globals()[k] for k in config.utils.tasks.recurring]
"""Run stuff to do as specified by times set in config""" while True: if datetime.now() > core.misc_data.check_date+timedelta(minutes=45): for stuff in stuff_to_do: threading.Thread(target=stuff).start() core.misc_data.check_date = datetime.now() + config.utils.tasks.repeat_every time.sleep(5*60*60)
identifier_body
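The identifier_body middle above is the polling loop of run_checks: compare a persisted due date (plus a grace window) against now, fire each recurring task in its own thread, and push the date forward by the configured interval. A stripped-down sketch of that pattern; the state object and task list are stand-ins for core.misc_data and stuff_to_do:

# Generic version of the run_checks polling loop.
import threading
import time
from datetime import datetime, timedelta

def poll_recurring(state, tasks, repeat_every: timedelta,
                   grace=timedelta(minutes=45), poll_seconds=5 * 60 * 60):
    """Fire `tasks` whenever the persisted due date (plus grace) passes."""
    while True:
        if datetime.now() > state.check_date + grace:
            for task in tasks:
                threading.Thread(target=task).start()  # don't block the loop
            state.check_date = datetime.now() + repeat_every
        time.sleep(poll_seconds)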
utils.py
"""Utilities. Mostly periodic checks. Everything that is neither core nor gui contents (for use): - run() -- call once on startup. takes care of all automatic tasks - send_email() -- send an email - get_name() -- get a pretty name - get_book_data() -- attempt to get data about a book based on the ISBN (first local DB, then DNB). to add late handlers, append them to late_handlers. they will receive arguments as specified in late_books() """ import base64 import tempfile import email import smtplib import ssl from datetime import datetime, timedelta, date import time import threading import shutil import os import ftplib import ftputil import requests import logging import re import bs4 import string try: from cryptography import fernet except ImportError: fernet = None from buchschloss import core, config class FormattedDate(date): """print a datetime.date as specified in config.core.date_format""" def __str__(self): return self.strftime(config.core.date_format) @classmethod def fromdate(cls, date_: date): """Create a FormattedDate from a datetime.date""" if date_ is None: return None else: return cls(date_.year, date_.month, date_.day) def todate(self): """transform self to a datetime.date""" return date(self.year, self.month, self.day) def run_checks(): """Run stuff to do as specified by times set in config""" while True: if datetime.now() > core.misc_data.check_date+timedelta(minutes=45): for stuff in stuff_to_do: threading.Thread(target=stuff).start() core.misc_data.check_date = datetime.now() + config.utils.tasks.repeat_every time.sleep(5*60*60) def late_books(): """Check for late and nearly late books. Call the functions in late_handlers with arguments (late, warn). late and warn are sequences of core.Borrow instances. """ late = [] warn = [] today = date.today() for b in core.Borrow.search(( ('is_back', 'eq', False), 'and', ('return_date', 'gt', today+config.utils.late_books_warn_time))): if b.return_date < today: late.append(b) else: warn.append(b) for h in late_handlers: h(late, warn) def backup(): """Local backups. Run backup_shift and copy "name" db to "name.1", encrypting if a key is given in config """ backup_shift(os, config.utils.tasks.backup_depth) if config.utils.tasks.secret_key is None: shutil.copyfile(config.core.database_name, config.core.database_name+'.1') else: data = get_encrypted_database() with open(config.core.database_name+'.1', 'wb') as f: f.write(data) def get_encrypted_database(): """get the encrypted contents of the database file""" if fernet is None: raise RuntimeError('encryption requested, but no cryptography available') with open(config.core.database_name, 'rb') as f: plain = f.read() key = base64.urlsafe_b64encode(config.utils.tasks.secret_key) cipher = fernet.Fernet(key).encrypt(plain) return base64.urlsafe_b64decode(cipher) def web_backup(): """Remote backups. 
Run backup_shift and upload "name" DB as "name.1", encrypted if a key is given in config """ conf = config.utils if conf.tasks.secret_key is None: upload_path = config.core.database_name file = None else: file = tempfile.NamedTemporaryFile(delete=False) file.write(get_encrypted_database()) file.close() upload_path = file.name factory = ftplib.FTP_TLS if conf.tls else ftplib.FTP # noinspection PyDeprecation with ftputil.FTPHost(conf.ftp.host, conf.ftp.username, conf.ftp.password, session_factory=factory, use_list_a_option=False) as host: backup_shift(host, conf.tasks.web_backup_depth) host.upload(upload_path, config.core.database_name+'.1') if file is not None: os.unlink(file.name) def backup_shift(fs, depth): """shift all name.number up one number to the given depth in the given filesystem (os or remote FTP host)""" number_name = lambda n: '.'.join((config.core.database_name, str(n))) try: fs.remove(number_name(depth)) except FileNotFoundError: pass for f in range(depth, 1, -1): try: fs.rename(number_name(f-1), number_name(f)) except FileNotFoundError: pass def send_email(subject, text): """Send an email to the recipient specified in config""" cfg = config.utils.email msg = email.message.Message() msg['From'] = cfg['from'] msg['To'] = cfg.recipient msg['Subject'] = subject msg.set_payload(text) try: with smtplib.SMTP(cfg.smtp.host, cfg.smtp.port) as conn: if cfg.smtp.tls: conn.starttls(context=ssl.create_default_context()) if cfg.smtp.username is not None: conn.login(cfg.smtp.username, cfg.smtp.password) conn.send_message(msg) except smtplib.SMTPException as e: logging.error('error while sending email: {}: {}'.format(type(e).__name__, e)) def get_name(internal: str): """Get an end-user suitable name. Try lookup in config.utils.names. "__" is replaced by ": " with components looked up individually If a name isn't found, a warning is logged and the internal name returned, potentially modified "<namespace>::<name>" may specify a namespace in which lookups are performed first, falling back to the global names if nothing is found "__" takes precedence over "::" """ if '__' in internal: return ': '.join(get_name(s) for s in internal.split('__')) *path, name = internal.split('::') current = config.utils.names look_in = [current] try: for k in path: current = current[k] look_in.append(current) except KeyError: # noinspection PyUnboundLocalVariable logging.warning('invalid namespace {!r} of {!r}'.format(k, internal)) look_in.reverse() for ns in look_in: try: val = ns[name] if isinstance(val, str): return val elif isinstance(val, dict): return val['*this*'] else: raise TypeError('{!r} is neither dict nor str'.format(val)) except KeyError: pass logging.warning('Name "{}" was not found in the namefile'.format('::'.join(path+[name]))) return '::'.join(path+[name]) def break_string(text, size, break_char=string.punctuation, cut_char=string.whitespace): """Insert newlines every `size` characters. Insert '\n' before the given amount of characters if a character in `break_char` is encountered. If the character is in `cut_char`, it is replaced by the newline. 
""" # TODO: move to misc break_char += cut_char r = [] while len(text) > size: i = size cut = False while i: if text[i] in break_char: cut = text[i] in cut_char break i -= 1 else: i = size-1 i += 1 r.append(text[:i-cut]) text = text[i:] r.append(text) return '\n'.join(r) def get_book_data(isbn: int): """Attempt to get book data via the ISBN from the DB, if that fails, try the DNB (https://portal.dnb.de)""" try: book = next(iter(core.Book.search(('isbn', 'eq', isbn)))) except StopIteration: pass # actually, I could put the whole rest of the function here else: data = core.Book.view_str(book.id) del data['id'], data['status'], data['return_date'], data['borrowed_by'] del data['borrowed_by_id'], data['__str__'] return data try: r = requests.get('https://portal.dnb.de/opac.htm?query=isbn%3D' + str(isbn) + '&method=simpleSearch&cqlMode=true') r.raise_for_status() except requests.exceptions.RequestException: raise core.BuchSchlossError('no_connection', 'no_connection') person_re = re.compile(r'(\w*, \w*) \((\w*)\)') results = {'concerned_people': []} page = bs4.BeautifulSoup(r.text) table = page.select_one('#fullRecordTable') if table is None: # see if we got multiple results link_to_first = page.select_one('#recordLink_0') if link_to_first is None: raise core.BuchSchlossError( 'Book_not_found', 'Book_with_ISBN_{}_not_in_DNB', isbn) r = requests.get('https://portal.dnb.de'+link_to_first['href']) page = bs4.BeautifulSoup(r.text) table = page.select_one('#fullRecordTable') for tr in table.select('tr'): td = [x.get_text('\n').strip() for x in tr.select('td')] if len(td) == 2: if td[0] == 'Titel': results['title'] = td[1].split('/')[0].strip() elif td[0] == 'Person(en)':
elif td[0] == 'Verlag': results['publisher'] = td[1].split(':')[1].strip() elif td[0] == 'Zeitliche Einordnung': results['year'] = td[1].split(':')[1].strip() elif td[0] == 'Sprache(n)': results['language'] = td[1].split(',')[0].split()[0].strip() results['concerned_people'] = '; '.join(results['concerned_people']) return results def run(): """handling function.""" for k in config.utils.tasks.startup: threading.Thread(target=globals()[k], daemon=True).start() threading.Thread(target=run_checks, daemon=True).start() def _default_late_handler(late, warn): head = datetime.now().strftime(config.core.date_format).join(('\n\n',)) with open('late.txt', 'w') as f: f.write(head) f.write('\n'.join(str(L) for L in late)) with open('warn.txt', 'w') as f: f.write(head) f.write('\n'.join(str(w) for w in warn)) late_handlers = [_default_late_handler] stuff_to_do = [globals()[k] for k in config.utils.tasks.recurring]
for p in td[1].split('\n'): g = person_re.search(p) if g is None: continue g = g.groups() if g[1] == 'Verfasser': results['author'] = g[0] else: results['concerned_people'].append(g[1]+': '+g[0])
conditional_block
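The conditional_block middle above walks the DNB "Person(en)" cell with person_re, routing 'Verfasser' matches to the author field and everything else to concerned_people. A quick illustration of that regex against made-up DNB-style lines (the names are invented):

import re

person_re = re.compile(r'(\w*, \w*) \((\w*)\)')

for line in ['Mustermann, Max (Verfasser)', 'Beispiel, Erika (Illustrator)']:
    g = person_re.search(line)
    if g is None:
        continue
    name, role = g.groups()
    if role == 'Verfasser':
        print('author:', name)            # -> author: Mustermann, Max
    else:
        print('concerned:', f'{role}: {name}')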
peer_connection.rs
// Copyright 2019, The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use std::{ fmt, future::Future, sync::{ atomic::{AtomicUsize, Ordering}, Arc, }, time::{Duration, Instant}, }; use log::*; use multiaddr::Multiaddr; use tokio::{ sync::{mpsc, oneshot}, time, }; use tokio_stream::StreamExt; use tracing::{self, span, Instrument, Level}; use super::{ direction::ConnectionDirection, error::{ConnectionManagerError, PeerConnectionError}, manager::ConnectionManagerEvent, }; #[cfg(feature = "rpc")] use crate::protocol::rpc::{ pool::RpcClientPool, pool::RpcPoolClient, NamedProtocolService, RpcClient, RpcClientBuilder, RpcError, RPC_MAX_FRAME_SIZE, }; use crate::{ framing, framing::CanonicalFraming, multiplexing::{Control, IncomingSubstreams, Substream, Yamux}, peer_manager::{NodeId, PeerFeatures, PeerIdentityClaim}, protocol::{ProtocolId, ProtocolNegotiation}, utils::atomic_ref_counter::AtomicRefCounter, }; const LOG_TARGET: &str = "comms::connection_manager::peer_connection"; static ID_COUNTER: AtomicUsize = AtomicUsize::new(0); pub fn try_create( connection: Yamux, peer_addr: Multiaddr, peer_node_id: NodeId, peer_features: PeerFeatures, direction: ConnectionDirection, event_notifier: mpsc::Sender<ConnectionManagerEvent>, our_supported_protocols: Vec<ProtocolId>, their_supported_protocols: Vec<ProtocolId>, peer_identity_claim: PeerIdentityClaim, ) -> Result<PeerConnection, ConnectionManagerError> { trace!( target: LOG_TARGET, "(Peer={}) Socket successfully upgraded to multiplexed socket", peer_node_id.short_str() ); // All requests are request/response, so a channel size of 1 is all that is needed let (peer_tx, peer_rx) = mpsc::channel(1); let id = ID_COUNTER.fetch_add(1, Ordering::Relaxed); // Monotonic let substream_counter = connection.substream_counter(); let peer_conn = PeerConnection::new( id, peer_tx, peer_node_id.clone(), peer_features, peer_addr, direction, substream_counter, peer_identity_claim, ); let peer_actor = PeerConnectionActor::new( id, peer_node_id, direction, connection, peer_rx, event_notifier, our_supported_protocols, their_supported_protocols, ); 
tokio::spawn(peer_actor.run()); Ok(peer_conn) } /// Request types for the PeerConnection actor. #[derive(Debug)] pub enum PeerConnectionRequest { /// Open a new substream and negotiate the given protocol OpenSubstream { protocol_id: ProtocolId, reply_tx: oneshot::Sender<Result<NegotiatedSubstream<Substream>, PeerConnectionError>>, }, /// Disconnect all substreams and close the transport connection Disconnect(bool, oneshot::Sender<Result<(), PeerConnectionError>>), } /// ID type for peer connections pub type ConnectionId = usize; /// Request handle for an active peer connection #[derive(Debug, Clone)] pub struct PeerConnection { id: ConnectionId, peer_node_id: NodeId, peer_features: PeerFeatures, request_tx: mpsc::Sender<PeerConnectionRequest>, address: Arc<Multiaddr>, direction: ConnectionDirection, started_at: Instant, substream_counter: AtomicRefCounter, handle_counter: Arc<()>, peer_identity_claim: Option<PeerIdentityClaim>, } impl PeerConnection { pub(crate) fn new( id: ConnectionId, request_tx: mpsc::Sender<PeerConnectionRequest>, peer_node_id: NodeId, peer_features: PeerFeatures, address: Multiaddr, direction: ConnectionDirection, substream_counter: AtomicRefCounter, peer_identity_claim: PeerIdentityClaim, ) -> Self { Self { id, request_tx, peer_node_id, peer_features, address: Arc::new(address), direction, started_at: Instant::now(), substream_counter, handle_counter: Arc::new(()), peer_identity_claim: Some(peer_identity_claim), } } /// Should only be used in tests pub(crate) fn unverified( id: ConnectionId, request_tx: mpsc::Sender<PeerConnectionRequest>, peer_node_id: NodeId, peer_features: PeerFeatures, address: Multiaddr, direction: ConnectionDirection, substream_counter: AtomicRefCounter, ) -> Self { Self { id, request_tx, peer_node_id, peer_features, address: Arc::new(address), direction, started_at: Instant::now(), substream_counter, handle_counter: Arc::new(()), peer_identity_claim: None, } } pub fn peer_node_id(&self) -> &NodeId { &self.peer_node_id } pub fn peer_features(&self) -> PeerFeatures { self.peer_features } pub fn direction(&self) -> ConnectionDirection { self.direction } pub fn address(&self) -> &Multiaddr { &self.address } pub fn id(&self) -> ConnectionId { self.id } pub fn is_connected(&self) -> bool { !self.request_tx.is_closed() } /// Returns a owned future that resolves on disconnection pub fn on_disconnect(&self) -> impl Future<Output = ()> + 'static { let request_tx = self.request_tx.clone(); async move { request_tx.closed().await } } pub fn age(&self) -> Duration { self.started_at.elapsed() } pub fn substream_count(&self) -> usize { self.substream_counter.get() } pub fn handle_count(&self) -> usize { Arc::strong_count(&self.handle_counter) } pub fn peer_identity_claim(&self) -> Option<&PeerIdentityClaim> { self.peer_identity_claim.as_ref() } #[tracing::instrument(level = "trace", "peer_connection::open_substream", skip(self))] pub async fn open_substream( &mut self, protocol_id: &ProtocolId, ) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> { let (reply_tx, reply_rx) = oneshot::channel(); self.request_tx .send(PeerConnectionRequest::OpenSubstream { protocol_id: protocol_id.clone(), reply_tx, }) .await?; reply_rx .await .map_err(|_| PeerConnectionError::InternalReplyCancelled)? } #[tracing::instrument(level = "trace", "peer_connection::open_framed_substream", skip(self))] pub async fn open_framed_substream( &mut self, protocol_id: &ProtocolId, max_frame_size: usize, ) -> Result<CanonicalFraming<Substream>, PeerConnectionError>
#[cfg(feature = "rpc")] #[tracing::instrument("peer_connection::connect_rpc", level="trace", skip(self), fields(peer_node_id = self.peer_node_id.to_string().as_str()))] pub async fn connect_rpc<T>(&mut self) -> Result<T, RpcError> where T: From<RpcClient> + NamedProtocolService { self.connect_rpc_using_builder(Default::default()).await } #[cfg(feature = "rpc")] #[tracing::instrument("peer_connection::connect_rpc_with_builder", level = "trace", skip(self, builder))] pub async fn connect_rpc_using_builder<T>(&mut self, builder: RpcClientBuilder<T>) -> Result<T, RpcError> where T: From<RpcClient> + NamedProtocolService { let protocol = ProtocolId::from_static(T::PROTOCOL_NAME); debug!( target: LOG_TARGET, "Attempting to establish RPC protocol `{}` to peer `{}`", String::from_utf8_lossy(&protocol), self.peer_node_id ); let framed = self.open_framed_substream(&protocol, RPC_MAX_FRAME_SIZE).await?; builder .with_protocol_id(protocol) .with_node_id(self.peer_node_id.clone()) .connect(framed) .await } /// Creates a new RpcClientPool that can be shared between tasks. The client pool will lazily establish up to /// `max_sessions` sessions and provides client session that is least used. #[cfg(feature = "rpc")] pub fn create_rpc_client_pool<T>( &self, max_sessions: usize, client_config: RpcClientBuilder<T>, ) -> RpcClientPool<T> where T: RpcPoolClient + From<RpcClient> + NamedProtocolService + Clone, { RpcClientPool::new(self.clone(), max_sessions, client_config) } /// Immediately disconnects the peer connection. This can only fail if the peer connection worker /// is shut down (and the peer is already disconnected) pub async fn disconnect(&mut self) -> Result<(), PeerConnectionError> { let (reply_tx, reply_rx) = oneshot::channel(); self.request_tx .send(PeerConnectionRequest::Disconnect(false, reply_tx)) .await?; reply_rx .await .map_err(|_| PeerConnectionError::InternalReplyCancelled)? } pub(crate) async fn disconnect_silent(&mut self) -> Result<(), PeerConnectionError> { let (reply_tx, reply_rx) = oneshot::channel(); self.request_tx .send(PeerConnectionRequest::Disconnect(true, reply_tx)) .await?; reply_rx .await .map_err(|_| PeerConnectionError::InternalReplyCancelled)? } } impl fmt::Display for PeerConnection { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { write!( f, "Id: {}, Node ID: {}, Direction: {}, Peer Address: {}, Age: {:.0?}, #Substreams: {}, #Refs: {}", self.id, self.peer_node_id.short_str(), self.direction, self.address, self.age(), self.substream_count(), self.handle_count() ) } } impl PartialEq for PeerConnection { fn eq(&self, other: &Self) -> bool { self.id == other.id } } /// Actor for an active connection to a peer. 
struct PeerConnectionActor { id: ConnectionId, peer_node_id: NodeId, request_rx: mpsc::Receiver<PeerConnectionRequest>, direction: ConnectionDirection, incoming_substreams: IncomingSubstreams, control: Control, event_notifier: mpsc::Sender<ConnectionManagerEvent>, our_supported_protocols: Vec<ProtocolId>, their_supported_protocols: Vec<ProtocolId>, } impl PeerConnectionActor { fn new( id: ConnectionId, peer_node_id: NodeId, direction: ConnectionDirection, connection: Yamux, request_rx: mpsc::Receiver<PeerConnectionRequest>, event_notifier: mpsc::Sender<ConnectionManagerEvent>, our_supported_protocols: Vec<ProtocolId>, their_supported_protocols: Vec<ProtocolId>, ) -> Self { Self { id, peer_node_id, direction, control: connection.get_yamux_control(), incoming_substreams: connection.into_incoming(), request_rx, event_notifier, our_supported_protocols, their_supported_protocols, } } pub async fn run(mut self) { loop { tokio::select! { maybe_request = self.request_rx.recv() => { match maybe_request { Some(request) => self.handle_request(request).await, None => { debug!(target: LOG_TARGET, "[{}] All peer connection handles dropped closing the connection", self); break; } } }, maybe_substream = self.incoming_substreams.next() => { match maybe_substream { Some(substream) => { if let Err(err) = self.handle_incoming_substream(substream).await { error!( target: LOG_TARGET, "[{}] Incoming substream for peer '{}' failed to open because '{error}'", self, self.peer_node_id.short_str(), error = err ) } }, None => { debug!(target: LOG_TARGET, "[{}] Peer '{}' closed the connection", self, self.peer_node_id.short_str()); break; }, } } } } if let Err(err) = self.disconnect(false).await { warn!( target: LOG_TARGET, "[{}] Failed to politely close connection to peer '{}' because '{}'", self, self.peer_node_id.short_str(), err ); } } async fn handle_request(&mut self, request: PeerConnectionRequest) { use PeerConnectionRequest::{Disconnect, OpenSubstream}; match request { OpenSubstream { protocol_id, reply_tx } => { let tracing_id = tracing::Span::current().id(); let span = span!(Level::TRACE, "handle_request"); span.follows_from(tracing_id); let result = self.open_negotiated_protocol_stream(protocol_id).instrument(span).await; log_if_error_fmt!( target: LOG_TARGET, reply_tx.send(result), "Reply oneshot closed when sending reply", ); }, Disconnect(silent, reply_tx) => { debug!( target: LOG_TARGET, "[{}] Disconnect{}requested for {} connection to peer '{}'", self, if silent { " (silent) " } else { " " }, self.direction, self.peer_node_id.short_str() ); let _result = reply_tx.send(self.disconnect(silent).await); }, } } #[tracing::instrument(level="trace", skip(self, stream),fields(comms.direction="inbound"))] async fn handle_incoming_substream(&mut self, mut stream: Substream) -> Result<(), PeerConnectionError> { let selected_protocol = ProtocolNegotiation::new(&mut stream) .negotiate_protocol_inbound(&self.our_supported_protocols) .await?; self.notify_event(ConnectionManagerEvent::NewInboundSubstream( self.peer_node_id.clone(), selected_protocol, stream, )) .await; Ok(()) } #[tracing::instrument(skip(self))] async fn open_negotiated_protocol_stream( &mut self, protocol: ProtocolId, ) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> { const PROTOCOL_NEGOTIATION_TIMEOUT: Duration = Duration::from_secs(10); debug!( target: LOG_TARGET, "[{}] Negotiating protocol '{}' on new substream for peer '{}'", self, String::from_utf8_lossy(&protocol), self.peer_node_id.short_str() ); let mut stream = 
self.control.open_stream().await?; let mut negotiation = ProtocolNegotiation::new(&mut stream); let selected_protocol = if self.their_supported_protocols.contains(&protocol) { let fut = negotiation.negotiate_protocol_outbound_optimistic(&protocol); time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await?? } else { let selected_protocols = [protocol]; let fut = negotiation.negotiate_protocol_outbound(&selected_protocols); time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await?? }; Ok(NegotiatedSubstream::new(selected_protocol, stream)) } async fn notify_event(&mut self, event: ConnectionManagerEvent) { let _result = self.event_notifier.send(event).await; } /// Disconnect this peer connection. /// /// # Arguments /// /// silent - true to suppress the PeerDisconnected event, false to publish the event async fn disconnect(&mut self, silent: bool) -> Result<(), PeerConnectionError> { self.request_rx.close(); match self.control.close().await { Err(yamux::ConnectionError::Closed) => { debug!( target: LOG_TARGET, "(Peer = {}) Connection already closed", self.peer_node_id.short_str() ); return Ok(()); }, // Only emit closed event once _ => { if !silent { self.notify_event(ConnectionManagerEvent::PeerDisconnected( self.id, self.peer_node_id.clone(), )) .await; } }, } debug!( target: LOG_TARGET, "(Peer = {}) Connection closed", self.peer_node_id.short_str() ); Ok(()) } } impl fmt::Display for PeerConnectionActor { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "PeerConnection(id={}, peer_node_id={}, direction={})", self.id, self.peer_node_id.short_str(), self.direction, ) } } /// Contains the substream and the ProtocolId that was successfully negotiated. pub struct NegotiatedSubstream<TSubstream> { pub protocol: ProtocolId, pub stream: TSubstream, } impl<TSubstream> NegotiatedSubstream<TSubstream> { pub fn new(protocol: ProtocolId, stream: TSubstream) -> Self { Self { protocol, stream } } } impl<TSubstream> fmt::Debug for NegotiatedSubstream<TSubstream> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("NegotiatedSubstream") .field("protocol", &format!("{:?}", self.protocol)) .field("stream", &"...".to_string()) .finish() } }
{ let substream = self.open_substream(protocol_id).await?; Ok(framing::canonical(substream.stream, max_frame_size)) }
identifier_body
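The `identifier_body` target above is the body of `open_framed_substream`, which opens a protocol-negotiated substream and wraps it in canonical length-delimited framing. A minimal usage sketch under stated assumptions: `conn` is a `PeerConnection` obtained from the connection manager, and `b"t/ping/1.0"` plus the 1 KiB frame limit are illustrative values, not constants from the crate.

// Sketch only: uses the `PeerConnection`, `ProtocolId` and
// `PeerConnectionError` types defined/imported in the file above.
async fn open_ping_substream(mut conn: PeerConnection) -> Result<(), PeerConnectionError> {
    let protocol = ProtocolId::from_static(b"t/ping/1.0"); // hypothetical protocol id
    let framed = conn.open_framed_substream(&protocol, 1024).await?;
    // `framed` is a CanonicalFraming<Substream>: a Sink/Stream of
    // length-delimited frames over the negotiated substream.
    drop(framed);
    Ok(())
}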
peer_connection.rs
// Copyright 2019, The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use std::{ fmt, future::Future, sync::{ atomic::{AtomicUsize, Ordering}, Arc, }, time::{Duration, Instant}, }; use log::*; use multiaddr::Multiaddr; use tokio::{ sync::{mpsc, oneshot}, time, }; use tokio_stream::StreamExt; use tracing::{self, span, Instrument, Level}; use super::{ direction::ConnectionDirection, error::{ConnectionManagerError, PeerConnectionError}, manager::ConnectionManagerEvent, }; #[cfg(feature = "rpc")] use crate::protocol::rpc::{ pool::RpcClientPool, pool::RpcPoolClient, NamedProtocolService, RpcClient, RpcClientBuilder, RpcError, RPC_MAX_FRAME_SIZE, }; use crate::{ framing, framing::CanonicalFraming, multiplexing::{Control, IncomingSubstreams, Substream, Yamux}, peer_manager::{NodeId, PeerFeatures, PeerIdentityClaim}, protocol::{ProtocolId, ProtocolNegotiation}, utils::atomic_ref_counter::AtomicRefCounter, }; const LOG_TARGET: &str = "comms::connection_manager::peer_connection"; static ID_COUNTER: AtomicUsize = AtomicUsize::new(0); pub fn try_create( connection: Yamux, peer_addr: Multiaddr, peer_node_id: NodeId, peer_features: PeerFeatures, direction: ConnectionDirection, event_notifier: mpsc::Sender<ConnectionManagerEvent>, our_supported_protocols: Vec<ProtocolId>, their_supported_protocols: Vec<ProtocolId>, peer_identity_claim: PeerIdentityClaim, ) -> Result<PeerConnection, ConnectionManagerError> { trace!( target: LOG_TARGET, "(Peer={}) Socket successfully upgraded to multiplexed socket", peer_node_id.short_str() ); // All requests are request/response, so a channel size of 1 is all that is needed let (peer_tx, peer_rx) = mpsc::channel(1); let id = ID_COUNTER.fetch_add(1, Ordering::Relaxed); // Monotonic let substream_counter = connection.substream_counter(); let peer_conn = PeerConnection::new( id, peer_tx, peer_node_id.clone(), peer_features, peer_addr, direction, substream_counter, peer_identity_claim, ); let peer_actor = PeerConnectionActor::new( id, peer_node_id, direction, connection, peer_rx, event_notifier, our_supported_protocols, their_supported_protocols, ); 
tokio::spawn(peer_actor.run()); Ok(peer_conn) } /// Request types for the PeerConnection actor. #[derive(Debug)] pub enum PeerConnectionRequest { /// Open a new substream and negotiate the given protocol OpenSubstream { protocol_id: ProtocolId, reply_tx: oneshot::Sender<Result<NegotiatedSubstream<Substream>, PeerConnectionError>>, }, /// Disconnect all substreams and close the transport connection Disconnect(bool, oneshot::Sender<Result<(), PeerConnectionError>>), } /// ID type for peer connections pub type ConnectionId = usize; /// Request handle for an active peer connection #[derive(Debug, Clone)] pub struct PeerConnection { id: ConnectionId, peer_node_id: NodeId, peer_features: PeerFeatures, request_tx: mpsc::Sender<PeerConnectionRequest>, address: Arc<Multiaddr>, direction: ConnectionDirection, started_at: Instant, substream_counter: AtomicRefCounter, handle_counter: Arc<()>, peer_identity_claim: Option<PeerIdentityClaim>, } impl PeerConnection { pub(crate) fn new( id: ConnectionId, request_tx: mpsc::Sender<PeerConnectionRequest>, peer_node_id: NodeId, peer_features: PeerFeatures, address: Multiaddr, direction: ConnectionDirection, substream_counter: AtomicRefCounter, peer_identity_claim: PeerIdentityClaim, ) -> Self { Self { id, request_tx, peer_node_id, peer_features, address: Arc::new(address), direction, started_at: Instant::now(), substream_counter, handle_counter: Arc::new(()), peer_identity_claim: Some(peer_identity_claim), } } /// Should only be used in tests pub(crate) fn unverified( id: ConnectionId, request_tx: mpsc::Sender<PeerConnectionRequest>, peer_node_id: NodeId, peer_features: PeerFeatures, address: Multiaddr, direction: ConnectionDirection, substream_counter: AtomicRefCounter, ) -> Self { Self { id, request_tx, peer_node_id, peer_features, address: Arc::new(address), direction, started_at: Instant::now(), substream_counter, handle_counter: Arc::new(()), peer_identity_claim: None, } } pub fn peer_node_id(&self) -> &NodeId { &self.peer_node_id } pub fn peer_features(&self) -> PeerFeatures { self.peer_features } pub fn direction(&self) -> ConnectionDirection { self.direction } pub fn address(&self) -> &Multiaddr { &self.address } pub fn id(&self) -> ConnectionId { self.id } pub fn is_connected(&self) -> bool { !self.request_tx.is_closed() } /// Returns a owned future that resolves on disconnection pub fn on_disconnect(&self) -> impl Future<Output = ()> + 'static { let request_tx = self.request_tx.clone(); async move { request_tx.closed().await } } pub fn age(&self) -> Duration { self.started_at.elapsed() } pub fn substream_count(&self) -> usize { self.substream_counter.get() } pub fn handle_count(&self) -> usize { Arc::strong_count(&self.handle_counter) } pub fn peer_identity_claim(&self) -> Option<&PeerIdentityClaim> { self.peer_identity_claim.as_ref() } #[tracing::instrument(level = "trace", "peer_connection::open_substream", skip(self))] pub async fn open_substream( &mut self, protocol_id: &ProtocolId, ) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> { let (reply_tx, reply_rx) = oneshot::channel(); self.request_tx .send(PeerConnectionRequest::OpenSubstream { protocol_id: protocol_id.clone(), reply_tx, }) .await?; reply_rx .await .map_err(|_| PeerConnectionError::InternalReplyCancelled)? 
} #[tracing::instrument(level = "trace", "peer_connection::open_framed_substream", skip(self))] pub async fn open_framed_substream( &mut self, protocol_id: &ProtocolId, max_frame_size: usize, ) -> Result<CanonicalFraming<Substream>, PeerConnectionError> { let substream = self.open_substream(protocol_id).await?; Ok(framing::canonical(substream.stream, max_frame_size)) } #[cfg(feature = "rpc")] #[tracing::instrument("peer_connection::connect_rpc", level="trace", skip(self), fields(peer_node_id = self.peer_node_id.to_string().as_str()))] pub async fn connect_rpc<T>(&mut self) -> Result<T, RpcError> where T: From<RpcClient> + NamedProtocolService { self.connect_rpc_using_builder(Default::default()).await } #[cfg(feature = "rpc")] #[tracing::instrument("peer_connection::connect_rpc_with_builder", level = "trace", skip(self, builder))] pub async fn connect_rpc_using_builder<T>(&mut self, builder: RpcClientBuilder<T>) -> Result<T, RpcError> where T: From<RpcClient> + NamedProtocolService { let protocol = ProtocolId::from_static(T::PROTOCOL_NAME); debug!( target: LOG_TARGET, "Attempting to establish RPC protocol `{}` to peer `{}`", String::from_utf8_lossy(&protocol), self.peer_node_id ); let framed = self.open_framed_substream(&protocol, RPC_MAX_FRAME_SIZE).await?; builder .with_protocol_id(protocol) .with_node_id(self.peer_node_id.clone()) .connect(framed) .await } /// Creates a new RpcClientPool that can be shared between tasks. The client pool will lazily establish up to /// `max_sessions` sessions and provides client session that is least used. #[cfg(feature = "rpc")] pub fn create_rpc_client_pool<T>( &self, max_sessions: usize, client_config: RpcClientBuilder<T>, ) -> RpcClientPool<T> where T: RpcPoolClient + From<RpcClient> + NamedProtocolService + Clone, { RpcClientPool::new(self.clone(), max_sessions, client_config) } /// Immediately disconnects the peer connection. This can only fail if the peer connection worker /// is shut down (and the peer is already disconnected) pub async fn disconnect(&mut self) -> Result<(), PeerConnectionError> { let (reply_tx, reply_rx) = oneshot::channel(); self.request_tx .send(PeerConnectionRequest::Disconnect(false, reply_tx)) .await?; reply_rx .await .map_err(|_| PeerConnectionError::InternalReplyCancelled)? } pub(crate) async fn disconnect_silent(&mut self) -> Result<(), PeerConnectionError> { let (reply_tx, reply_rx) = oneshot::channel(); self.request_tx .send(PeerConnectionRequest::Disconnect(true, reply_tx)) .await?; reply_rx .await .map_err(|_| PeerConnectionError::InternalReplyCancelled)? } } impl fmt::Display for PeerConnection { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { write!( f, "Id: {}, Node ID: {}, Direction: {}, Peer Address: {}, Age: {:.0?}, #Substreams: {}, #Refs: {}", self.id, self.peer_node_id.short_str(), self.direction, self.address, self.age(), self.substream_count(), self.handle_count() ) } } impl PartialEq for PeerConnection { fn eq(&self, other: &Self) -> bool { self.id == other.id } } /// Actor for an active connection to a peer. 
struct PeerConnectionActor { id: ConnectionId, peer_node_id: NodeId, request_rx: mpsc::Receiver<PeerConnectionRequest>, direction: ConnectionDirection, incoming_substreams: IncomingSubstreams, control: Control, event_notifier: mpsc::Sender<ConnectionManagerEvent>, our_supported_protocols: Vec<ProtocolId>, their_supported_protocols: Vec<ProtocolId>, } impl PeerConnectionActor { fn new( id: ConnectionId, peer_node_id: NodeId, direction: ConnectionDirection, connection: Yamux, request_rx: mpsc::Receiver<PeerConnectionRequest>, event_notifier: mpsc::Sender<ConnectionManagerEvent>, our_supported_protocols: Vec<ProtocolId>, their_supported_protocols: Vec<ProtocolId>, ) -> Self { Self { id, peer_node_id, direction, control: connection.get_yamux_control(), incoming_substreams: connection.into_incoming(), request_rx, event_notifier, our_supported_protocols, their_supported_protocols, } } pub async fn run(mut self) { loop { tokio::select! { maybe_request = self.request_rx.recv() => { match maybe_request { Some(request) => self.handle_request(request).await, None => { debug!(target: LOG_TARGET, "[{}] All peer connection handles dropped closing the connection", self); break; } } }, maybe_substream = self.incoming_substreams.next() => { match maybe_substream { Some(substream) => { if let Err(err) = self.handle_incoming_substream(substream).await { error!( target: LOG_TARGET, "[{}] Incoming substream for peer '{}' failed to open because '{error}'", self, self.peer_node_id.short_str(), error = err ) } }, None => { debug!(target: LOG_TARGET, "[{}] Peer '{}' closed the connection", self, self.peer_node_id.short_str()); break; }, } } } } if let Err(err) = self.disconnect(false).await { warn!( target: LOG_TARGET, "[{}] Failed to politely close connection to peer '{}' because '{}'", self, self.peer_node_id.short_str(), err ); } } async fn handle_request(&mut self, request: PeerConnectionRequest) { use PeerConnectionRequest::{Disconnect, OpenSubstream}; match request { OpenSubstream { protocol_id, reply_tx } =>
, Disconnect(silent, reply_tx) => { debug!( target: LOG_TARGET, "[{}] Disconnect{}requested for {} connection to peer '{}'", self, if silent { " (silent) " } else { " " }, self.direction, self.peer_node_id.short_str() ); let _result = reply_tx.send(self.disconnect(silent).await); }, } } #[tracing::instrument(level="trace", skip(self, stream),fields(comms.direction="inbound"))] async fn handle_incoming_substream(&mut self, mut stream: Substream) -> Result<(), PeerConnectionError> { let selected_protocol = ProtocolNegotiation::new(&mut stream) .negotiate_protocol_inbound(&self.our_supported_protocols) .await?; self.notify_event(ConnectionManagerEvent::NewInboundSubstream( self.peer_node_id.clone(), selected_protocol, stream, )) .await; Ok(()) } #[tracing::instrument(skip(self))] async fn open_negotiated_protocol_stream( &mut self, protocol: ProtocolId, ) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> { const PROTOCOL_NEGOTIATION_TIMEOUT: Duration = Duration::from_secs(10); debug!( target: LOG_TARGET, "[{}] Negotiating protocol '{}' on new substream for peer '{}'", self, String::from_utf8_lossy(&protocol), self.peer_node_id.short_str() ); let mut stream = self.control.open_stream().await?; let mut negotiation = ProtocolNegotiation::new(&mut stream); let selected_protocol = if self.their_supported_protocols.contains(&protocol) { let fut = negotiation.negotiate_protocol_outbound_optimistic(&protocol); time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await?? } else { let selected_protocols = [protocol]; let fut = negotiation.negotiate_protocol_outbound(&selected_protocols); time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await?? }; Ok(NegotiatedSubstream::new(selected_protocol, stream)) } async fn notify_event(&mut self, event: ConnectionManagerEvent) { let _result = self.event_notifier.send(event).await; } /// Disconnect this peer connection. /// /// # Arguments /// /// silent - true to suppress the PeerDisconnected event, false to publish the event async fn disconnect(&mut self, silent: bool) -> Result<(), PeerConnectionError> { self.request_rx.close(); match self.control.close().await { Err(yamux::ConnectionError::Closed) => { debug!( target: LOG_TARGET, "(Peer = {}) Connection already closed", self.peer_node_id.short_str() ); return Ok(()); }, // Only emit closed event once _ => { if !silent { self.notify_event(ConnectionManagerEvent::PeerDisconnected( self.id, self.peer_node_id.clone(), )) .await; } }, } debug!( target: LOG_TARGET, "(Peer = {}) Connection closed", self.peer_node_id.short_str() ); Ok(()) } } impl fmt::Display for PeerConnectionActor { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "PeerConnection(id={}, peer_node_id={}, direction={})", self.id, self.peer_node_id.short_str(), self.direction, ) } } /// Contains the substream and the ProtocolId that was successfully negotiated. pub struct NegotiatedSubstream<TSubstream> { pub protocol: ProtocolId, pub stream: TSubstream, } impl<TSubstream> NegotiatedSubstream<TSubstream> { pub fn new(protocol: ProtocolId, stream: TSubstream) -> Self { Self { protocol, stream } } } impl<TSubstream> fmt::Debug for NegotiatedSubstream<TSubstream> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("NegotiatedSubstream") .field("protocol", &format!("{:?}", self.protocol)) .field("stream", &"...".to_string()) .finish() } }
{ let tracing_id = tracing::Span::current().id(); let span = span!(Level::TRACE, "handle_request"); span.follows_from(tracing_id); let result = self.open_negotiated_protocol_stream(protocol_id).instrument(span).await; log_if_error_fmt!( target: LOG_TARGET, reply_tx.send(result), "Reply oneshot closed when sending reply", ); }
conditional_block
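The `conditional_block` held out above is the `OpenSubstream` arm of `handle_request`: the actor runs protocol negotiation and sends the outcome back over the caller's oneshot channel. The same request/reply actor pattern, reduced to a self-contained sketch with hypothetical `Request`/`actor`/`caller` names rather than the crate's types:

use tokio::sync::{mpsc, oneshot};

enum Request {
    // Each request carries a oneshot sender so the actor can reply
    // directly to the task that asked.
    Get { reply_tx: oneshot::Sender<u64> },
}

async fn actor(mut rx: mpsc::Receiver<Request>) {
    let state = 42u64;
    while let Some(req) = rx.recv().await {
        match req {
            Request::Get { reply_tx } => {
                // The send only fails if the caller gave up waiting; ignore
                // it, much as the actor above logs instead of propagating.
                let _ = reply_tx.send(state);
            },
        }
    }
}

async fn caller(tx: mpsc::Sender<Request>) -> Option<u64> {
    let (reply_tx, reply_rx) = oneshot::channel();
    tx.send(Request::Get { reply_tx }).await.ok()?;
    reply_rx.await.ok()
}

#[tokio::main]
async fn main() {
    let (tx, rx) = mpsc::channel(1);
    tokio::spawn(actor(rx));
    assert_eq!(caller(tx).await, Some(42));
}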
peer_connection.rs
// Copyright 2019, The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use std::{ fmt, future::Future, sync::{ atomic::{AtomicUsize, Ordering}, Arc, }, time::{Duration, Instant}, }; use log::*; use multiaddr::Multiaddr; use tokio::{ sync::{mpsc, oneshot}, time, }; use tokio_stream::StreamExt; use tracing::{self, span, Instrument, Level}; use super::{ direction::ConnectionDirection, error::{ConnectionManagerError, PeerConnectionError}, manager::ConnectionManagerEvent, }; #[cfg(feature = "rpc")] use crate::protocol::rpc::{ pool::RpcClientPool, pool::RpcPoolClient, NamedProtocolService, RpcClient, RpcClientBuilder, RpcError, RPC_MAX_FRAME_SIZE, }; use crate::{ framing, framing::CanonicalFraming, multiplexing::{Control, IncomingSubstreams, Substream, Yamux}, peer_manager::{NodeId, PeerFeatures, PeerIdentityClaim}, protocol::{ProtocolId, ProtocolNegotiation}, utils::atomic_ref_counter::AtomicRefCounter, }; const LOG_TARGET: &str = "comms::connection_manager::peer_connection"; static ID_COUNTER: AtomicUsize = AtomicUsize::new(0); pub fn try_create( connection: Yamux, peer_addr: Multiaddr, peer_node_id: NodeId, peer_features: PeerFeatures, direction: ConnectionDirection, event_notifier: mpsc::Sender<ConnectionManagerEvent>, our_supported_protocols: Vec<ProtocolId>, their_supported_protocols: Vec<ProtocolId>, peer_identity_claim: PeerIdentityClaim, ) -> Result<PeerConnection, ConnectionManagerError> { trace!( target: LOG_TARGET, "(Peer={}) Socket successfully upgraded to multiplexed socket", peer_node_id.short_str() ); // All requests are request/response, so a channel size of 1 is all that is needed let (peer_tx, peer_rx) = mpsc::channel(1); let id = ID_COUNTER.fetch_add(1, Ordering::Relaxed); // Monotonic let substream_counter = connection.substream_counter(); let peer_conn = PeerConnection::new( id, peer_tx, peer_node_id.clone(), peer_features, peer_addr, direction, substream_counter, peer_identity_claim, ); let peer_actor = PeerConnectionActor::new( id, peer_node_id, direction, connection, peer_rx, event_notifier, our_supported_protocols, their_supported_protocols, ); 
tokio::spawn(peer_actor.run()); Ok(peer_conn) } /// Request types for the PeerConnection actor. #[derive(Debug)] pub enum PeerConnectionRequest { /// Open a new substream and negotiate the given protocol OpenSubstream { protocol_id: ProtocolId, reply_tx: oneshot::Sender<Result<NegotiatedSubstream<Substream>, PeerConnectionError>>, }, /// Disconnect all substreams and close the transport connection Disconnect(bool, oneshot::Sender<Result<(), PeerConnectionError>>), } /// ID type for peer connections pub type ConnectionId = usize; /// Request handle for an active peer connection #[derive(Debug, Clone)] pub struct PeerConnection { id: ConnectionId, peer_node_id: NodeId, peer_features: PeerFeatures, request_tx: mpsc::Sender<PeerConnectionRequest>, address: Arc<Multiaddr>, direction: ConnectionDirection, started_at: Instant, substream_counter: AtomicRefCounter, handle_counter: Arc<()>, peer_identity_claim: Option<PeerIdentityClaim>, } impl PeerConnection { pub(crate) fn new( id: ConnectionId, request_tx: mpsc::Sender<PeerConnectionRequest>, peer_node_id: NodeId, peer_features: PeerFeatures, address: Multiaddr, direction: ConnectionDirection, substream_counter: AtomicRefCounter, peer_identity_claim: PeerIdentityClaim, ) -> Self { Self { id, request_tx, peer_node_id, peer_features, address: Arc::new(address), direction, started_at: Instant::now(), substream_counter, handle_counter: Arc::new(()), peer_identity_claim: Some(peer_identity_claim), } } /// Should only be used in tests pub(crate) fn unverified( id: ConnectionId, request_tx: mpsc::Sender<PeerConnectionRequest>, peer_node_id: NodeId, peer_features: PeerFeatures, address: Multiaddr, direction: ConnectionDirection, substream_counter: AtomicRefCounter, ) -> Self { Self { id, request_tx, peer_node_id, peer_features, address: Arc::new(address), direction, started_at: Instant::now(), substream_counter, handle_counter: Arc::new(()), peer_identity_claim: None, } } pub fn peer_node_id(&self) -> &NodeId { &self.peer_node_id } pub fn peer_features(&self) -> PeerFeatures { self.peer_features } pub fn direction(&self) -> ConnectionDirection { self.direction } pub fn address(&self) -> &Multiaddr { &self.address } pub fn id(&self) -> ConnectionId { self.id } pub fn is_connected(&self) -> bool { !self.request_tx.is_closed() } /// Returns a owned future that resolves on disconnection pub fn on_disconnect(&self) -> impl Future<Output = ()> + 'static { let request_tx = self.request_tx.clone(); async move { request_tx.closed().await } } pub fn age(&self) -> Duration { self.started_at.elapsed() } pub fn substream_count(&self) -> usize { self.substream_counter.get() } pub fn handle_count(&self) -> usize { Arc::strong_count(&self.handle_counter) } pub fn peer_identity_claim(&self) -> Option<&PeerIdentityClaim> { self.peer_identity_claim.as_ref() } #[tracing::instrument(level = "trace", "peer_connection::open_substream", skip(self))] pub async fn open_substream( &mut self, protocol_id: &ProtocolId, ) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> { let (reply_tx, reply_rx) = oneshot::channel(); self.request_tx .send(PeerConnectionRequest::OpenSubstream { protocol_id: protocol_id.clone(), reply_tx, }) .await?; reply_rx .await .map_err(|_| PeerConnectionError::InternalReplyCancelled)? 
} #[tracing::instrument(level = "trace", "peer_connection::open_framed_substream", skip(self))] pub async fn open_framed_substream( &mut self, protocol_id: &ProtocolId, max_frame_size: usize, ) -> Result<CanonicalFraming<Substream>, PeerConnectionError> { let substream = self.open_substream(protocol_id).await?; Ok(framing::canonical(substream.stream, max_frame_size)) } #[cfg(feature = "rpc")] #[tracing::instrument("peer_connection::connect_rpc", level="trace", skip(self), fields(peer_node_id = self.peer_node_id.to_string().as_str()))] pub async fn connect_rpc<T>(&mut self) -> Result<T, RpcError> where T: From<RpcClient> + NamedProtocolService { self.connect_rpc_using_builder(Default::default()).await } #[cfg(feature = "rpc")] #[tracing::instrument("peer_connection::connect_rpc_with_builder", level = "trace", skip(self, builder))] pub async fn connect_rpc_using_builder<T>(&mut self, builder: RpcClientBuilder<T>) -> Result<T, RpcError> where T: From<RpcClient> + NamedProtocolService { let protocol = ProtocolId::from_static(T::PROTOCOL_NAME); debug!( target: LOG_TARGET, "Attempting to establish RPC protocol `{}` to peer `{}`", String::from_utf8_lossy(&protocol), self.peer_node_id ); let framed = self.open_framed_substream(&protocol, RPC_MAX_FRAME_SIZE).await?; builder .with_protocol_id(protocol) .with_node_id(self.peer_node_id.clone()) .connect(framed) .await } /// Creates a new RpcClientPool that can be shared between tasks. The client pool will lazily establish up to /// `max_sessions` sessions and provides client session that is least used. #[cfg(feature = "rpc")] pub fn create_rpc_client_pool<T>( &self, max_sessions: usize, client_config: RpcClientBuilder<T>, ) -> RpcClientPool<T> where T: RpcPoolClient + From<RpcClient> + NamedProtocolService + Clone, { RpcClientPool::new(self.clone(), max_sessions, client_config) } /// Immediately disconnects the peer connection. This can only fail if the peer connection worker /// is shut down (and the peer is already disconnected) pub async fn disconnect(&mut self) -> Result<(), PeerConnectionError> { let (reply_tx, reply_rx) = oneshot::channel(); self.request_tx .send(PeerConnectionRequest::Disconnect(false, reply_tx)) .await?; reply_rx .await .map_err(|_| PeerConnectionError::InternalReplyCancelled)? } pub(crate) async fn disconnect_silent(&mut self) -> Result<(), PeerConnectionError> { let (reply_tx, reply_rx) = oneshot::channel(); self.request_tx .send(PeerConnectionRequest::Disconnect(true, reply_tx)) .await?; reply_rx .await .map_err(|_| PeerConnectionError::InternalReplyCancelled)? } } impl fmt::Display for PeerConnection { fn
(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { write!( f, "Id: {}, Node ID: {}, Direction: {}, Peer Address: {}, Age: {:.0?}, #Substreams: {}, #Refs: {}", self.id, self.peer_node_id.short_str(), self.direction, self.address, self.age(), self.substream_count(), self.handle_count() ) } } impl PartialEq for PeerConnection { fn eq(&self, other: &Self) -> bool { self.id == other.id } } /// Actor for an active connection to a peer. struct PeerConnectionActor { id: ConnectionId, peer_node_id: NodeId, request_rx: mpsc::Receiver<PeerConnectionRequest>, direction: ConnectionDirection, incoming_substreams: IncomingSubstreams, control: Control, event_notifier: mpsc::Sender<ConnectionManagerEvent>, our_supported_protocols: Vec<ProtocolId>, their_supported_protocols: Vec<ProtocolId>, } impl PeerConnectionActor { fn new( id: ConnectionId, peer_node_id: NodeId, direction: ConnectionDirection, connection: Yamux, request_rx: mpsc::Receiver<PeerConnectionRequest>, event_notifier: mpsc::Sender<ConnectionManagerEvent>, our_supported_protocols: Vec<ProtocolId>, their_supported_protocols: Vec<ProtocolId>, ) -> Self { Self { id, peer_node_id, direction, control: connection.get_yamux_control(), incoming_substreams: connection.into_incoming(), request_rx, event_notifier, our_supported_protocols, their_supported_protocols, } } pub async fn run(mut self) { loop { tokio::select! { maybe_request = self.request_rx.recv() => { match maybe_request { Some(request) => self.handle_request(request).await, None => { debug!(target: LOG_TARGET, "[{}] All peer connection handles dropped closing the connection", self); break; } } }, maybe_substream = self.incoming_substreams.next() => { match maybe_substream { Some(substream) => { if let Err(err) = self.handle_incoming_substream(substream).await { error!( target: LOG_TARGET, "[{}] Incoming substream for peer '{}' failed to open because '{error}'", self, self.peer_node_id.short_str(), error = err ) } }, None => { debug!(target: LOG_TARGET, "[{}] Peer '{}' closed the connection", self, self.peer_node_id.short_str()); break; }, } } } } if let Err(err) = self.disconnect(false).await { warn!( target: LOG_TARGET, "[{}] Failed to politely close connection to peer '{}' because '{}'", self, self.peer_node_id.short_str(), err ); } } async fn handle_request(&mut self, request: PeerConnectionRequest) { use PeerConnectionRequest::{Disconnect, OpenSubstream}; match request { OpenSubstream { protocol_id, reply_tx } => { let tracing_id = tracing::Span::current().id(); let span = span!(Level::TRACE, "handle_request"); span.follows_from(tracing_id); let result = self.open_negotiated_protocol_stream(protocol_id).instrument(span).await; log_if_error_fmt!( target: LOG_TARGET, reply_tx.send(result), "Reply oneshot closed when sending reply", ); }, Disconnect(silent, reply_tx) => { debug!( target: LOG_TARGET, "[{}] Disconnect{}requested for {} connection to peer '{}'", self, if silent { " (silent) " } else { " " }, self.direction, self.peer_node_id.short_str() ); let _result = reply_tx.send(self.disconnect(silent).await); }, } } #[tracing::instrument(level="trace", skip(self, stream),fields(comms.direction="inbound"))] async fn handle_incoming_substream(&mut self, mut stream: Substream) -> Result<(), PeerConnectionError> { let selected_protocol = ProtocolNegotiation::new(&mut stream) .negotiate_protocol_inbound(&self.our_supported_protocols) .await?; self.notify_event(ConnectionManagerEvent::NewInboundSubstream( self.peer_node_id.clone(), selected_protocol, stream, )) .await; Ok(()) } 
#[tracing::instrument(skip(self))] async fn open_negotiated_protocol_stream( &mut self, protocol: ProtocolId, ) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> { const PROTOCOL_NEGOTIATION_TIMEOUT: Duration = Duration::from_secs(10); debug!( target: LOG_TARGET, "[{}] Negotiating protocol '{}' on new substream for peer '{}'", self, String::from_utf8_lossy(&protocol), self.peer_node_id.short_str() ); let mut stream = self.control.open_stream().await?; let mut negotiation = ProtocolNegotiation::new(&mut stream); let selected_protocol = if self.their_supported_protocols.contains(&protocol) { let fut = negotiation.negotiate_protocol_outbound_optimistic(&protocol); time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await?? } else { let selected_protocols = [protocol]; let fut = negotiation.negotiate_protocol_outbound(&selected_protocols); time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await?? }; Ok(NegotiatedSubstream::new(selected_protocol, stream)) } async fn notify_event(&mut self, event: ConnectionManagerEvent) { let _result = self.event_notifier.send(event).await; } /// Disconnect this peer connection. /// /// # Arguments /// /// silent - true to suppress the PeerDisconnected event, false to publish the event async fn disconnect(&mut self, silent: bool) -> Result<(), PeerConnectionError> { self.request_rx.close(); match self.control.close().await { Err(yamux::ConnectionError::Closed) => { debug!( target: LOG_TARGET, "(Peer = {}) Connection already closed", self.peer_node_id.short_str() ); return Ok(()); }, // Only emit closed event once _ => { if !silent { self.notify_event(ConnectionManagerEvent::PeerDisconnected( self.id, self.peer_node_id.clone(), )) .await; } }, } debug!( target: LOG_TARGET, "(Peer = {}) Connection closed", self.peer_node_id.short_str() ); Ok(()) } } impl fmt::Display for PeerConnectionActor { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "PeerConnection(id={}, peer_node_id={}, direction={})", self.id, self.peer_node_id.short_str(), self.direction, ) } } /// Contains the substream and the ProtocolId that was successfully negotiated. pub struct NegotiatedSubstream<TSubstream> { pub protocol: ProtocolId, pub stream: TSubstream, } impl<TSubstream> NegotiatedSubstream<TSubstream> { pub fn new(protocol: ProtocolId, stream: TSubstream) -> Self { Self { protocol, stream } } } impl<TSubstream> fmt::Debug for NegotiatedSubstream<TSubstream> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("NegotiatedSubstream") .field("protocol", &format!("{:?}", self.protocol)) .field("stream", &"...".to_string()) .finish() } }
fmt
identifier_name
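The `identifier_name` target above is simply the method name `fmt` in `impl fmt::Display for PeerConnection`. For reference, the shape of a minimal `Display` implementation, shown on an illustrative type rather than the crate's:

use std::fmt;

struct ConnectionSummary {
    id: usize,
}

impl fmt::Display for ConnectionSummary {
    // `fmt` is the single required method of the Display trait; `write!`
    // forwards formatted output into the supplied Formatter.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "Id: {}", self.id)
    }
}

fn main() {
    println!("{}", ConnectionSummary { id: 7 }); // prints "Id: 7"
}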
peer_connection.rs
// Copyright 2019, The Tari Project // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the // following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following // disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the // following disclaimer in the documentation and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote // products derived from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, // INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE // USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. use std::{ fmt, future::Future, sync::{ atomic::{AtomicUsize, Ordering}, Arc, }, time::{Duration, Instant}, }; use log::*; use multiaddr::Multiaddr; use tokio::{ sync::{mpsc, oneshot}, time, }; use tokio_stream::StreamExt; use tracing::{self, span, Instrument, Level}; use super::{ direction::ConnectionDirection, error::{ConnectionManagerError, PeerConnectionError}, manager::ConnectionManagerEvent, }; #[cfg(feature = "rpc")] use crate::protocol::rpc::{ pool::RpcClientPool, pool::RpcPoolClient, NamedProtocolService, RpcClient, RpcClientBuilder, RpcError, RPC_MAX_FRAME_SIZE, }; use crate::{ framing, framing::CanonicalFraming, multiplexing::{Control, IncomingSubstreams, Substream, Yamux}, peer_manager::{NodeId, PeerFeatures, PeerIdentityClaim}, protocol::{ProtocolId, ProtocolNegotiation}, utils::atomic_ref_counter::AtomicRefCounter, }; const LOG_TARGET: &str = "comms::connection_manager::peer_connection"; static ID_COUNTER: AtomicUsize = AtomicUsize::new(0); pub fn try_create( connection: Yamux, peer_addr: Multiaddr, peer_node_id: NodeId, peer_features: PeerFeatures, direction: ConnectionDirection, event_notifier: mpsc::Sender<ConnectionManagerEvent>, our_supported_protocols: Vec<ProtocolId>, their_supported_protocols: Vec<ProtocolId>, peer_identity_claim: PeerIdentityClaim, ) -> Result<PeerConnection, ConnectionManagerError> { trace!( target: LOG_TARGET, "(Peer={}) Socket successfully upgraded to multiplexed socket", peer_node_id.short_str() ); // All requests are request/response, so a channel size of 1 is all that is needed let (peer_tx, peer_rx) = mpsc::channel(1); let id = ID_COUNTER.fetch_add(1, Ordering::Relaxed); // Monotonic let substream_counter = connection.substream_counter(); let peer_conn = PeerConnection::new( id, peer_tx, peer_node_id.clone(), peer_features, peer_addr, direction, substream_counter, peer_identity_claim, ); let peer_actor = PeerConnectionActor::new( id, peer_node_id, direction, connection, peer_rx, event_notifier, our_supported_protocols, their_supported_protocols, ); 
tokio::spawn(peer_actor.run()); Ok(peer_conn) } /// Request types for the PeerConnection actor. #[derive(Debug)] pub enum PeerConnectionRequest { /// Open a new substream and negotiate the given protocol OpenSubstream { protocol_id: ProtocolId, reply_tx: oneshot::Sender<Result<NegotiatedSubstream<Substream>, PeerConnectionError>>, }, /// Disconnect all substreams and close the transport connection Disconnect(bool, oneshot::Sender<Result<(), PeerConnectionError>>), } /// ID type for peer connections pub type ConnectionId = usize; /// Request handle for an active peer connection #[derive(Debug, Clone)] pub struct PeerConnection { id: ConnectionId, peer_node_id: NodeId,
address: Arc<Multiaddr>, direction: ConnectionDirection, started_at: Instant, substream_counter: AtomicRefCounter, handle_counter: Arc<()>, peer_identity_claim: Option<PeerIdentityClaim>, } impl PeerConnection { pub(crate) fn new( id: ConnectionId, request_tx: mpsc::Sender<PeerConnectionRequest>, peer_node_id: NodeId, peer_features: PeerFeatures, address: Multiaddr, direction: ConnectionDirection, substream_counter: AtomicRefCounter, peer_identity_claim: PeerIdentityClaim, ) -> Self { Self { id, request_tx, peer_node_id, peer_features, address: Arc::new(address), direction, started_at: Instant::now(), substream_counter, handle_counter: Arc::new(()), peer_identity_claim: Some(peer_identity_claim), } } /// Should only be used in tests pub(crate) fn unverified( id: ConnectionId, request_tx: mpsc::Sender<PeerConnectionRequest>, peer_node_id: NodeId, peer_features: PeerFeatures, address: Multiaddr, direction: ConnectionDirection, substream_counter: AtomicRefCounter, ) -> Self { Self { id, request_tx, peer_node_id, peer_features, address: Arc::new(address), direction, started_at: Instant::now(), substream_counter, handle_counter: Arc::new(()), peer_identity_claim: None, } } pub fn peer_node_id(&self) -> &NodeId { &self.peer_node_id } pub fn peer_features(&self) -> PeerFeatures { self.peer_features } pub fn direction(&self) -> ConnectionDirection { self.direction } pub fn address(&self) -> &Multiaddr { &self.address } pub fn id(&self) -> ConnectionId { self.id } pub fn is_connected(&self) -> bool { !self.request_tx.is_closed() } /// Returns a owned future that resolves on disconnection pub fn on_disconnect(&self) -> impl Future<Output = ()> + 'static { let request_tx = self.request_tx.clone(); async move { request_tx.closed().await } } pub fn age(&self) -> Duration { self.started_at.elapsed() } pub fn substream_count(&self) -> usize { self.substream_counter.get() } pub fn handle_count(&self) -> usize { Arc::strong_count(&self.handle_counter) } pub fn peer_identity_claim(&self) -> Option<&PeerIdentityClaim> { self.peer_identity_claim.as_ref() } #[tracing::instrument(level = "trace", "peer_connection::open_substream", skip(self))] pub async fn open_substream( &mut self, protocol_id: &ProtocolId, ) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> { let (reply_tx, reply_rx) = oneshot::channel(); self.request_tx .send(PeerConnectionRequest::OpenSubstream { protocol_id: protocol_id.clone(), reply_tx, }) .await?; reply_rx .await .map_err(|_| PeerConnectionError::InternalReplyCancelled)? 
} #[tracing::instrument(level = "trace", "peer_connection::open_framed_substream", skip(self))] pub async fn open_framed_substream( &mut self, protocol_id: &ProtocolId, max_frame_size: usize, ) -> Result<CanonicalFraming<Substream>, PeerConnectionError> { let substream = self.open_substream(protocol_id).await?; Ok(framing::canonical(substream.stream, max_frame_size)) } #[cfg(feature = "rpc")] #[tracing::instrument("peer_connection::connect_rpc", level="trace", skip(self), fields(peer_node_id = self.peer_node_id.to_string().as_str()))] pub async fn connect_rpc<T>(&mut self) -> Result<T, RpcError> where T: From<RpcClient> + NamedProtocolService { self.connect_rpc_using_builder(Default::default()).await } #[cfg(feature = "rpc")] #[tracing::instrument("peer_connection::connect_rpc_with_builder", level = "trace", skip(self, builder))] pub async fn connect_rpc_using_builder<T>(&mut self, builder: RpcClientBuilder<T>) -> Result<T, RpcError> where T: From<RpcClient> + NamedProtocolService { let protocol = ProtocolId::from_static(T::PROTOCOL_NAME); debug!( target: LOG_TARGET, "Attempting to establish RPC protocol `{}` to peer `{}`", String::from_utf8_lossy(&protocol), self.peer_node_id ); let framed = self.open_framed_substream(&protocol, RPC_MAX_FRAME_SIZE).await?; builder .with_protocol_id(protocol) .with_node_id(self.peer_node_id.clone()) .connect(framed) .await } /// Creates a new RpcClientPool that can be shared between tasks. The client pool will lazily establish up to /// `max_sessions` sessions and provides client session that is least used. #[cfg(feature = "rpc")] pub fn create_rpc_client_pool<T>( &self, max_sessions: usize, client_config: RpcClientBuilder<T>, ) -> RpcClientPool<T> where T: RpcPoolClient + From<RpcClient> + NamedProtocolService + Clone, { RpcClientPool::new(self.clone(), max_sessions, client_config) } /// Immediately disconnects the peer connection. This can only fail if the peer connection worker /// is shut down (and the peer is already disconnected) pub async fn disconnect(&mut self) -> Result<(), PeerConnectionError> { let (reply_tx, reply_rx) = oneshot::channel(); self.request_tx .send(PeerConnectionRequest::Disconnect(false, reply_tx)) .await?; reply_rx .await .map_err(|_| PeerConnectionError::InternalReplyCancelled)? } pub(crate) async fn disconnect_silent(&mut self) -> Result<(), PeerConnectionError> { let (reply_tx, reply_rx) = oneshot::channel(); self.request_tx .send(PeerConnectionRequest::Disconnect(true, reply_tx)) .await?; reply_rx .await .map_err(|_| PeerConnectionError::InternalReplyCancelled)? } } impl fmt::Display for PeerConnection { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> Result<(), fmt::Error> { write!( f, "Id: {}, Node ID: {}, Direction: {}, Peer Address: {}, Age: {:.0?}, #Substreams: {}, #Refs: {}", self.id, self.peer_node_id.short_str(), self.direction, self.address, self.age(), self.substream_count(), self.handle_count() ) } } impl PartialEq for PeerConnection { fn eq(&self, other: &Self) -> bool { self.id == other.id } } /// Actor for an active connection to a peer. 
struct PeerConnectionActor { id: ConnectionId, peer_node_id: NodeId, request_rx: mpsc::Receiver<PeerConnectionRequest>, direction: ConnectionDirection, incoming_substreams: IncomingSubstreams, control: Control, event_notifier: mpsc::Sender<ConnectionManagerEvent>, our_supported_protocols: Vec<ProtocolId>, their_supported_protocols: Vec<ProtocolId>, } impl PeerConnectionActor { fn new( id: ConnectionId, peer_node_id: NodeId, direction: ConnectionDirection, connection: Yamux, request_rx: mpsc::Receiver<PeerConnectionRequest>, event_notifier: mpsc::Sender<ConnectionManagerEvent>, our_supported_protocols: Vec<ProtocolId>, their_supported_protocols: Vec<ProtocolId>, ) -> Self { Self { id, peer_node_id, direction, control: connection.get_yamux_control(), incoming_substreams: connection.into_incoming(), request_rx, event_notifier, our_supported_protocols, their_supported_protocols, } } pub async fn run(mut self) { loop { tokio::select! { maybe_request = self.request_rx.recv() => { match maybe_request { Some(request) => self.handle_request(request).await, None => { debug!(target: LOG_TARGET, "[{}] All peer connection handles dropped closing the connection", self); break; } } }, maybe_substream = self.incoming_substreams.next() => { match maybe_substream { Some(substream) => { if let Err(err) = self.handle_incoming_substream(substream).await { error!( target: LOG_TARGET, "[{}] Incoming substream for peer '{}' failed to open because '{error}'", self, self.peer_node_id.short_str(), error = err ) } }, None => { debug!(target: LOG_TARGET, "[{}] Peer '{}' closed the connection", self, self.peer_node_id.short_str()); break; }, } } } } if let Err(err) = self.disconnect(false).await { warn!( target: LOG_TARGET, "[{}] Failed to politely close connection to peer '{}' because '{}'", self, self.peer_node_id.short_str(), err ); } } async fn handle_request(&mut self, request: PeerConnectionRequest) { use PeerConnectionRequest::{Disconnect, OpenSubstream}; match request { OpenSubstream { protocol_id, reply_tx } => { let tracing_id = tracing::Span::current().id(); let span = span!(Level::TRACE, "handle_request"); span.follows_from(tracing_id); let result = self.open_negotiated_protocol_stream(protocol_id).instrument(span).await; log_if_error_fmt!( target: LOG_TARGET, reply_tx.send(result), "Reply oneshot closed when sending reply", ); }, Disconnect(silent, reply_tx) => { debug!( target: LOG_TARGET, "[{}] Disconnect{}requested for {} connection to peer '{}'", self, if silent { " (silent) " } else { " " }, self.direction, self.peer_node_id.short_str() ); let _result = reply_tx.send(self.disconnect(silent).await); }, } } #[tracing::instrument(level="trace", skip(self, stream),fields(comms.direction="inbound"))] async fn handle_incoming_substream(&mut self, mut stream: Substream) -> Result<(), PeerConnectionError> { let selected_protocol = ProtocolNegotiation::new(&mut stream) .negotiate_protocol_inbound(&self.our_supported_protocols) .await?; self.notify_event(ConnectionManagerEvent::NewInboundSubstream( self.peer_node_id.clone(), selected_protocol, stream, )) .await; Ok(()) } #[tracing::instrument(skip(self))] async fn open_negotiated_protocol_stream( &mut self, protocol: ProtocolId, ) -> Result<NegotiatedSubstream<Substream>, PeerConnectionError> { const PROTOCOL_NEGOTIATION_TIMEOUT: Duration = Duration::from_secs(10); debug!( target: LOG_TARGET, "[{}] Negotiating protocol '{}' on new substream for peer '{}'", self, String::from_utf8_lossy(&protocol), self.peer_node_id.short_str() ); let mut stream = 
self.control.open_stream().await?; let mut negotiation = ProtocolNegotiation::new(&mut stream); let selected_protocol = if self.their_supported_protocols.contains(&protocol) { let fut = negotiation.negotiate_protocol_outbound_optimistic(&protocol); time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await?? } else { let selected_protocols = [protocol]; let fut = negotiation.negotiate_protocol_outbound(&selected_protocols); time::timeout(PROTOCOL_NEGOTIATION_TIMEOUT, fut).await?? }; Ok(NegotiatedSubstream::new(selected_protocol, stream)) } async fn notify_event(&mut self, event: ConnectionManagerEvent) { let _result = self.event_notifier.send(event).await; } /// Disconnect this peer connection. /// /// # Arguments /// /// silent - true to suppress the PeerDisconnected event, false to publish the event async fn disconnect(&mut self, silent: bool) -> Result<(), PeerConnectionError> { self.request_rx.close(); match self.control.close().await { Err(yamux::ConnectionError::Closed) => { debug!( target: LOG_TARGET, "(Peer = {}) Connection already closed", self.peer_node_id.short_str() ); return Ok(()); }, // Only emit closed event once _ => { if !silent { self.notify_event(ConnectionManagerEvent::PeerDisconnected( self.id, self.peer_node_id.clone(), )) .await; } }, } debug!( target: LOG_TARGET, "(Peer = {}) Connection closed", self.peer_node_id.short_str() ); Ok(()) } } impl fmt::Display for PeerConnectionActor { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, "PeerConnection(id={}, peer_node_id={}, direction={})", self.id, self.peer_node_id.short_str(), self.direction, ) } } /// Contains the substream and the ProtocolId that was successfully negotiated. pub struct NegotiatedSubstream<TSubstream> { pub protocol: ProtocolId, pub stream: TSubstream, } impl<TSubstream> NegotiatedSubstream<TSubstream> { pub fn new(protocol: ProtocolId, stream: TSubstream) -> Self { Self { protocol, stream } } } impl<TSubstream> fmt::Debug for NegotiatedSubstream<TSubstream> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("NegotiatedSubstream") .field("protocol", &format!("{:?}", self.protocol)) .field("stream", &"...".to_string()) .finish() } }
peer_features: PeerFeatures, request_tx: mpsc::Sender<PeerConnectionRequest>,
random_line_split
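The `random_line_split` sample above cuts the `PeerConnection` struct mid-definition; the held-out span is its `peer_features` and `request_tx` fields. One detail of that struct worth noting is `handle_counter: Arc<()>`: cloning the handle clones the Arc, so `Arc::strong_count` doubles as a live-handle counter, which is what `handle_count()` returns. A standalone sketch of the trick with a hypothetical `Handle` type:

use std::sync::Arc;

#[derive(Clone)]
struct Handle {
    // Zero-sized payload: only the reference count is of interest.
    counter: Arc<()>,
}

impl Handle {
    fn new() -> Self {
        Self { counter: Arc::new(()) }
    }

    fn handle_count(&self) -> usize {
        Arc::strong_count(&self.counter)
    }
}

fn main() {
    let h = Handle::new();
    let h2 = h.clone();
    assert_eq!(h.handle_count(), 2);
    drop(h2);
    assert_eq!(h.handle_count(), 1);
}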
web.js
//preload images // $.ajaxSetup({ // timeout:200 // }); var vod = window.vod = { selectedMedia:{}, mediaDisplayType:"popover", searchXHR:null, displayMedia:function(el, html){ //TODO: destroy any existing media var media = $(".media"); var data = el.attr("media-data"); var type = el.attr("media-type").toLowerCase(); media.removeClass('selected'); if(!data){ return; } data = JSON.parse(decodeURIComponent(data)); for(var file in data.files){ var len = data.files[file].size; data.files[file].size = prettyBytes(len); } if(type == "series" && data.files.length){ //latest episode var latest_episode = _.last(data.files); latest_episode.title = "S" + latest_episode.season + "E" + latest_episode.episode; data.latest_episode = latest_episode; data.description = data.latest_episode.plot || data.description; } el.addClass('selected'); media.popover('destroy'); $(".tabpop") .removeClass("tabpop-anim-in") .removeClass("tabpop-anim-out"); //display if(this.mediaDisplayType == "popover"){ el.popover({ html:true, content:jade.render(type, data) }).popover('show'); }else{ $(".tabpop").html(jade.render(type, data)) $(".tabpop").addClass("tabpop-anim-in"); } }, filterSearch:function(){ //get properties var props = $("[filter-prop]") var props_build = {}; if(props.length){ props.each(function(){ //debugger; var self = $(this) var prop = self.attr('filter-prop'); var val; if(self.find('li.active').length){ val = self.find('li.active').text() }else{ val = self.find('label.active').text() } if(val && val.length > 0){ props_build[prop] = val; } }); } //get str var finder = $("#finder"); var query = finder.val(); var tokens = query.split(' '); var t_obj = {}; tokens = _.map(tokens,function(t){ if(t.indexOf(":") != -1){ var str = t.split(':'); var key = str[0]; var val = str[1]; t_obj[key] = val; if(props_build[key]){ var str = key + ":" + props_build[key]; //props_build[key]; } return ''; }else{ return t; } }); for(var i in props_build){ tokens.push(i+":"+props_build[i]); } finder.val(tokens.join(' ').trim().toLowerCase()); finder.trigger('keyup'); }, handleError:function handleError(error){ var error = error || {message: "An error occured"}; var message = error.message || error.error.message || JSON.stringify(error); $("#error-message").text(message); $("#modal-error").modal('show'); }, handleCommand : function handleCommand(data){ if(data.confirm){ if (!confirm("Are you sure you want to do this?")){ return; } } var route = data['route']; var id = data['id']; var method = data['method'] || "get"; var template = data['template']; var display = data['display']; var payload = data['payload'] || {}; var processData = true; var contentType = true; console.log(payload.constructor.name) if(payload){ processData = false; contentType = false; if(payload.constructor.name != "FormData"){ var form = new FormData(); for(var i in payload){ form.append(i, payload[i]); } payload = form; } } var q = { method:method, url:route, dataType:'json', processData: processData, contentType: contentType, success:function(data){ if(data.error){ return alert(data.error); } if(data.html){ if(display){ $(display).html(data.html); }else{ $("body").append(data.html); } } if(template && display && data && data.length){ var html = _.map(data, function(media){ return jade.render(template,{item:media}); }); html = html.join(''); $(display).html(html); } }, error:function(err){ vod.handleError(err); } } if(method != 'get'){ q.data = payload; } $.ajax(q) }, handleCommandEvent : function handleCommandEvent(event){ var data = {}; var el = $(this); 
    data.route = el.attr('data-cmd-route');
    data.id = el.attr('data-id');
    data.method = el.attr('data-cmd-method');
    data.template = el.attr('data-cmd-template');
    data.display = el.attr('data-cmd-display');
    data.confirm = el.attr('data-cmd-confirm');
    data.payload = new FormData();
    $.each(this.attributes, function(){
      if(this.name.indexOf('data-payload') != -1){
        var attr = this.name.replace('data-payload-', '');
        data.payload.append(attr, this.value);
      }
    });
    vod.handleCommand(data);
  },

  refreshCart: function(checkout){
    $.getJSON('/cart', function(res){
      if(res.error){ return alert(res.error); }
      if(res.medias && res.medias.length){
        var html = jade.render('cart-items', {
          items: res.medias,
          total_bytes: res.total_bytes,
          total_price: res.total_price,
          checkout: checkout || false
        });
        $("#cart-items").html(html);
      }
    });
  },

  // Polls /cart/pending-payment every 500ms and keeps the admin request list
  // in sync. (The original wrapped this in async.forever but never called
  // next() and re-entered getRequests() on every tick, nesting a new
  // forever-loop each time; a plain self-scheduling poll avoids that.)
  getRequests: function(){
    var poll = function(){
      $.getJSON('/cart/pending-payment', function(res){
        // remove rendered transactions that no longer exist
        $('.cart-authorize-item').each(function(){
          var i = $(this);
          var id = i.attr('id');
          var c = _.find(res, function(c){ return c._id == id; });
          if(!c){ i.remove(); }
        });
        // append transactions that are not rendered yet
        res.forEach(function(c){
          if($("#" + c._id).length){ return; }
          $("#requests").append(jade.render('cart-items-admin', {item: c}));
        });
        setTimeout(poll, 500);
      }).fail(function(){
        setTimeout(poll, 500);
      });
    };
    poll();
  }
};

var files_container = [];

$(function(){
  $('body').on('click', '.approve-item', function(){
    var id = $(this).attr('data-id');
    $.post('/cart/' + id + '/approve', function(res){});
  });

  $('body').on('click', '.remove-cart-item', function(){
    var id = $(this).attr('data-id');
    $.post('/cart/remove', {id: id}, function(res){
      if(res.error){ return alert(res.error); }
      vod.refreshCart();
    });
  });

  // report ad impressions once, shortly after load
  var ads = $('.advertisement').map(function(){
    return $(this).attr('data-id');
  });
  setTimeout(function(){
    var arr = ads.toArray();
    if(!arr.length) return;
    $.post('/ad/tick', {ids: arr});
  }, 1000);

  // infinite scroll: load more results near the bottom of the page
  $(window).scroll(function(){
    if($(window).scrollTop() + $(window).height() >= $(document).height() - 50){
      $("#search-more").trigger('click');
    }
  });

  $('body').on('click', '.chart', function(){
    var self = $(this); // placeholder handler
  });

  $('body').on('click', '.command', vod.handleCommandEvent);

  $('body').on('click', '#send-reset-password', function(){
    var email = $("#reset-password-email").val();
    if(email != ''){
      $.post('/user/reset', {email: email}, function(res){
        $("#error-message").text(res.error || res.message);
        $("#modal-error").modal('show');
      });
    }
  });

  $('body').on('click', '#confirm-reset-password', function(){
    var email = $("#reset-password-email").val();
    var code = $("#reset-password-code").val();
    var password = $("#reset-password-password").val();
    if(email != ''){
      $.post('/user/reset-password', {email: email, code: code, password: password}, function(res){
        $("#error-message").text(res.error || res.message);
        $("#modal-error").modal('show');
        if(res.message){
          $('#modal-reset-account').modal('hide');
        }
      });
    }
  });

  $('body').on('click', '#signin-help', function(){
    $('#modal-reset-account').modal('show');
  });

  $("body").on('click', '#send-request', function(){
    var title = $('#request-title').val();
    var details = $('#request-details').val();
    if(title == ''){ return; }
    $.post('/media/request', {title: title, details: details}, function(res){
      if(res.error){
        $("#error-message").text(res.error);
        $("#modal-error").modal('show');
      }else{
        $("#error-message").text(res.message);
        $("#modal-error").modal('show');
        $("#modal-request").modal('hide');
        $('#request-title').val('');
        $('#request-details').val('');
      }
    });
  });

  $("body").on('click', '.show-request-dialog', function(){
    var val = $("#finder").val();
    $("#request-title").val(val);
    $('#modal-request').modal('show');
  });

  $("body").on('click', '#confirm-verification', function(){
    var code = $('#verification-code').val();
    if(!code || code == ""){ return; }
    $.post('/user/verify', {code: code}, function(res){
      if(res.error){ return vod.handleError(res.error); }
      $('#modal-verify').modal('hide');
    });
  });

  $("body").on('click', '#send-verification-code', function(){
    $.post('/user/send-verification-sms');
  });

  $('body').on('click', '.flag-file a', function(){
    var id = $(this).parent().parent().attr('data-id');
    var reason = $(this).text();
    $.post('/media/flag', {id: id, reason: reason}, function(res){});
  });

  $("body").on('click', '.subscription-toggle', function(){
    var self = $(this);
    var id = self.attr('data-id');
    $.post('/user/subscription-toggle', {id: id}, function(res){
      if(res.error){
        if(!res.error.verified){
          $('#modal-verify').modal('show');
        }
        return;
      }
      if(res.watchlisted){
        self.addClass('active');
        self.find('span:first').removeClass('glyphicon-plus').addClass('glyphicon-ok');
        self.find('.watchlist-label').html('&nbsp;watchlisted');
      }else{
        self.removeClass('active');
        self.find('span:first').removeClass('glyphicon-ok').addClass('glyphicon-plus');
        self.find('.watchlist-label').html('&nbsp;add to watchlist');
      }
    });
  });

  $("body").on('click', '#signup', function(){
    var fields = "username password".split(" ");
    var vals = {};
    _.each(fields, function(f){ vals[f] = $("#signup-" + f).val(); });
    var isEmpty = !!_.find(fields, function(f){ return $("#signup-" + f).val() == ""; });
    if(isEmpty){
      //return alert('Please fill all fields');
    }
    // enforce a strong password
    if(vals.password.length <= 5 || "123456 asdfg".indexOf(vals.password) != -1){
      $("#error-message").text('Please use a stronger password');
      $("#modal-error").modal('show');
      return;
    }
    if(vals.username.length != 7){
      $("#error-message").text('Incorrect mobile number');
      $("#modal-error").modal('show');
      return;
    }
    $.post('/user/register', vals, function(res){
      if(res.error){
        $("#error-message").text(res.error);
        $("#modal-error").modal('show');
      }else if(res.message){
        $("#error-message").text(res.message);
        $("#modal-error").modal('show');
        $('#signup-form').slideUp('fast');
      }
    });
  });

  $('body').on('click', '#listings-subpropfilter .btn-group-vertical label', function(){
    setTimeout(window.vod.filterSearch, 0);
  });

  $('body').on('click', '.vod-list-inline li', function(){
    var self = $(this);
    self.siblings().removeClass('active');
    self.addClass("active");
  });
  $('body').on('click', '#search-more', function(){
    var last = $("#search-content .media:last");
    var id = last.attr('media-id');
    vod.searchSince = id;
    vod.filterSearch();
  });

  $('body').on('click', '#filter-container-elements li', function(){
    var self = $(this);
    var props = self.attr('filter-subprops');
    if(props){
      props = JSON.parse(props);
      var html = jade.render("listings-subpropfilter", {props: props});
      var subprop = $("#listings-subpropfilter");
      if(subprop.length){
        subprop.html(html);
      }else{
        $("#sub-contents").prepend("<section id='listings-subpropfilter'>" + html + "</section>");
      }
      // turn each sub-filter label into a radio button
      $("#listings-subpropfilter .btn-group-vertical").each(function(){
        var self = $(this);
        var prop = self.attr('filter-prop');
        self.find('label').each(function(){
          var label = $(this);
          var text = label.text();
          label.html('<input type="radio" name="' + prop + '">' + text);
        });
      });
    }else{
      $("#listings-subpropfilter").html('');
    }
    setTimeout(window.vod.filterSearch, 0);
  });

  // debounced live search: wait 300ms after the last keystroke, abort any
  // in-flight request, then query /media/search
  $("body").on('keyup', '#finder', function(){
    if(window.vod.searchTime){
      window.clearTimeout(window.vod.searchTime);
    }
    window.vod.searchTime = setTimeout(function(){
      var val = $("#finder").val();
      if(val == '' || val.length < 2){
        $("#main-content").show();
        $("#search-content").hide();
        return;
      }
      if(window.vod.searchXHR){
        window.vod.searchXHR.abort();
      }
      var search_params = { query: val };
      $("#main-content").hide();
      $("#search-content").show();
      if(vod.searchSince){ search_params.since = vod.searchSince; }
      if(vod.searchType){ search_params.type = vod.searchType; }
      window.vod.searchXHR = $.getJSON('/media/search', search_params, function(res){
        vod.searchSince = null;
        if(!res.length && !search_params.since){
          $("#search-content").html(jade.render('not-found'));
          return;
        }
        var items = _.map(res, function(m){
          // order files by season.episode so the last entry is the newest
          m.files = _.sortBy(m.files, function(a){
            return parseFloat(a.season + '.' + a.episode);
          });
          var lastfile = _.last(m.files);
          m.lastfile = lastfile;
          return jade.render('item', {item: m, lastfile: lastfile});
        });
        if(search_params.since){
          $("#search-content .listing").append(items.join(''));
          if(res.length < 4){ $("#search-more").remove(); }
        }else{
          var more = res.length < 4 ? '' : jade.render('search-more');
          var html = '<ul class="dl-horizontal listing list-inline">' + items.join('') + '</ul>' + more;
          $("#search-content").html(html);
        }
      });
    }, 300);
  });

  $("body").on('click', '.media a', function(e){
    // e.preventDefault();
    // e.stopPropagation();
    // var media = $(this).parent();
    // vod.displayMedia(media, "html");
  });

  $("body").on("change", "#media-retrieve-data", function(){
    var val = $(this).val();
    $("#loader").show();
    $.post('/media/retrieve-data', {url: val}, function(res){
      $("#loader").hide();
      for(var prop in res){
        $("#new-media-info-container input[media-prop='" + prop + "']").val(res[prop]);
      }
      $("#new-media-info-container textarea[media-prop='description']").html(res.description);
      $("#media-image").attr('src', "/media/pipe?url=" + res.poster);
    });
  });

  // drag & drop upload target (note: "dragend" fires on the dragged source
  // element, so "dragleave" may be the event this border reset was meant for)
  $("body").on("dragenter", "#file-dropper", function(e){
    e.stopPropagation();
    e.preventDefault();
    $(this).css('border', '2px solid #0B85A1');
  });
  $("body").on("dragend", "#file-dropper", function(e){
    e.stopPropagation();
    e.preventDefault();
    $(this).css('border', '2px solid red');
  });
  $("body").on("dragover", "#file-dropper", function(e){
    e.stopPropagation();
    e.preventDefault();
  });
  $("body").on("drop", "#file-dropper", function(e){
    e.stopPropagation();
    e.preventDefault();
    $(this).css('border', '2px solid transparent');
    var files = e.originalEvent.dataTransfer.files;
    for(var i = 0; i < files.length; i++){
      files_container.push(files[i]);
    }
    renderFiles();
  });

  $("body").on('click', '#media-type label', function(){
    $("#media-file-type").hide();
    $("#media-image").attr("src", "");
    files_container.length = 0;
    window.vod.selectedMedia = {};
    var self = $(this);
    var type = self.text();
    var template = self.attr('media-template');
    var html = jade.render(template);
    $("#file-container").html('');
    $("#new-media-info-container").html(html);
    if(type == "App"){ type = "application"; }
    // typeahead over all titles of the selected type
    var engine = new Bloodhound({
      datumTokenizer: function(d){ return Bloodhound.tokenizers.whitespace(d.title); },
      queryTokenizer: Bloodhound.tokenizers.whitespace,
      prefetch: {
        url: '/media/search?type=' + type.toLowerCase() + '&fields=title&include_unpublished=1&limit=1000000',
        ttl: 1000,
        filter: function(media){
          return $.map(media, function(data){
            data.value = data.title;
            return data;
          });
        }
      }
    });
    engine.clearPrefetchCache();
    engine.initialize();
    $(".search").typeahead(
      { hint: true, highlight: true, minLength: 1 },
      { displayKey: 'value', source: engine.ttAdapter() }
    ).on("typeahead:selected", function(datum, obj){
      $("#loader").show();
      $.getJSON('/media/' + obj._id, function(obj){
        $("#loader").hide();
        $("#media-file-type label:first").trigger('click');
        window.vod.selectedMedia = obj;
        // fill the plain text fields
        $("[media-prop-type='text']").each(function(){
          var self = $(this);
          var prop = self.attr("media-prop");
          self.val(obj[prop]);
        });
        // activate the matching label in each button group
        $("[media-prop-type='group']").each(function(){
          var self = $(this);
          var prop = self.attr("media-prop");
          self.find('label').each(function(){
            if($(this).text().trim() == obj[prop]){
              $(this).addClass('active');
            }else{
              $(this).removeClass('active');
            }
          });
        });
        $("#new-media-form-content").show();
        $("#media-file-type").show();
        var html = jade.render('file-details', {files: obj.files});
        $("#display-uploaded-media").html(html);
        var options = jade.render('newmedia-media-options', {id: obj._id});
        $("#save-info").parent().append(options);
      });
    });
  });

  $("body").on('click', '#media-file-type label', function(){
    var self = $(this);
    var template = self.attr('media-template');
    $("#media-file-type-container").html(jade.render(template));
  });

  // clicking outside a media tile or its popover dismisses any open popover
  $('body').on('click', function(e){
    if(!$(e.target).parent().parent().hasClass('media') && $(e.target).parents('.popover.in').length === 0){
      $(".media").popover('destroy').removeClass('selected');
    }
  });

  $("body").on('click', '.remove-upload-file', function(){
    var index = parseInt($(this).attr('data-index'), 10);
    files_container.splice(index, 1);
    renderFiles();
  });
});

function renderFiles(){
  $("#file-container").html('');
  files_container.forEach(function(file, index){
    var html = jade.render('file-container', {
      index: index,
      file: file.name || file.fileName,
      size: prettyBytes(file.size || file.fileSize)
    });
    $("#file-container").append(html);
  });
}

// below 768px wide, show media details in the tab panel instead of a popover
enquire.register("screen and (max-width:768px)", {
  match: function(){ vod.mediaDisplayType = "tabpop"; },
  unmatch: function(){ vod.mediaDisplayType = "popover"; }
});

var socket = window.socket = io.connect();
self.addClass("active");
random_line_split
web.js
//preload images // $.ajaxSetup({ // timeout:200 // }); var vod = window.vod = { selectedMedia:{}, mediaDisplayType:"popover", searchXHR:null, displayMedia:function(el, html){ //TODO: destroy any existing media var media = $(".media"); var data = el.attr("media-data"); var type = el.attr("media-type").toLowerCase(); media.removeClass('selected'); if(!data){ return; } data = JSON.parse(decodeURIComponent(data)); for(var file in data.files){ var len = data.files[file].size; data.files[file].size = prettyBytes(len); } if(type == "series" && data.files.length){ //latest episode var latest_episode = _.last(data.files); latest_episode.title = "S" + latest_episode.season + "E" + latest_episode.episode; data.latest_episode = latest_episode; data.description = data.latest_episode.plot || data.description; } el.addClass('selected'); media.popover('destroy'); $(".tabpop") .removeClass("tabpop-anim-in") .removeClass("tabpop-anim-out"); //display if(this.mediaDisplayType == "popover"){ el.popover({ html:true, content:jade.render(type, data) }).popover('show'); }else{ $(".tabpop").html(jade.render(type, data)) $(".tabpop").addClass("tabpop-anim-in"); } }, filterSearch:function(){ //get properties var props = $("[filter-prop]") var props_build = {}; if(props.length){ props.each(function(){ //debugger; var self = $(this) var prop = self.attr('filter-prop'); var val; if(self.find('li.active').length){ val = self.find('li.active').text() }else{ val = self.find('label.active').text() } if(val && val.length > 0){ props_build[prop] = val; } }); } //get str var finder = $("#finder"); var query = finder.val(); var tokens = query.split(' '); var t_obj = {}; tokens = _.map(tokens,function(t){ if(t.indexOf(":") != -1){ var str = t.split(':'); var key = str[0]; var val = str[1]; t_obj[key] = val; if(props_build[key]){ var str = key + ":" + props_build[key]; //props_build[key]; } return ''; }else{ return t; } }); for(var i in props_build){ tokens.push(i+":"+props_build[i]); } finder.val(tokens.join(' ').trim().toLowerCase()); finder.trigger('keyup'); }, handleError:function handleError(error){ var error = error || {message: "An error occured"}; var message = error.message || error.error.message || JSON.stringify(error); $("#error-message").text(message); $("#modal-error").modal('show'); }, handleCommand : function handleCommand(data){ if(data.confirm){ if (!confirm("Are you sure you want to do this?")){ return; } } var route = data['route']; var id = data['id']; var method = data['method'] || "get"; var template = data['template']; var display = data['display']; var payload = data['payload'] || {}; var processData = true; var contentType = true; console.log(payload.constructor.name) if(payload){ processData = false; contentType = false; if(payload.constructor.name != "FormData"){ var form = new FormData(); for(var i in payload){ form.append(i, payload[i]); } payload = form; } } var q = { method:method, url:route, dataType:'json', processData: processData, contentType: contentType, success:function(data){ if(data.error){ return alert(data.error); } if(data.html){ if(display){ $(display).html(data.html); }else{ $("body").append(data.html); } } if(template && display && data && data.length){ var html = _.map(data, function(media){ return jade.render(template,{item:media}); }); html = html.join(''); $(display).html(html); } }, error:function(err){ vod.handleError(err); } } if(method != 'get'){ q.data = payload; } $.ajax(q) }, handleCommandEvent : function handleCommandEvent(event){ var data = {}; var el = $(this); 
data.route = el.attr('data-cmd-route'); data.id = el.attr('data-id'); data.method = el.attr('data-cmd-method'); data.template = el.attr('data-cmd-template'); data.display = el.attr('data-cmd-display'); data.route = el.attr('data-cmd-route'); data.confirm = el.attr('data-cmd-confirm'); data.payload = new FormData(); $.each(this.attributes, function(){ if(this.name.indexOf('data-payload') != -1){ var attr = this.name.replace('data-payload-', ''); data.payload.append(attr,this.value); } }); vod.handleCommand(data); }, refreshCart: function(checkout){ $.getJSON('/cart', function(res){ if(res.error){ return alert(res.error); } if(res.medias && res.medias.length){ var html = jade.render('cart-items',{items:res.medias, total_bytes:res.total_bytes, total_price:res.total_price, checkout:checkout||false}); $("#cart-items").html(html); } }); }, getRequests: function(){ async.forever(function(next){ $.getJSON('/cart/pending-payment', function(res){ //remove transactions which doesn't exist console.log(res); $('.cart-authorize-item').each(function(){ var i = $(this); var id = i.attr('id'); var c = _.find(res,function(c){ console.log(c._id, id); return c._id == id; }); if(!c){ i.remove(); } }); res.forEach(function(c){ var exist = $("#" + c._id); if(exist.length){ return; } var html = jade.render('cart-items-admin',{item:c}); $("#requests").append(html); }); setTimeout(function(){ vod.getRequests(); },500) }).fail(function(){ setTimeout(function(){ vod.getRequests(); },500) }); }, function(err){ }) } }; var files_container = []; $(function(){ $('body').on('click', '.approve-item', function(){ var id = $(this).attr('data-id'); $.post('/cart/'+id+'/approve', function(res){ }); }); $('body').on('click', '.remove-cart-item', function(){ var id = $(this).attr('data-id'); $.post('/cart/remove', {id:id}, function(res){ if(res.error){ return alert(res.error); } vod.refreshCart(); }); }); var ads = $('.advertisement').map(function(){ var id = $(this).attr('data-id'); return id }); setTimeout(function(){ var arr = ads.toArray(); if(!arr.length) return; $.post('/ad/tick',{ids:arr}); },1000); $(window).scroll(function(){ if ($(window).scrollTop() + $(window).height() >= $(document).height() - 50){ $("#search-more").trigger('click'); } }); $('body').on('click', '.chart', function(){ var self = $(this); }); $('body').on('click', '.command', vod.handleCommandEvent); $('body').on('click', '#send-reset-password', function(){ var email = $("#reset-password-email").val(); if(email != ''){ $.post('/user/reset',{email:email}, function(res){ $("#error-message").text(res.error || res.message); $("#modal-error").modal('show'); }); } }) $('body').on('click', '#confirm-reset-password', function(){ var email = $("#reset-password-email").val(); var code = $("#reset-password-code").val(); var password = $("#reset-password-password").val(); if(email != ''){ $.post('/user/reset-password',{email:email, code:code, password:password}, function(res){ $("#error-message").text(res.error || res.message); $("#modal-error").modal('show'); if(res.message){ $('#modal-reset-account').modal('hide'); } }); } }) $('body').on('click', '#signin-help', function(){ $('#modal-reset-account').modal('show') }) $("body").on('click', '#send-request', function(){ var title = $('#request-title').val(); var details = $('#request-details').val(); if(title == ''){ return; } $.post('/media/request',{title:title, details:details}, function(res){ if(res.error){ $("#error-message").text(res.error); $("#modal-error").modal('show'); }else{ 
$("#error-message").text(res.message); $("#modal-error").modal('show'); $("#modal-request").modal('hide'); $('#request-title').val(''); $('#request-details').val(''); } }); }); $("body").on('click', '.show-request-dialog', function(){ var val = $("#finder").val(); $("#request-title").val(val); $('#modal-request').modal('show') }); $("body").on('click', '#confirm-verification', function(){ var code = $('#verification-code').val(); if(!code || code == ""){ return; } $.post('/user/verify',{code:code}, function(res){ if(res.error){ return vod.handleError(res.error); } $('#modal-verify').modal('hide'); }); }); $("body").on('click', '#send-verification-code', function(){ $.post('/user/send-verification-sms') }); $('body').on('click', '.flag-file a', function(){ var id = $(this).parent().parent().attr('data-id'); var reason = $(this).text(); $.post('/media/flag', {id:id, reason:reason}, function(res){ }); }) $("body").on('click', '.subscription-toggle', function(){ var self = $(this); var id = self.attr('data-id'); $.post('/user/subscription-toggle',{id:id}, function(res){ if(res.error){ if(!res.error.verified){ $('#modal-verify').modal('show') } return; } if(res.watchlisted){ self.addClass('active'); self.find('span:first').removeClass('glyphicon-plus').addClass('glyphicon-ok'); self.find('.watchlist-label').html('&nbsp;watchlisted'); }else{ self.removeClass('active'); self.find('span:first').removeClass('glyphicon-ok').addClass('glyphicon-plus'); self.find('.watchlist-label').html('&nbsp;add to watchlist'); } }) }); $("body").on('click', '#signup', function(){ var fields = "username password".split(" "); var vals = {}; _.each(fields, function(f){vals[f] = $("#signup-" +f).val()}); var isEmpty = _.find(fields, function(f){return $("#signup-" +f).val() == "";}) ? true : false; if(isEmpty){ //return alert('Please fill all fields'); } //enforce a strong pass if(vals.password.length <=5 || "123456 asdfg".indexOf(vals.password) != -1){ $("#error-message").text('Please use a stronger password'); $("#modal-error").modal('show'); return; } if(vals.username.length != 7){ $("#error-message").text('Incorrect mobile number'); $("#modal-error").modal('show'); return; } $.post('/user/register',vals, function(res){ if(res.error){ $("#error-message").text(res.error); $("#modal-error").modal('show'); }else if(res.message){ $("#error-message").text(res.message); $("#modal-error").modal('show'); $('#signup-form').slideUp('fast'); } }); }) $('body').on('click', '#listings-subpropfilter .btn-group-vertical label', function(){ setTimeout(window.vod.filterSearch,0); }); $('body').on('click', '.vod-list-inline li', function(){ var self = $(this); self.siblings().removeClass('active'); self.addClass("active"); }); $('body').on('click', '#search-more', function(){ var last = $("#search-content .media:last"); var id = last.attr('media-id'); vod.searchSince = id; vod.filterSearch(); }) $('body').on('click', '#filter-container-elements li', function(){ var self = $(this); var props = self.attr('filter-subprops'); if(props)
else{ $("#listings-subpropfilter").html(''); } setTimeout(window.vod.filterSearch,0); }) $("body").on('keyup', '#finder', function(){ var t = window.vod.searchTime; if(t){ window.clearTimeout(window.vod.searchTime); } window.vod.searchTime = setTimeout(function(){ var val = $("#finder").val(); if(val == '' || val.length < 2){ $("#main-content").show(); $("#search-content").hide(); return; } if(window.vod.searchXHR){ window.vod.searchXHR.abort(); } var search_params = { query:val } $("#main-content").hide(); $("#search-content").show(); if(vod.searchSince){ search_params.since = vod.searchSince; } if(vod.searchType){ search_params.type = vod.searchType; } window.vod.searchXHR = $.getJSON('/media/search',search_params,function(res){ vod.searchSince = null; if(!res.length && !search_params.since){ var html = jade.render('not-found'); $("#search-content").html(html); return; } var items = _.map(res, function(m){ m.files = _.sortBy(m.files, function(a){ var score = parseFloat(a.season + '.' + a.episode); return score; }); var lastfile = _.last(m.files); m.lastfile = lastfile; return jade.render('item',{item:m, lastfile:lastfile}); }); if(search_params.since){ $("#search-content .listing").append(items.join('')); if(res.length < 4){ $("#search-more").remove(); } }else{ var more = res.length < 4 ?'':jade.render('search-more'); var html = '<ul class="dl-horizontal listing list-inline">'+items.join('')+'</ul>' + more; $("#search-content").html(html); } }); },300); }) $("body").on('click', '.media a', function(e){ // e.preventDefault(); // e.stopPropagation(); // var media = $(this).parent(); //vod.displayMedia(media, "html"); }); $("body").on("change", "#media-retrieve-data", function(){ var val = $(this).val(); $("#loader").show(); $.post('/media/retrieve-data',{url:val}, function(res){ $("#loader").hide(); for(prop in res){ $("#new-media-info-container input[media-prop='"+prop+"']").val(res[prop]); } $("#new-media-info-container textarea[media-prop='description']").html(res.description); $("#media-image").attr('src',"/media/pipe?url=" + res.poster); }); }); $("body").on("dragenter","#file-dropper", function(e){ e.stopPropagation(); e.preventDefault(); var self = $(this); self.css('border', '2px solid #0B85A1'); }); $("body").on("dragend","#file-dropper", function(e){ e.stopPropagation(); e.preventDefault(); var self = $(this); self.css('border', '2px solid red'); }); $("body").on("dragover","#file-dropper", function(e){ e.stopPropagation(); e.preventDefault(); }); $("body").on("drop","#file-dropper", function(e){ e.stopPropagation(); e.preventDefault(); var self = $(this); self.css('border', '2px solid transparent'); var files = e.originalEvent.dataTransfer.files; for(var i=0; i<files.length; i++){ files_container.push(files[i]); } renderFiles(); }); $("body").on('click', '#media-type label', function(){ $("#media-file-type").hide(); $("#media-image").attr("src",""); files_container.length = 0; window.vod.selectedMedia = {}; var self = $(this); var type = self.text(); var template = self.attr('media-template'); var html = jade.render(template); $("#file-container").html(''); files_container.length = 0; $("#new-media-info-container").html(html); if(type == "App"){ type = "application"; } var engine = new Bloodhound({ datumTokenizer: function(d){ return Bloodhound.tokenizers.whitespace(d.title) }, queryTokenizer: Bloodhound.tokenizers.whitespace, prefetch:{ url:'/media/search?type='+type.toLowerCase()+'&fields=title&include_unpublished=1&limit=1000000', ttl:1000, filter:function(media){ return 
$.map(media, function (data) { data.value = data.title; return data; }); } } }); engine.clearPrefetchCache(); engine.initialize(); $(".search").typeahead( { hint: true, highlight: true, minLength: 1 }, { displayKey: 'value', source: engine.ttAdapter() } ) .on("typeahead:selected", function(datum, obj){ $("#loader").show(); $.getJSON('/media/' + obj._id, function(obj){ $("#loader").hide(); $("#media-file-type label:first").trigger('click'); window.vod.selectedMedia = obj; $("[media-prop-type='text']").each(function(){ var self = $(this); var prop = self.attr("media-prop"); self.val(obj[prop]); }); $("[media-prop-type='group']").each(function(){ var self = $(this); var prop = self.attr("media-prop"); self.find('label').each(function(){ if($(this).text().trim() == obj[prop]){ $(this).addClass('active'); }else{ $(this).removeClass('active'); } }); }); $("#new-media-form-content").show(); $("#media-file-type").show(); var html = jade.render('file-details',{files:obj.files}); $("#display-uploaded-media").html(html); var options = jade.render('newmedia-media-options',{id:obj._id}); $("#save-info").parent().append(options); }); }) }) $("body").on('click', '#media-file-type label', function(){ var self = $(this); var template = self.attr('media-template'); var html = jade.render(template); $("#media-file-type-container").html(html); }); $('body').on('click', function (e) { if (!$(e.target).parent().parent().hasClass('media') && $(e.target).parents('.popover.in').length === 0) { $(".media").popover('destroy').removeClass('selected'); } }); $("body").on('click', '.remove-upload-file', function(){ var index = $(this).attr('data-index'); files_container.splice(index,1); renderFiles(); }); }); function renderFiles(){ $("#file-container").html(''); files_container.forEach(function(file, index){ var html = jade.render('file-container',{index:index, file:file.name||file.fileName, size:prettyBytes(file.size||file.fileSize)}); $("#file-container").append(html); }) } enquire.register("screen and (max-width:768px)", { match:function(){ vod.mediaDisplayType = "tabpop"; console.log(vod) }, unmatch:function(){ vod.mediaDisplayType = "popover"; console.log(vod) } }); var socket = window.socket = io.connect();
{ props = JSON.parse(props); var html = jade.render("listings-subpropfilter",{props:props}); var subprop = $("#listings-subpropfilter"); if(subprop.length){ subprop.html(html); }else{ $("#sub-contents").prepend("<section id='listings-subpropfilter'>" + html + "</section>"); } $("#listings-subpropfilter .btn-group-vertical").each(function(){ var self = $(this); var prop = self.attr('filter-prop'); self.find('label').each(function(){ var label = $(this); var text = label.text(); label.html('<input type="radio" name="'+prop+'">' + text); }) }); }
conditional_block
web.js
//preload images // $.ajaxSetup({ // timeout:200 // }); var vod = window.vod = { selectedMedia:{}, mediaDisplayType:"popover", searchXHR:null, displayMedia:function(el, html){ //TODO: destroy any existing media var media = $(".media"); var data = el.attr("media-data"); var type = el.attr("media-type").toLowerCase(); media.removeClass('selected'); if(!data){ return; } data = JSON.parse(decodeURIComponent(data)); for(var file in data.files){ var len = data.files[file].size; data.files[file].size = prettyBytes(len); } if(type == "series" && data.files.length){ //latest episode var latest_episode = _.last(data.files); latest_episode.title = "S" + latest_episode.season + "E" + latest_episode.episode; data.latest_episode = latest_episode; data.description = data.latest_episode.plot || data.description; } el.addClass('selected'); media.popover('destroy'); $(".tabpop") .removeClass("tabpop-anim-in") .removeClass("tabpop-anim-out"); //display if(this.mediaDisplayType == "popover"){ el.popover({ html:true, content:jade.render(type, data) }).popover('show'); }else{ $(".tabpop").html(jade.render(type, data)) $(".tabpop").addClass("tabpop-anim-in"); } }, filterSearch:function(){ //get properties var props = $("[filter-prop]") var props_build = {}; if(props.length){ props.each(function(){ //debugger; var self = $(this) var prop = self.attr('filter-prop'); var val; if(self.find('li.active').length){ val = self.find('li.active').text() }else{ val = self.find('label.active').text() } if(val && val.length > 0){ props_build[prop] = val; } }); } //get str var finder = $("#finder"); var query = finder.val(); var tokens = query.split(' '); var t_obj = {}; tokens = _.map(tokens,function(t){ if(t.indexOf(":") != -1){ var str = t.split(':'); var key = str[0]; var val = str[1]; t_obj[key] = val; if(props_build[key]){ var str = key + ":" + props_build[key]; //props_build[key]; } return ''; }else{ return t; } }); for(var i in props_build){ tokens.push(i+":"+props_build[i]); } finder.val(tokens.join(' ').trim().toLowerCase()); finder.trigger('keyup'); }, handleError:function handleError(error){ var error = error || {message: "An error occured"}; var message = error.message || error.error.message || JSON.stringify(error); $("#error-message").text(message); $("#modal-error").modal('show'); }, handleCommand : function handleCommand(data){ if(data.confirm){ if (!confirm("Are you sure you want to do this?")){ return; } } var route = data['route']; var id = data['id']; var method = data['method'] || "get"; var template = data['template']; var display = data['display']; var payload = data['payload'] || {}; var processData = true; var contentType = true; console.log(payload.constructor.name) if(payload){ processData = false; contentType = false; if(payload.constructor.name != "FormData"){ var form = new FormData(); for(var i in payload){ form.append(i, payload[i]); } payload = form; } } var q = { method:method, url:route, dataType:'json', processData: processData, contentType: contentType, success:function(data){ if(data.error){ return alert(data.error); } if(data.html){ if(display){ $(display).html(data.html); }else{ $("body").append(data.html); } } if(template && display && data && data.length){ var html = _.map(data, function(media){ return jade.render(template,{item:media}); }); html = html.join(''); $(display).html(html); } }, error:function(err){ vod.handleError(err); } } if(method != 'get'){ q.data = payload; } $.ajax(q) }, handleCommandEvent : function handleCommandEvent(event){ var data = {}; var el = $(this); 
data.route = el.attr('data-cmd-route'); data.id = el.attr('data-id'); data.method = el.attr('data-cmd-method'); data.template = el.attr('data-cmd-template'); data.display = el.attr('data-cmd-display'); data.route = el.attr('data-cmd-route'); data.confirm = el.attr('data-cmd-confirm'); data.payload = new FormData(); $.each(this.attributes, function(){ if(this.name.indexOf('data-payload') != -1){ var attr = this.name.replace('data-payload-', ''); data.payload.append(attr,this.value); } }); vod.handleCommand(data); }, refreshCart: function(checkout){ $.getJSON('/cart', function(res){ if(res.error){ return alert(res.error); } if(res.medias && res.medias.length){ var html = jade.render('cart-items',{items:res.medias, total_bytes:res.total_bytes, total_price:res.total_price, checkout:checkout||false}); $("#cart-items").html(html); } }); }, getRequests: function(){ async.forever(function(next){ $.getJSON('/cart/pending-payment', function(res){ //remove transactions which doesn't exist console.log(res); $('.cart-authorize-item').each(function(){ var i = $(this); var id = i.attr('id'); var c = _.find(res,function(c){ console.log(c._id, id); return c._id == id; }); if(!c){ i.remove(); } }); res.forEach(function(c){ var exist = $("#" + c._id); if(exist.length){ return; } var html = jade.render('cart-items-admin',{item:c}); $("#requests").append(html); }); setTimeout(function(){ vod.getRequests(); },500) }).fail(function(){ setTimeout(function(){ vod.getRequests(); },500) }); }, function(err){ }) } }; var files_container = []; $(function(){ $('body').on('click', '.approve-item', function(){ var id = $(this).attr('data-id'); $.post('/cart/'+id+'/approve', function(res){ }); }); $('body').on('click', '.remove-cart-item', function(){ var id = $(this).attr('data-id'); $.post('/cart/remove', {id:id}, function(res){ if(res.error){ return alert(res.error); } vod.refreshCart(); }); }); var ads = $('.advertisement').map(function(){ var id = $(this).attr('data-id'); return id }); setTimeout(function(){ var arr = ads.toArray(); if(!arr.length) return; $.post('/ad/tick',{ids:arr}); },1000); $(window).scroll(function(){ if ($(window).scrollTop() + $(window).height() >= $(document).height() - 50){ $("#search-more").trigger('click'); } }); $('body').on('click', '.chart', function(){ var self = $(this); }); $('body').on('click', '.command', vod.handleCommandEvent); $('body').on('click', '#send-reset-password', function(){ var email = $("#reset-password-email").val(); if(email != ''){ $.post('/user/reset',{email:email}, function(res){ $("#error-message").text(res.error || res.message); $("#modal-error").modal('show'); }); } }) $('body').on('click', '#confirm-reset-password', function(){ var email = $("#reset-password-email").val(); var code = $("#reset-password-code").val(); var password = $("#reset-password-password").val(); if(email != ''){ $.post('/user/reset-password',{email:email, code:code, password:password}, function(res){ $("#error-message").text(res.error || res.message); $("#modal-error").modal('show'); if(res.message){ $('#modal-reset-account').modal('hide'); } }); } }) $('body').on('click', '#signin-help', function(){ $('#modal-reset-account').modal('show') }) $("body").on('click', '#send-request', function(){ var title = $('#request-title').val(); var details = $('#request-details').val(); if(title == ''){ return; } $.post('/media/request',{title:title, details:details}, function(res){ if(res.error){ $("#error-message").text(res.error); $("#modal-error").modal('show'); }else{ 
$("#error-message").text(res.message); $("#modal-error").modal('show'); $("#modal-request").modal('hide'); $('#request-title').val(''); $('#request-details').val(''); } }); }); $("body").on('click', '.show-request-dialog', function(){ var val = $("#finder").val(); $("#request-title").val(val); $('#modal-request').modal('show') }); $("body").on('click', '#confirm-verification', function(){ var code = $('#verification-code').val(); if(!code || code == ""){ return; } $.post('/user/verify',{code:code}, function(res){ if(res.error){ return vod.handleError(res.error); } $('#modal-verify').modal('hide'); }); }); $("body").on('click', '#send-verification-code', function(){ $.post('/user/send-verification-sms') }); $('body').on('click', '.flag-file a', function(){ var id = $(this).parent().parent().attr('data-id'); var reason = $(this).text(); $.post('/media/flag', {id:id, reason:reason}, function(res){ }); }) $("body").on('click', '.subscription-toggle', function(){ var self = $(this); var id = self.attr('data-id'); $.post('/user/subscription-toggle',{id:id}, function(res){ if(res.error){ if(!res.error.verified){ $('#modal-verify').modal('show') } return; } if(res.watchlisted){ self.addClass('active'); self.find('span:first').removeClass('glyphicon-plus').addClass('glyphicon-ok'); self.find('.watchlist-label').html('&nbsp;watchlisted'); }else{ self.removeClass('active'); self.find('span:first').removeClass('glyphicon-ok').addClass('glyphicon-plus'); self.find('.watchlist-label').html('&nbsp;add to watchlist'); } }) }); $("body").on('click', '#signup', function(){ var fields = "username password".split(" "); var vals = {}; _.each(fields, function(f){vals[f] = $("#signup-" +f).val()}); var isEmpty = _.find(fields, function(f){return $("#signup-" +f).val() == "";}) ? 
true : false; if(isEmpty){ //return alert('Please fill all fields'); } //enforce a strong pass if(vals.password.length <=5 || "123456 asdfg".indexOf(vals.password) != -1){ $("#error-message").text('Please use a stronger password'); $("#modal-error").modal('show'); return; } if(vals.username.length != 7){ $("#error-message").text('Incorrect mobile number'); $("#modal-error").modal('show'); return; } $.post('/user/register',vals, function(res){ if(res.error){ $("#error-message").text(res.error); $("#modal-error").modal('show'); }else if(res.message){ $("#error-message").text(res.message); $("#modal-error").modal('show'); $('#signup-form').slideUp('fast'); } }); }) $('body').on('click', '#listings-subpropfilter .btn-group-vertical label', function(){ setTimeout(window.vod.filterSearch,0); }); $('body').on('click', '.vod-list-inline li', function(){ var self = $(this); self.siblings().removeClass('active'); self.addClass("active"); }); $('body').on('click', '#search-more', function(){ var last = $("#search-content .media:last"); var id = last.attr('media-id'); vod.searchSince = id; vod.filterSearch(); }) $('body').on('click', '#filter-container-elements li', function(){ var self = $(this); var props = self.attr('filter-subprops'); if(props){ props = JSON.parse(props); var html = jade.render("listings-subpropfilter",{props:props}); var subprop = $("#listings-subpropfilter"); if(subprop.length){ subprop.html(html); }else{ $("#sub-contents").prepend("<section id='listings-subpropfilter'>" + html + "</section>"); } $("#listings-subpropfilter .btn-group-vertical").each(function(){ var self = $(this); var prop = self.attr('filter-prop'); self.find('label').each(function(){ var label = $(this); var text = label.text(); label.html('<input type="radio" name="'+prop+'">' + text); }) }); }else{ $("#listings-subpropfilter").html(''); } setTimeout(window.vod.filterSearch,0); }) $("body").on('keyup', '#finder', function(){ var t = window.vod.searchTime; if(t){ window.clearTimeout(window.vod.searchTime); } window.vod.searchTime = setTimeout(function(){ var val = $("#finder").val(); if(val == '' || val.length < 2){ $("#main-content").show(); $("#search-content").hide(); return; } if(window.vod.searchXHR){ window.vod.searchXHR.abort(); } var search_params = { query:val } $("#main-content").hide(); $("#search-content").show(); if(vod.searchSince){ search_params.since = vod.searchSince; } if(vod.searchType){ search_params.type = vod.searchType; } window.vod.searchXHR = $.getJSON('/media/search',search_params,function(res){ vod.searchSince = null; if(!res.length && !search_params.since){ var html = jade.render('not-found'); $("#search-content").html(html); return; } var items = _.map(res, function(m){ m.files = _.sortBy(m.files, function(a){ var score = parseFloat(a.season + '.' 
+ a.episode); return score; }); var lastfile = _.last(m.files); m.lastfile = lastfile; return jade.render('item',{item:m, lastfile:lastfile}); }); if(search_params.since){ $("#search-content .listing").append(items.join('')); if(res.length < 4){ $("#search-more").remove(); } }else{ var more = res.length < 4 ?'':jade.render('search-more'); var html = '<ul class="dl-horizontal listing list-inline">'+items.join('')+'</ul>' + more; $("#search-content").html(html); } }); },300); }) $("body").on('click', '.media a', function(e){ // e.preventDefault(); // e.stopPropagation(); // var media = $(this).parent(); //vod.displayMedia(media, "html"); }); $("body").on("change", "#media-retrieve-data", function(){ var val = $(this).val(); $("#loader").show(); $.post('/media/retrieve-data',{url:val}, function(res){ $("#loader").hide(); for(prop in res){ $("#new-media-info-container input[media-prop='"+prop+"']").val(res[prop]); } $("#new-media-info-container textarea[media-prop='description']").html(res.description); $("#media-image").attr('src',"/media/pipe?url=" + res.poster); }); }); $("body").on("dragenter","#file-dropper", function(e){ e.stopPropagation(); e.preventDefault(); var self = $(this); self.css('border', '2px solid #0B85A1'); }); $("body").on("dragend","#file-dropper", function(e){ e.stopPropagation(); e.preventDefault(); var self = $(this); self.css('border', '2px solid red'); }); $("body").on("dragover","#file-dropper", function(e){ e.stopPropagation(); e.preventDefault(); }); $("body").on("drop","#file-dropper", function(e){ e.stopPropagation(); e.preventDefault(); var self = $(this); self.css('border', '2px solid transparent'); var files = e.originalEvent.dataTransfer.files; for(var i=0; i<files.length; i++){ files_container.push(files[i]); } renderFiles(); }); $("body").on('click', '#media-type label', function(){ $("#media-file-type").hide(); $("#media-image").attr("src",""); files_container.length = 0; window.vod.selectedMedia = {}; var self = $(this); var type = self.text(); var template = self.attr('media-template'); var html = jade.render(template); $("#file-container").html(''); files_container.length = 0; $("#new-media-info-container").html(html); if(type == "App"){ type = "application"; } var engine = new Bloodhound({ datumTokenizer: function(d){ return Bloodhound.tokenizers.whitespace(d.title) }, queryTokenizer: Bloodhound.tokenizers.whitespace, prefetch:{ url:'/media/search?type='+type.toLowerCase()+'&fields=title&include_unpublished=1&limit=1000000', ttl:1000, filter:function(media){ return $.map(media, function (data) { data.value = data.title; return data; }); } } }); engine.clearPrefetchCache(); engine.initialize(); $(".search").typeahead( { hint: true, highlight: true, minLength: 1 }, { displayKey: 'value', source: engine.ttAdapter() } ) .on("typeahead:selected", function(datum, obj){ $("#loader").show(); $.getJSON('/media/' + obj._id, function(obj){ $("#loader").hide(); $("#media-file-type label:first").trigger('click'); window.vod.selectedMedia = obj; $("[media-prop-type='text']").each(function(){ var self = $(this); var prop = self.attr("media-prop"); self.val(obj[prop]); }); $("[media-prop-type='group']").each(function(){ var self = $(this); var prop = self.attr("media-prop"); self.find('label').each(function(){ if($(this).text().trim() == obj[prop]){ $(this).addClass('active'); }else{ $(this).removeClass('active'); } }); }); $("#new-media-form-content").show(); $("#media-file-type").show(); var html = jade.render('file-details',{files:obj.files}); 
$("#display-uploaded-media").html(html); var options = jade.render('newmedia-media-options',{id:obj._id}); $("#save-info").parent().append(options); }); }) }) $("body").on('click', '#media-file-type label', function(){ var self = $(this); var template = self.attr('media-template'); var html = jade.render(template); $("#media-file-type-container").html(html); }); $('body').on('click', function (e) { if (!$(e.target).parent().parent().hasClass('media') && $(e.target).parents('.popover.in').length === 0) { $(".media").popover('destroy').removeClass('selected'); } }); $("body").on('click', '.remove-upload-file', function(){ var index = $(this).attr('data-index'); files_container.splice(index,1); renderFiles(); }); }); function
(){ $("#file-container").html(''); files_container.forEach(function(file, index){ var html = jade.render('file-container',{index:index, file:file.name||file.fileName, size:prettyBytes(file.size||file.fileSize)}); $("#file-container").append(html); }) } enquire.register("screen and (max-width:768px)", { match:function(){ vod.mediaDisplayType = "tabpop"; console.log(vod) }, unmatch:function(){ vod.mediaDisplayType = "popover"; console.log(vod) } }); var socket = window.socket = io.connect();
renderFiles
identifier_name
web.js
//preload images // $.ajaxSetup({ // timeout:200 // }); var vod = window.vod = { selectedMedia:{}, mediaDisplayType:"popover", searchXHR:null, displayMedia:function(el, html){ //TODO: destroy any existing media var media = $(".media"); var data = el.attr("media-data"); var type = el.attr("media-type").toLowerCase(); media.removeClass('selected'); if(!data){ return; } data = JSON.parse(decodeURIComponent(data)); for(var file in data.files){ var len = data.files[file].size; data.files[file].size = prettyBytes(len); } if(type == "series" && data.files.length){ //latest episode var latest_episode = _.last(data.files); latest_episode.title = "S" + latest_episode.season + "E" + latest_episode.episode; data.latest_episode = latest_episode; data.description = data.latest_episode.plot || data.description; } el.addClass('selected'); media.popover('destroy'); $(".tabpop") .removeClass("tabpop-anim-in") .removeClass("tabpop-anim-out"); //display if(this.mediaDisplayType == "popover"){ el.popover({ html:true, content:jade.render(type, data) }).popover('show'); }else{ $(".tabpop").html(jade.render(type, data)) $(".tabpop").addClass("tabpop-anim-in"); } }, filterSearch:function(){ //get properties var props = $("[filter-prop]") var props_build = {}; if(props.length){ props.each(function(){ //debugger; var self = $(this) var prop = self.attr('filter-prop'); var val; if(self.find('li.active').length){ val = self.find('li.active').text() }else{ val = self.find('label.active').text() } if(val && val.length > 0){ props_build[prop] = val; } }); } //get str var finder = $("#finder"); var query = finder.val(); var tokens = query.split(' '); var t_obj = {}; tokens = _.map(tokens,function(t){ if(t.indexOf(":") != -1){ var str = t.split(':'); var key = str[0]; var val = str[1]; t_obj[key] = val; if(props_build[key]){ var str = key + ":" + props_build[key]; //props_build[key]; } return ''; }else{ return t; } }); for(var i in props_build){ tokens.push(i+":"+props_build[i]); } finder.val(tokens.join(' ').trim().toLowerCase()); finder.trigger('keyup'); }, handleError:function handleError(error){ var error = error || {message: "An error occured"}; var message = error.message || error.error.message || JSON.stringify(error); $("#error-message").text(message); $("#modal-error").modal('show'); }, handleCommand : function handleCommand(data){ if(data.confirm){ if (!confirm("Are you sure you want to do this?")){ return; } } var route = data['route']; var id = data['id']; var method = data['method'] || "get"; var template = data['template']; var display = data['display']; var payload = data['payload'] || {}; var processData = true; var contentType = true; console.log(payload.constructor.name) if(payload){ processData = false; contentType = false; if(payload.constructor.name != "FormData"){ var form = new FormData(); for(var i in payload){ form.append(i, payload[i]); } payload = form; } } var q = { method:method, url:route, dataType:'json', processData: processData, contentType: contentType, success:function(data){ if(data.error){ return alert(data.error); } if(data.html){ if(display){ $(display).html(data.html); }else{ $("body").append(data.html); } } if(template && display && data && data.length){ var html = _.map(data, function(media){ return jade.render(template,{item:media}); }); html = html.join(''); $(display).html(html); } }, error:function(err){ vod.handleError(err); } } if(method != 'get'){ q.data = payload; } $.ajax(q) }, handleCommandEvent : function handleCommandEvent(event){ var data = {}; var el = $(this); 
data.route = el.attr('data-cmd-route'); data.id = el.attr('data-id'); data.method = el.attr('data-cmd-method'); data.template = el.attr('data-cmd-template'); data.display = el.attr('data-cmd-display'); data.route = el.attr('data-cmd-route'); data.confirm = el.attr('data-cmd-confirm'); data.payload = new FormData(); $.each(this.attributes, function(){ if(this.name.indexOf('data-payload') != -1){ var attr = this.name.replace('data-payload-', ''); data.payload.append(attr,this.value); } }); vod.handleCommand(data); }, refreshCart: function(checkout){ $.getJSON('/cart', function(res){ if(res.error){ return alert(res.error); } if(res.medias && res.medias.length){ var html = jade.render('cart-items',{items:res.medias, total_bytes:res.total_bytes, total_price:res.total_price, checkout:checkout||false}); $("#cart-items").html(html); } }); }, getRequests: function(){ async.forever(function(next){ $.getJSON('/cart/pending-payment', function(res){ //remove transactions which doesn't exist console.log(res); $('.cart-authorize-item').each(function(){ var i = $(this); var id = i.attr('id'); var c = _.find(res,function(c){ console.log(c._id, id); return c._id == id; }); if(!c){ i.remove(); } }); res.forEach(function(c){ var exist = $("#" + c._id); if(exist.length){ return; } var html = jade.render('cart-items-admin',{item:c}); $("#requests").append(html); }); setTimeout(function(){ vod.getRequests(); },500) }).fail(function(){ setTimeout(function(){ vod.getRequests(); },500) }); }, function(err){ }) } }; var files_container = []; $(function(){ $('body').on('click', '.approve-item', function(){ var id = $(this).attr('data-id'); $.post('/cart/'+id+'/approve', function(res){ }); }); $('body').on('click', '.remove-cart-item', function(){ var id = $(this).attr('data-id'); $.post('/cart/remove', {id:id}, function(res){ if(res.error){ return alert(res.error); } vod.refreshCart(); }); }); var ads = $('.advertisement').map(function(){ var id = $(this).attr('data-id'); return id }); setTimeout(function(){ var arr = ads.toArray(); if(!arr.length) return; $.post('/ad/tick',{ids:arr}); },1000); $(window).scroll(function(){ if ($(window).scrollTop() + $(window).height() >= $(document).height() - 50){ $("#search-more").trigger('click'); } }); $('body').on('click', '.chart', function(){ var self = $(this); }); $('body').on('click', '.command', vod.handleCommandEvent); $('body').on('click', '#send-reset-password', function(){ var email = $("#reset-password-email").val(); if(email != ''){ $.post('/user/reset',{email:email}, function(res){ $("#error-message").text(res.error || res.message); $("#modal-error").modal('show'); }); } }) $('body').on('click', '#confirm-reset-password', function(){ var email = $("#reset-password-email").val(); var code = $("#reset-password-code").val(); var password = $("#reset-password-password").val(); if(email != ''){ $.post('/user/reset-password',{email:email, code:code, password:password}, function(res){ $("#error-message").text(res.error || res.message); $("#modal-error").modal('show'); if(res.message){ $('#modal-reset-account').modal('hide'); } }); } }) $('body').on('click', '#signin-help', function(){ $('#modal-reset-account').modal('show') }) $("body").on('click', '#send-request', function(){ var title = $('#request-title').val(); var details = $('#request-details').val(); if(title == ''){ return; } $.post('/media/request',{title:title, details:details}, function(res){ if(res.error){ $("#error-message").text(res.error); $("#modal-error").modal('show'); }else{ 
$("#error-message").text(res.message); $("#modal-error").modal('show'); $("#modal-request").modal('hide'); $('#request-title').val(''); $('#request-details').val(''); } }); }); $("body").on('click', '.show-request-dialog', function(){ var val = $("#finder").val(); $("#request-title").val(val); $('#modal-request').modal('show') }); $("body").on('click', '#confirm-verification', function(){ var code = $('#verification-code').val(); if(!code || code == ""){ return; } $.post('/user/verify',{code:code}, function(res){ if(res.error){ return vod.handleError(res.error); } $('#modal-verify').modal('hide'); }); }); $("body").on('click', '#send-verification-code', function(){ $.post('/user/send-verification-sms') }); $('body').on('click', '.flag-file a', function(){ var id = $(this).parent().parent().attr('data-id'); var reason = $(this).text(); $.post('/media/flag', {id:id, reason:reason}, function(res){ }); }) $("body").on('click', '.subscription-toggle', function(){ var self = $(this); var id = self.attr('data-id'); $.post('/user/subscription-toggle',{id:id}, function(res){ if(res.error){ if(!res.error.verified){ $('#modal-verify').modal('show') } return; } if(res.watchlisted){ self.addClass('active'); self.find('span:first').removeClass('glyphicon-plus').addClass('glyphicon-ok'); self.find('.watchlist-label').html('&nbsp;watchlisted'); }else{ self.removeClass('active'); self.find('span:first').removeClass('glyphicon-ok').addClass('glyphicon-plus'); self.find('.watchlist-label').html('&nbsp;add to watchlist'); } }) }); $("body").on('click', '#signup', function(){ var fields = "username password".split(" "); var vals = {}; _.each(fields, function(f){vals[f] = $("#signup-" +f).val()}); var isEmpty = _.find(fields, function(f){return $("#signup-" +f).val() == "";}) ? 
true : false; if(isEmpty){ //return alert('Please fill all fields'); } //enforce a strong pass if(vals.password.length <=5 || "123456 asdfg".indexOf(vals.password) != -1){ $("#error-message").text('Please use a stronger password'); $("#modal-error").modal('show'); return; } if(vals.username.length != 7){ $("#error-message").text('Incorrect mobile number'); $("#modal-error").modal('show'); return; } $.post('/user/register',vals, function(res){ if(res.error){ $("#error-message").text(res.error); $("#modal-error").modal('show'); }else if(res.message){ $("#error-message").text(res.message); $("#modal-error").modal('show'); $('#signup-form').slideUp('fast'); } }); }) $('body').on('click', '#listings-subpropfilter .btn-group-vertical label', function(){ setTimeout(window.vod.filterSearch,0); }); $('body').on('click', '.vod-list-inline li', function(){ var self = $(this); self.siblings().removeClass('active'); self.addClass("active"); }); $('body').on('click', '#search-more', function(){ var last = $("#search-content .media:last"); var id = last.attr('media-id'); vod.searchSince = id; vod.filterSearch(); }) $('body').on('click', '#filter-container-elements li', function(){ var self = $(this); var props = self.attr('filter-subprops'); if(props){ props = JSON.parse(props); var html = jade.render("listings-subpropfilter",{props:props}); var subprop = $("#listings-subpropfilter"); if(subprop.length){ subprop.html(html); }else{ $("#sub-contents").prepend("<section id='listings-subpropfilter'>" + html + "</section>"); } $("#listings-subpropfilter .btn-group-vertical").each(function(){ var self = $(this); var prop = self.attr('filter-prop'); self.find('label').each(function(){ var label = $(this); var text = label.text(); label.html('<input type="radio" name="'+prop+'">' + text); }) }); }else{ $("#listings-subpropfilter").html(''); } setTimeout(window.vod.filterSearch,0); }) $("body").on('keyup', '#finder', function(){ var t = window.vod.searchTime; if(t){ window.clearTimeout(window.vod.searchTime); } window.vod.searchTime = setTimeout(function(){ var val = $("#finder").val(); if(val == '' || val.length < 2){ $("#main-content").show(); $("#search-content").hide(); return; } if(window.vod.searchXHR){ window.vod.searchXHR.abort(); } var search_params = { query:val } $("#main-content").hide(); $("#search-content").show(); if(vod.searchSince){ search_params.since = vod.searchSince; } if(vod.searchType){ search_params.type = vod.searchType; } window.vod.searchXHR = $.getJSON('/media/search',search_params,function(res){ vod.searchSince = null; if(!res.length && !search_params.since){ var html = jade.render('not-found'); $("#search-content").html(html); return; } var items = _.map(res, function(m){ m.files = _.sortBy(m.files, function(a){ var score = parseFloat(a.season + '.' 
+ a.episode); return score; }); var lastfile = _.last(m.files); m.lastfile = lastfile; return jade.render('item',{item:m, lastfile:lastfile}); }); if(search_params.since){ $("#search-content .listing").append(items.join('')); if(res.length < 4){ $("#search-more").remove(); } }else{ var more = res.length < 4 ?'':jade.render('search-more'); var html = '<ul class="dl-horizontal listing list-inline">'+items.join('')+'</ul>' + more; $("#search-content").html(html); } }); },300); }) $("body").on('click', '.media a', function(e){ // e.preventDefault(); // e.stopPropagation(); // var media = $(this).parent(); //vod.displayMedia(media, "html"); }); $("body").on("change", "#media-retrieve-data", function(){ var val = $(this).val(); $("#loader").show(); $.post('/media/retrieve-data',{url:val}, function(res){ $("#loader").hide(); for(var prop in res){ $("#new-media-info-container input[media-prop='"+prop+"']").val(res[prop]); } $("#new-media-info-container textarea[media-prop='description']").html(res.description); $("#media-image").attr('src',"/media/pipe?url=" + res.poster); }); }); $("body").on("dragenter","#file-dropper", function(e){ e.stopPropagation(); e.preventDefault(); var self = $(this); self.css('border', '2px solid #0B85A1'); }); $("body").on("dragend","#file-dropper", function(e){ e.stopPropagation(); e.preventDefault(); var self = $(this); self.css('border', '2px solid red'); }); $("body").on("dragover","#file-dropper", function(e){ e.stopPropagation(); e.preventDefault(); }); $("body").on("drop","#file-dropper", function(e){ e.stopPropagation(); e.preventDefault(); var self = $(this); self.css('border', '2px solid transparent'); var files = e.originalEvent.dataTransfer.files; for(var i=0; i<files.length; i++){ files_container.push(files[i]); } renderFiles(); }); $("body").on('click', '#media-type label', function(){ $("#media-file-type").hide(); $("#media-image").attr("src",""); files_container.length = 0; window.vod.selectedMedia = {}; var self = $(this); var type = self.text(); var template = self.attr('media-template'); var html = jade.render(template); $("#file-container").html(''); $("#new-media-info-container").html(html); if(type == "App"){ type = "application"; } var engine = new Bloodhound({ datumTokenizer: function(d){ return Bloodhound.tokenizers.whitespace(d.title) }, queryTokenizer: Bloodhound.tokenizers.whitespace, prefetch:{ url:'/media/search?type='+type.toLowerCase()+'&fields=title&include_unpublished=1&limit=1000000', ttl:1000, filter:function(media){ return $.map(media, function (data) { data.value = data.title; return data; }); } } }); engine.clearPrefetchCache(); engine.initialize(); $(".search").typeahead( { hint: true, highlight: true, minLength: 1 }, { displayKey: 'value', source: engine.ttAdapter() } ) .on("typeahead:selected", function(datum, obj){ $("#loader").show(); $.getJSON('/media/' + obj._id, function(obj){ $("#loader").hide(); $("#media-file-type label:first").trigger('click'); window.vod.selectedMedia = obj; $("[media-prop-type='text']").each(function(){ var self = $(this); var prop = self.attr("media-prop"); self.val(obj[prop]); }); $("[media-prop-type='group']").each(function(){ var self = $(this); var prop = self.attr("media-prop"); self.find('label').each(function(){ if($(this).text().trim() == obj[prop]){ $(this).addClass('active'); }else{ $(this).removeClass('active'); } }); }); $("#new-media-form-content").show(); $("#media-file-type").show(); var html = jade.render('file-details',{files:obj.files});
$("#display-uploaded-media").html(html); var options = jade.render('newmedia-media-options',{id:obj._id}); $("#save-info").parent().append(options); }); }) }) $("body").on('click', '#media-file-type label', function(){ var self = $(this); var template = self.attr('media-template'); var html = jade.render(template); $("#media-file-type-container").html(html); }); $('body').on('click', function (e) { if (!$(e.target).parent().parent().hasClass('media') && $(e.target).parents('.popover.in').length === 0) { $(".media").popover('destroy').removeClass('selected'); } }); $("body").on('click', '.remove-upload-file', function(){ var index = $(this).attr('data-index'); files_container.splice(index,1); renderFiles(); }); }); function renderFiles()
enquire.register("screen and (max-width:768px)", { match:function(){ vod.mediaDisplayType = "tabpop"; console.log(vod) }, unmatch:function(){ vod.mediaDisplayType = "popover"; console.log(vod) } }); var socket = window.socket = io.connect();
{ $("#file-container").html(''); files_container.forEach(function(file, index){ var html = jade.render('file-container',{index:index, file:file.name||file.fileName, size:prettyBytes(file.size||file.fileSize)}); $("#file-container").append(html); }) }
identifier_body
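The `#finder` keyup handler in the record above debounces search input by clearing and resetting a 300 ms timer before firing the request. A minimal sketch of that same pattern in Go, assuming a `Debouncer` helper of my own naming (nothing here comes from the source file):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// Debouncer delays running fn until calls have been quiet for the window,
// mirroring the clearTimeout/setTimeout dance in the keyup handler above.
type Debouncer struct {
	mu    sync.Mutex
	timer *time.Timer
	wait  time.Duration
}

func NewDebouncer(wait time.Duration) *Debouncer {
	return &Debouncer{wait: wait}
}

// Trigger schedules fn, cancelling any previously scheduled call.
func (d *Debouncer) Trigger(fn func()) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.timer != nil {
		d.timer.Stop() // equivalent of window.clearTimeout
	}
	d.timer = time.AfterFunc(d.wait, fn) // equivalent of window.setTimeout
}

func main() {
	d := NewDebouncer(300 * time.Millisecond)
	for _, q := range []string{"d", "db", "db ", "db ex"} {
		q := q
		d.Trigger(func() { fmt.Println("search:", q) })
		time.Sleep(50 * time.Millisecond) // keystrokes arrive faster than the window
	}
	time.Sleep(400 * time.Millisecond) // only the final query fires
}
```

As in the JavaScript, intermediate keystrokes never reach the server; only the query that survives the quiet window does.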
db_explorer.go
package main import ( "net/http" "net/url" "database/sql" "encoding/json" "fmt" "regexp" "strings" "strconv" "io/ioutil" "sort" ) type Handler struct { DB *sql.DB tables map[string]Table } var ( entriesPattern = regexp.MustCompile(`^\/[^\/]+\/?$`) entryPattern = regexp.MustCompile(`^\/[^\/]+\/.+`) ) type ApiErrorResponse struct { Error string `json:"error"` } type ApiSuccessResponse struct { Response interface{} `json:"response"` } type Table struct { Name string PrimaryKey string Fields map[string]Field } type Field struct { Name string Type string Nullable bool AutoIncrement bool Default interface{} } const ( DEFAULT_LIMIT = 5 DEFAULT_OFFSET = 0 ) func InternalServerError(err error, w http.ResponseWriter) { fmt.Println(err) w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) } func errorResponse(status int, message string, w http.ResponseWriter) { res, err := json.Marshal(ApiErrorResponse{message}) if err != nil { InternalServerError(err, w) return } w.WriteHeader(status) w.Write(res) } func successResponse(status int, obj interface{}, w http.ResponseWriter) { res, err := json.Marshal(ApiSuccessResponse{obj}) if err != nil { InternalServerError(err, w) return } w.WriteHeader(status) w.Write(res) } func (h *Handler) ListTables(w http.ResponseWriter, r *http.Request) { var tables []string for k := range h.tables { tables = append(tables, k) } sort.Strings(tables) successResponse(http.StatusOK, map[string][]string{"tables": tables}, w) } func getLimitOffset(u *url.URL) (int, int) { q := u.Query() limit := DEFAULT_LIMIT offset := DEFAULT_OFFSET limitParam, _ := q["limit"] if len(limitParam) > 0 { limitInt, err := strconv.Atoi(limitParam[0]) if err == nil { limit = limitInt } } offsetParam, _ := q["offset"] if len(offsetParam) > 0 { offsetInt, err := strconv.Atoi(offsetParam[0]) if err == nil { offset = offsetInt } } return limit, offset } func (h *Handler) List(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusNotFound, err.Error(), w) return } limit, offset := getLimitOffset(r.URL) rows, err := h.DB.Query("SELECT * FROM " + table.Name + " LIMIT ? OFFSET ?", limit, offset) if err != nil { InternalServerError(err, w) return } defer rows.Close() result, err := parseResponse(rows, table) if err != nil { InternalServerError(err, w) return } successResponse(http.StatusOK, map[string]interface{}{"records": result}, w) } func parseResponse(rows *sql.Rows, table *Table) ([]map[string]interface{}, error) { columns, err := rows.Columns() if err != nil { return nil, err } count := len(columns) result := make([]map[string]interface{}, 0) for rows.Next() { values := make([]interface{}, count) valuePtrs := make([]interface{}, count) for i := range columns { valuePtrs[i] = &values[i] } err = rows.Scan(valuePtrs...) if err != nil { return nil, err } result = append(result, parseRow(table, columns, values)) } return result, nil } func (h *Handler) Show(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } key, err := getKey(r) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } rows, err := h.DB.Query("SELECT * FROM " + table.Name + " WHERE " + table.PrimaryKey + " = ? 
LIMIT 1", key) if err != nil { InternalServerError(err, w) return } defer rows.Close() result, err := parseResponse(rows, table) if err != nil { InternalServerError(err, w) return } if len(result) == 0 { errorResponse(http.StatusNotFound, "record not found", w) return } successResponse(http.StatusOK, map[string]interface{}{"record": result[0]}, w) } func (h *Handler) Create(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } body, err := ioutil.ReadAll(r.Body) defer r.Body.Close() if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var params map[string]interface{} err = json.Unmarshal(body, &params) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var fields []string var values []interface{} for _,field := range table.Fields { if field.AutoIncrement { continue } fields = append(fields, field.Name) value, _ := params[field.Name] if value == nil { values = append(values, field.Default) continue } values = append(values, value) } insertSQL := "INSERT INTO $table_name (`$fields`) VALUES ($values)" insertSQL = strings.Replace(insertSQL, "$table_name", table.Name, 1) insertSQL = strings.Replace(insertSQL, "$fields", strings.Join(fields, "`, `"), 1) insertSQL = strings.Replace(insertSQL, "$values", strings.Repeat("?, ", len(fields) - 1) + "?", 1) fmt.Println(insertSQL) _, err = h.DB.Exec(insertSQL, values...) if err != nil { fmt.Println(err) InternalServerError(err, w) return } rows, err := h.DB.Query("SELECT LAST_INSERT_ID()") if err != nil { InternalServerError(err, w) return } defer rows.Close() var key int for rows.Next() { err = rows.Scan(&key) if err != nil { InternalServerError(err, w) return } } successResponse(http.StatusOK, map[string]int{table.PrimaryKey: key}, w) } func (h *Handler) Edit(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } key, err := getKey(r) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } body, err := ioutil.ReadAll(r.Body) defer r.Body.Close() if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var params map[string]interface{} err = json.Unmarshal(body, &params) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var fields []string var values []interface{} fmt.Printf("Params: %v",params) for key, value := range params { field, ok := table.Fields[key] if !ok
// skip unknown fields fields = append(fields, field.Name) invalidTypeMessage := "field " + field.Name + " have invalid type" // update auto increment field does not allowed if field.AutoIncrement { errorResponse(http.StatusBadRequest, invalidTypeMessage, w) return } switch { case value == nil && field.Nullable: values = append(values, nil) case field.Type == "string": val, ok := value.(string) if !ok { errorResponse(http.StatusBadRequest, invalidTypeMessage, w) return } values = append(values, val) case field.Type == "int": val, ok := value.(int) if !ok { errorResponse(http.StatusBadRequest, invalidTypeMessage, w) return } values = append(values, val) } } values = append(values, key) fmt.Printf("\nFields: %v",fields) fmt.Printf("\nValues: %v",values) updateSQL := "UPDATE $table_name SET $fields = ? WHERE $primary_key = ?" updateSQL = strings.Replace(updateSQL, "$table_name", table.Name, 1) updateSQL = strings.Replace(updateSQL, "$primary_key", table.PrimaryKey, 1) updateSQL = strings.Replace(updateSQL, "$fields", strings.Join(fields, " = ?, "), 1) fmt.Println() fmt.Printf(updateSQL, values...) res, err := h.DB.Exec(updateSQL, values...) if err != nil { fmt.Println(err) InternalServerError(err, w) return } affected, err := res.RowsAffected() if err != nil { InternalServerError(err, w) return } successResponse(http.StatusOK, map[string]int64{"updated": affected}, w) } func getKey(r *http.Request) (int, error) { params := strings.Split(r.URL.Path, "/") key, err := strconv.Atoi(params[2]) if err != nil { return 0, fmt.Errorf("Primary key must be int") } return key, nil } func getTable(r *http.Request, h *Handler) (*Table, error) { params := strings.Split(r.URL.Path, "/") tableName := params[1] table, ok := h.tables[tableName] if !ok { return nil, fmt.Errorf("unknown table") } return &table, nil } func strToBool(in string) bool { if in == "YES" { return true } return false } type NullString struct { Valid bool String string } func (s NullString) MarshalJSON() ([]byte, error) { if s.Valid { return json.Marshal(s.String) } return []byte("null"), nil } func parseRow(table *Table, columns []string, vals []interface{}) map[string]interface{} { result := map[string]interface{}{} for i, val := range vals { fieldName := columns[i] field, ok := table.Fields[fieldName] if !ok { panic("Can't find field description") } switch { case field.Type == "string": b, ok := val.([]byte) if ok { result[fieldName] = NullString{true, string(b)} } else { result[fieldName] = NullString{false, ""} } case field.Type == "int": result[fieldName] = val } } return result } func (h *Handler) Delete(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } id, err := getKey(r) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } res, err := h.DB.Exec("DELETE FROM " + table.Name + " WHERE id = ?;", id) if err != nil { fmt.Println(err) InternalServerError(err, w) return } affected, err := res.RowsAffected() if err != nil { InternalServerError(err, w) return } successResponse(http.StatusOK, map[string]int64{"deleted": affected}, w) } func (h *Handler) LoadMetadata() { h.tables = map[string]Table{} rows, err := h.DB.Query("SHOW TABLES;") if err != nil { panic(err) } defer rows.Close() tables := make([]string, 0) for rows.Next() { var table string err = rows.Scan(&table) if err != nil { panic(err) } tables = append(tables, table) } for _, tableName := range tables { table := Table{ Name: tableName, } rows, err := 
h.DB.Query("SHOW FULL COLUMNS FROM " + tableName + ";") if err != nil { panic(err) } var skipColumn sql.NullString fields := map[string]Field{} for rows.Next() { var fieldInfo FieldInfo err = rows.Scan( &fieldInfo.Name, &fieldInfo.Type, &skipColumn, &fieldInfo.Nullable, &fieldInfo.Key, &fieldInfo.Default, &fieldInfo.Extra, &skipColumn, &skipColumn, ) if err != nil { panic(err) } if fieldInfo.Key.Valid && fieldInfo.Key.String == "PRI" { table.PrimaryKey = fieldInfo.Name } field := Field{ Name: fieldInfo.Name, Type: sqlTypeToGolangType(fieldInfo.Type), Nullable: strToBool(fieldInfo.Nullable), AutoIncrement: fieldInfo.Extra.Valid && fieldInfo.Extra.String == "auto_increment", } if (fieldInfo.Default.Valid) { field.Default = fieldInfo.Default.String } if field.Default == nil && !field.Nullable { switch field.Type { case "string": field.Default = "" case "int": field.Default = 0 } } fields[fieldInfo.Name] = field } table.Fields = fields h.tables[tableName] = table } } type FieldInfo struct { Name string Type string Nullable string Key sql.NullString Extra sql.NullString Default sql.NullString } func sqlTypeToGolangType(sqlType string) string { switch { case strings.Contains(sqlType, "int"): return "int" case sqlType == "text" || strings.Contains(sqlType, "varchar"): return "string" default: fmt.Println("Unknown type: ", sqlType) return "string" } } func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { fmt.Println("Request: ", r.Method, r.URL.Path) switch { case r.Method == "GET" && r.URL.Path == "/": h.ListTables(w, r) case r.Method == "GET" && entriesPattern.MatchString(r.URL.Path): h.List(w, r) case r.Method == "GET" && entryPattern.MatchString(r.URL.Path): h.Show(w, r) case r.Method == "PUT" && entriesPattern.MatchString(r.URL.Path): h.Create(w, r) case r.Method == "POST" && entryPattern.MatchString(r.URL.Path): h.Edit(w, r) case r.Method == "DELETE" && entryPattern.MatchString(r.URL.Path): h.Delete(w, r) default: errorResponse(http.StatusNotFound, "unknown method", w) } } func NewDbExplorer(db *sql.DB) (http.Handler, error) { handler := Handler{DB: db} handler.LoadMetadata() return handler, nil }
{ continue }
conditional_block
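Two details in the `Edit` handler above are worth flagging. First, `encoding/json` decodes every JSON number into `float64` when the destination is `interface{}`, so the `value.(int)` assertion in the type switch can never succeed and int-typed columns will always be rejected as invalid. Second, the debug line `fmt.Printf(updateSQL, values...)` treats the SQL text as a format string, which garbles any `%` in it and mismatches the arguments; `fmt.Println(updateSQL, values)` avoids that. A standalone sketch of the float64 pitfall (illustration only, not the file's code):

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	var params map[string]interface{}
	// JSON numbers land as float64 when decoded into interface{}.
	_ = json.Unmarshal([]byte(`{"age": 42}`), &params)

	if _, ok := params["age"].(int); !ok {
		fmt.Printf("value.(int) fails: dynamic type is %T\n", params["age"])
	}
	// The working pattern: assert float64, then convert.
	if f, ok := params["age"].(float64); ok {
		fmt.Println("as int:", int(f))
	}
}
```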
db_explorer.go
package main import ( "net/http" "net/url" "database/sql" "encoding/json" "fmt" "regexp" "strings" "strconv" "io/ioutil" "sort" ) type Handler struct { DB *sql.DB tables map[string]Table } var ( entriesPattern = regexp.MustCompile(`^\/[^\/]+\/?$`) entryPattern = regexp.MustCompile(`^\/[^\/]+\/.+`) ) type ApiErrorResponse struct { Error string `json:"error"` } type ApiSuccessResponse struct { Response interface{} `json:"response"` } type Table struct { Name string PrimaryKey string Fields map[string]Field } type Field struct { Name string Type string Nullable bool AutoIncrement bool Default interface{} } const ( DEFAULT_LIMIT = 5 DEFAULT_OFFSET = 0 ) func InternalServerError(err error, w http.ResponseWriter) { fmt.Println(err) w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) } func errorResponse(status int, message string, w http.ResponseWriter) { res, err := json.Marshal(ApiErrorResponse{message}) if err != nil { InternalServerError(err, w) return } w.WriteHeader(status) w.Write(res) } func successResponse(status int, obj interface{}, w http.ResponseWriter) { res, err := json.Marshal(ApiSuccessResponse{obj}) if err != nil { InternalServerError(err, w) return } w.WriteHeader(status) w.Write(res) } func (h *Handler) ListTables(w http.ResponseWriter, r *http.Request) { var tables []string for k := range h.tables { tables = append(tables, k) } sort.Strings(tables) successResponse(http.StatusOK, map[string][]string{"tables": tables}, w) } func getLimitOffset(u *url.URL) (int, int) { q := u.Query() limit := DEFAULT_LIMIT offset := DEFAULT_OFFSET limitParam, _ := q["limit"] if len(limitParam) > 0 { limitInt, err := strconv.Atoi(limitParam[0]) if err == nil { limit = limitInt } } offsetParam, _ := q["offset"] if len(offsetParam) > 0 { offsetInt, err := strconv.Atoi(offsetParam[0]) if err == nil { offset = offsetInt } } return limit, offset } func (h *Handler) List(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusNotFound, err.Error(), w) return } limit, offset := getLimitOffset(r.URL) rows, err := h.DB.Query("SELECT * FROM " + table.Name + " LIMIT ? OFFSET ?", limit, offset) if err != nil { InternalServerError(err, w) return } defer rows.Close() result, err := parseResponse(rows, table) if err != nil { InternalServerError(err, w) return } successResponse(http.StatusOK, map[string]interface{}{"records": result}, w) } func parseResponse(rows *sql.Rows, table *Table) ([]map[string]interface{}, error) { columns, err := rows.Columns() if err != nil { return nil, err } count := len(columns) result := make([]map[string]interface{}, 0) for rows.Next() { values := make([]interface{}, count) valuePtrs := make([]interface{}, count) for i := range columns { valuePtrs[i] = &values[i] } err = rows.Scan(valuePtrs...) if err != nil { return nil, err } result = append(result, parseRow(table, columns, values)) } return result, nil } func (h *Handler) Show(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } key, err := getKey(r) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } rows, err := h.DB.Query("SELECT * FROM " + table.Name + " WHERE " + table.PrimaryKey + " = ? 
LIMIT 1", key) if err != nil { InternalServerError(err, w) return } defer rows.Close() result, err := parseResponse(rows, table) if err != nil { InternalServerError(err, w) return } if len(result) == 0 { errorResponse(http.StatusNotFound, "record not found", w) return } successResponse(http.StatusOK, map[string]interface{}{"record": result[0]}, w) } func (h *Handler) Create(w http.ResponseWriter, r *http.Request)
func (h *Handler) Edit(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } key, err := getKey(r) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } body, err := ioutil.ReadAll(r.Body) defer r.Body.Close() if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var params map[string]interface{} err = json.Unmarshal(body, &params) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var fields []string var values []interface{} fmt.Printf("Params: %v",params) for key, value := range params { field, ok := table.Fields[key] if !ok { continue } // skip unknown fields fields = append(fields, field.Name) invalidTypeMessage := "field " + field.Name + " have invalid type" // update auto increment field does not allowed if field.AutoIncrement { errorResponse(http.StatusBadRequest, invalidTypeMessage, w) return } switch { case value == nil && field.Nullable: values = append(values, nil) case field.Type == "string": val, ok := value.(string) if !ok { errorResponse(http.StatusBadRequest, invalidTypeMessage, w) return } values = append(values, val) case field.Type == "int": val, ok := value.(int) if !ok { errorResponse(http.StatusBadRequest, invalidTypeMessage, w) return } values = append(values, val) } } values = append(values, key) fmt.Printf("\nFields: %v",fields) fmt.Printf("\nValues: %v",values) updateSQL := "UPDATE $table_name SET $fields = ? WHERE $primary_key = ?" updateSQL = strings.Replace(updateSQL, "$table_name", table.Name, 1) updateSQL = strings.Replace(updateSQL, "$primary_key", table.PrimaryKey, 1) updateSQL = strings.Replace(updateSQL, "$fields", strings.Join(fields, " = ?, "), 1) fmt.Println() fmt.Printf(updateSQL, values...) res, err := h.DB.Exec(updateSQL, values...) 
if err != nil { fmt.Println(err) InternalServerError(err, w) return } affected, err := res.RowsAffected() if err != nil { InternalServerError(err, w) return } successResponse(http.StatusOK, map[string]int64{"updated": affected}, w) } func getKey(r *http.Request) (int, error) { params := strings.Split(r.URL.Path, "/") key, err := strconv.Atoi(params[2]) if err != nil { return 0, fmt.Errorf("Primary key must be int") } return key, nil } func getTable(r *http.Request, h *Handler) (*Table, error) { params := strings.Split(r.URL.Path, "/") tableName := params[1] table, ok := h.tables[tableName] if !ok { return nil, fmt.Errorf("unknown table") } return &table, nil } func strToBool(in string) bool { if in == "YES" { return true } return false } type NullString struct { Valid bool String string } func (s NullString) MarshalJSON() ([]byte, error) { if s.Valid { return json.Marshal(s.String) } return []byte("null"), nil } func parseRow(table *Table, columns []string, vals []interface{}) map[string]interface{} { result := map[string]interface{}{} for i, val := range vals { fieldName := columns[i] field, ok := table.Fields[fieldName] if !ok { panic("Can't find field description") } switch { case field.Type == "string": b, ok := val.([]byte) if ok { result[fieldName] = NullString{true, string(b)} } else { result[fieldName] = NullString{false, ""} } case field.Type == "int": result[fieldName] = val } } return result } func (h *Handler) Delete(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } id, err := getKey(r) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } res, err := h.DB.Exec("DELETE FROM " + table.Name + " WHERE id = ?;", id) if err != nil { fmt.Println(err) InternalServerError(err, w) return } affected, err := res.RowsAffected() if err != nil { InternalServerError(err, w) return } successResponse(http.StatusOK, map[string]int64{"deleted": affected}, w) } func (h *Handler) LoadMetadata() { h.tables = map[string]Table{} rows, err := h.DB.Query("SHOW TABLES;") if err != nil { panic(err) } defer rows.Close() tables := make([]string, 0) for rows.Next() { var table string err = rows.Scan(&table) if err != nil { panic(err) } tables = append(tables, table) } for _, tableName := range tables { table := Table{ Name: tableName, } rows, err := h.DB.Query("SHOW FULL COLUMNS FROM " + tableName + ";") if err != nil { panic(err) } var skipColumn sql.NullString fields := map[string]Field{} for rows.Next() { var fieldInfo FieldInfo err = rows.Scan( &fieldInfo.Name, &fieldInfo.Type, &skipColumn, &fieldInfo.Nullable, &fieldInfo.Key, &fieldInfo.Default, &fieldInfo.Extra, &skipColumn, &skipColumn, ) if err != nil { panic(err) } if fieldInfo.Key.Valid && fieldInfo.Key.String == "PRI" { table.PrimaryKey = fieldInfo.Name } field := Field{ Name: fieldInfo.Name, Type: sqlTypeToGolangType(fieldInfo.Type), Nullable: strToBool(fieldInfo.Nullable), AutoIncrement: fieldInfo.Extra.Valid && fieldInfo.Extra.String == "auto_increment", } if (fieldInfo.Default.Valid) { field.Default = fieldInfo.Default.String } if field.Default == nil && !field.Nullable { switch field.Type { case "string": field.Default = "" case "int": field.Default = 0 } } fields[fieldInfo.Name] = field } table.Fields = fields h.tables[tableName] = table } } type FieldInfo struct { Name string Type string Nullable string Key sql.NullString Extra sql.NullString Default sql.NullString } func sqlTypeToGolangType(sqlType string) string { 
switch { case strings.Contains(sqlType, "int"): return "int" case sqlType == "text" || strings.Contains(sqlType, "varchar"): return "string" default: fmt.Println("Unknown type: ", sqlType) return "string" } } func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { fmt.Println("Request: ", r.Method, r.URL.Path) switch { case r.Method == "GET" && r.URL.Path == "/": h.ListTables(w, r) case r.Method == "GET" && entriesPattern.MatchString(r.URL.Path): h.List(w, r) case r.Method == "GET" && entryPattern.MatchString(r.URL.Path): h.Show(w, r) case r.Method == "PUT" && entriesPattern.MatchString(r.URL.Path): h.Create(w, r) case r.Method == "POST" && entryPattern.MatchString(r.URL.Path): h.Edit(w, r) case r.Method == "DELETE" && entryPattern.MatchString(r.URL.Path): h.Delete(w, r) default: errorResponse(http.StatusNotFound, "unknown method", w) } } func NewDbExplorer(db *sql.DB) (http.Handler, error) { handler := Handler{DB: db} handler.LoadMetadata() return handler, nil }
{ table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } body, err := ioutil.ReadAll(r.Body) defer r.Body.Close() if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var params map[string]interface{} err = json.Unmarshal(body, &params) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var fields []string var values []interface{} for _,field := range table.Fields { if field.AutoIncrement { continue } fields = append(fields, field.Name) value, _ := params[field.Name] if value == nil { values = append(values, field.Default) continue } values = append(values, value) } insertSQL := "INSERT INTO $table_name (`$fields`) VALUES ($values)" insertSQL = strings.Replace(insertSQL, "$table_name", table.Name, 1) insertSQL = strings.Replace(insertSQL, "$fields", strings.Join(fields, "`, `"), 1) insertSQL = strings.Replace(insertSQL, "$values", strings.Repeat("?, ", len(fields) - 1) + "?", 1) fmt.Println(insertSQL) _, err = h.DB.Exec(insertSQL, values...) if err != nil { fmt.Println(err) InternalServerError(err, w) return } rows, err := h.DB.Query("SELECT LAST_INSERT_ID()") if err != nil { InternalServerError(err, w) return } defer rows.Close() var key int for rows.Next() { err = rows.Scan(&key) if err != nil { InternalServerError(err, w) return } } successResponse(http.StatusOK, map[string]int{table.PrimaryKey: key}, w) }
identifier_body
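The `Create` body extracted as the middle above assembles its INSERT by splicing backquoted column names and a run of `?` placeholders into a template via `strings.Replace`. A standalone sketch of that placeholder-building technique; the `buildInsert` helper and the table/column names are illustrative, not from the source:

```go
package main

import (
	"fmt"
	"strings"
)

// buildInsert returns an INSERT statement with one ? placeholder per column.
// Identifiers are backquoted; values stay as placeholders for DB.Exec.
func buildInsert(table string, columns []string) string {
	placeholders := strings.TrimSuffix(strings.Repeat("?, ", len(columns)), ", ")
	return fmt.Sprintf("INSERT INTO `%s` (`%s`) VALUES (%s)",
		table, strings.Join(columns, "`, `"), placeholders)
}

func main() {
	// Hypothetical table and columns, just to show the output shape.
	fmt.Println(buildInsert("users", []string{"name", "email", "age"}))
	// INSERT INTO `users` (`name`, `email`, `age`) VALUES (?, ?, ?)
}
```

Building the run with `TrimSuffix` also sidesteps the original's `strings.Repeat("?, ", len(fields)-1) + "?"`, which panics on an empty column list because `strings.Repeat` rejects a negative count.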
db_explorer.go
package main import ( "net/http" "net/url" "database/sql" "encoding/json" "fmt" "regexp" "strings" "strconv" "io/ioutil" "sort" ) type Handler struct { DB *sql.DB tables map[string]Table } var ( entriesPattern = regexp.MustCompile(`^\/[^\/]+\/?$`) entryPattern = regexp.MustCompile(`^\/[^\/]+\/.+`) ) type ApiErrorResponse struct { Error string `json:"error"` } type ApiSuccessResponse struct { Response interface{} `json:"response"` } type Table struct { Name string PrimaryKey string Fields map[string]Field } type Field struct { Name string Type string Nullable bool AutoIncrement bool Default interface{} } const ( DEFAULT_LIMIT = 5 DEFAULT_OFFSET = 0 ) func InternalServerError(err error, w http.ResponseWriter) { fmt.Println(err) w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) } func errorResponse(status int, message string, w http.ResponseWriter) { res, err := json.Marshal(ApiErrorResponse{message}) if err != nil { InternalServerError(err, w) return } w.WriteHeader(status) w.Write(res) } func successResponse(status int, obj interface{}, w http.ResponseWriter) { res, err := json.Marshal(ApiSuccessResponse{obj}) if err != nil { InternalServerError(err, w) return } w.WriteHeader(status) w.Write(res) } func (h *Handler) ListTables(w http.ResponseWriter, r *http.Request) { var tables []string for k := range h.tables { tables = append(tables, k) } sort.Strings(tables) successResponse(http.StatusOK, map[string][]string{"tables": tables}, w) } func getLimitOffset(u *url.URL) (int, int) { q := u.Query() limit := DEFAULT_LIMIT offset := DEFAULT_OFFSET limitParam, _ := q["limit"] if len(limitParam) > 0 { limitInt, err := strconv.Atoi(limitParam[0]) if err == nil { limit = limitInt } } offsetParam, _ := q["offset"] if len(offsetParam) > 0 { offsetInt, err := strconv.Atoi(offsetParam[0]) if err == nil { offset = offsetInt } } return limit, offset } func (h *Handler) List(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusNotFound, err.Error(), w) return } limit, offset := getLimitOffset(r.URL) rows, err := h.DB.Query("SELECT * FROM " + table.Name + " LIMIT ? OFFSET ?", limit, offset) if err != nil { InternalServerError(err, w) return } defer rows.Close() result, err := parseResponse(rows, table) if err != nil { InternalServerError(err, w) return } successResponse(http.StatusOK, map[string]interface{}{"records": result}, w) } func parseResponse(rows *sql.Rows, table *Table) ([]map[string]interface{}, error) { columns, err := rows.Columns() if err != nil { return nil, err } count := len(columns) result := make([]map[string]interface{}, 0) for rows.Next() { values := make([]interface{}, count) valuePtrs := make([]interface{}, count) for i := range columns { valuePtrs[i] = &values[i] } err = rows.Scan(valuePtrs...) if err != nil { return nil, err } result = append(result, parseRow(table, columns, values)) } return result, nil } func (h *Handler) Show(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } key, err := getKey(r) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } rows, err := h.DB.Query("SELECT * FROM " + table.Name + " WHERE " + table.PrimaryKey + " = ? LIMIT 1", key) if err != nil { InternalServerError(err, w)
return } defer rows.Close() result, err := parseResponse(rows, table) if err != nil { InternalServerError(err, w) return } if len(result) == 0 { errorResponse(http.StatusNotFound, "record not found", w) return } successResponse(http.StatusOK, map[string]interface{}{"record": result[0]}, w) } func (h *Handler) Create(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } body, err := ioutil.ReadAll(r.Body) defer r.Body.Close() if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var params map[string]interface{} err = json.Unmarshal(body, &params) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var fields []string var values []interface{} for _,field := range table.Fields { if field.AutoIncrement { continue } fields = append(fields, field.Name) value, _ := params[field.Name] if value == nil { values = append(values, field.Default) continue } values = append(values, value) } insertSQL := "INSERT INTO $table_name (`$fields`) VALUES ($values)" insertSQL = strings.Replace(insertSQL, "$table_name", table.Name, 1) insertSQL = strings.Replace(insertSQL, "$fields", strings.Join(fields, "`, `"), 1) insertSQL = strings.Replace(insertSQL, "$values", strings.Repeat("?, ", len(fields) - 1) + "?", 1) fmt.Println(insertSQL) _, err = h.DB.Exec(insertSQL, values...) if err != nil { fmt.Println(err) InternalServerError(err, w) return } rows, err := h.DB.Query("SELECT LAST_INSERT_ID()") if err != nil { InternalServerError(err, w) return } defer rows.Close() var key int for rows.Next() { err = rows.Scan(&key) if err != nil { InternalServerError(err, w) return } } successResponse(http.StatusOK, map[string]int{table.PrimaryKey: key}, w) } func (h *Handler) Edit(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } key, err := getKey(r) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } body, err := ioutil.ReadAll(r.Body) defer r.Body.Close() if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var params map[string]interface{} err = json.Unmarshal(body, &params) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var fields []string var values []interface{} fmt.Printf("Params: %v",params) for key, value := range params { field, ok := table.Fields[key] if !ok { continue } // skip unknown fields fields = append(fields, field.Name) invalidTypeMessage := "field " + field.Name + " have invalid type" // update auto increment field does not allowed if field.AutoIncrement { errorResponse(http.StatusBadRequest, invalidTypeMessage, w) return } switch { case value == nil && field.Nullable: values = append(values, nil) case field.Type == "string": val, ok := value.(string) if !ok { errorResponse(http.StatusBadRequest, invalidTypeMessage, w) return } values = append(values, val) case field.Type == "int": val, ok := value.(int) if !ok { errorResponse(http.StatusBadRequest, invalidTypeMessage, w) return } values = append(values, val) } } values = append(values, key) fmt.Printf("\nFields: %v",fields) fmt.Printf("\nValues: %v",values) updateSQL := "UPDATE $table_name SET $fields = ? WHERE $primary_key = ?" 
updateSQL = strings.Replace(updateSQL, "$table_name", table.Name, 1) updateSQL = strings.Replace(updateSQL, "$primary_key", table.PrimaryKey, 1) updateSQL = strings.Replace(updateSQL, "$fields", strings.Join(fields, " = ?, "), 1) fmt.Println() fmt.Printf(updateSQL, values...) res, err := h.DB.Exec(updateSQL, values...) if err != nil { fmt.Println(err) InternalServerError(err, w) return } affected, err := res.RowsAffected() if err != nil { InternalServerError(err, w) return } successResponse(http.StatusOK, map[string]int64{"updated": affected}, w) } func getKey(r *http.Request) (int, error) { params := strings.Split(r.URL.Path, "/") key, err := strconv.Atoi(params[2]) if err != nil { return 0, fmt.Errorf("Primary key must be int") } return key, nil } func getTable(r *http.Request, h *Handler) (*Table, error) { params := strings.Split(r.URL.Path, "/") tableName := params[1] table, ok := h.tables[tableName] if !ok { return nil, fmt.Errorf("unknown table") } return &table, nil } func strToBool(in string) bool { if in == "YES" { return true } return false } type NullString struct { Valid bool String string } func (s NullString) MarshalJSON() ([]byte, error) { if s.Valid { return json.Marshal(s.String) } return []byte("null"), nil } func parseRow(table *Table, columns []string, vals []interface{}) map[string]interface{} { result := map[string]interface{}{} for i, val := range vals { fieldName := columns[i] field, ok := table.Fields[fieldName] if !ok { panic("Can't find field description") } switch { case field.Type == "string": b, ok := val.([]byte) if ok { result[fieldName] = NullString{true, string(b)} } else { result[fieldName] = NullString{false, ""} } case field.Type == "int": result[fieldName] = val } } return result } func (h *Handler) Delete(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } id, err := getKey(r) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } res, err := h.DB.Exec("DELETE FROM " + table.Name + " WHERE id = ?;", id) if err != nil { fmt.Println(err) InternalServerError(err, w) return } affected, err := res.RowsAffected() if err != nil { InternalServerError(err, w) return } successResponse(http.StatusOK, map[string]int64{"deleted": affected}, w) } func (h *Handler) LoadMetadata() { h.tables = map[string]Table{} rows, err := h.DB.Query("SHOW TABLES;") if err != nil { panic(err) } defer rows.Close() tables := make([]string, 0) for rows.Next() { var table string err = rows.Scan(&table) if err != nil { panic(err) } tables = append(tables, table) } for _, tableName := range tables { table := Table{ Name: tableName, } rows, err := h.DB.Query("SHOW FULL COLUMNS FROM " + tableName + ";") if err != nil { panic(err) } var skipColumn sql.NullString fields := map[string]Field{} for rows.Next() { var fieldInfo FieldInfo err = rows.Scan( &fieldInfo.Name, &fieldInfo.Type, &skipColumn, &fieldInfo.Nullable, &fieldInfo.Key, &fieldInfo.Default, &fieldInfo.Extra, &skipColumn, &skipColumn, ) if err != nil { panic(err) } if fieldInfo.Key.Valid && fieldInfo.Key.String == "PRI" { table.PrimaryKey = fieldInfo.Name } field := Field{ Name: fieldInfo.Name, Type: sqlTypeToGolangType(fieldInfo.Type), Nullable: strToBool(fieldInfo.Nullable), AutoIncrement: fieldInfo.Extra.Valid && fieldInfo.Extra.String == "auto_increment", } if (fieldInfo.Default.Valid) { field.Default = fieldInfo.Default.String } if field.Default == nil && !field.Nullable { switch field.Type { case "string": 
field.Default = "" case "int": field.Default = 0 } } fields[fieldInfo.Name] = field } table.Fields = fields h.tables[tableName] = table } } type FieldInfo struct { Name string Type string Nullable string Key sql.NullString Extra sql.NullString Default sql.NullString } func sqlTypeToGolangType(sqlType string) string { switch { case strings.Contains(sqlType, "int"): return "int" case sqlType == "text" || strings.Contains(sqlType, "varchar"): return "string" default: fmt.Println("Unknown type: ", sqlType) return "string" } } func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { fmt.Println("Request: ", r.Method, r.URL.Path) switch { case r.Method == "GET" && r.URL.Path == "/": h.ListTables(w, r) case r.Method == "GET" && entriesPattern.MatchString(r.URL.Path): h.List(w, r) case r.Method == "GET" && entryPattern.MatchString(r.URL.Path): h.Show(w, r) case r.Method == "PUT" && entriesPattern.MatchString(r.URL.Path): h.Create(w, r) case r.Method == "POST" && entryPattern.MatchString(r.URL.Path): h.Edit(w, r) case r.Method == "DELETE" && entryPattern.MatchString(r.URL.Path): h.Delete(w, r) default: errorResponse(http.StatusNotFound, "unknown method", w) } } func NewDbExplorer(db *sql.DB) (http.Handler, error) { handler := Handler{DB: db} handler.LoadMetadata() return handler, nil }
random_line_split
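The `NullString` type defined in the record above implements `json.Marshaler` so that a NULL column serializes as JSON `null` instead of `""`. A small usage sketch (the record keys and values are invented for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// NullString mirrors the type in db_explorer.go: Valid marks whether
// the value was NULL in the database.
type NullString struct {
	Valid  bool
	String string
}

// MarshalJSON emits the string when Valid, and JSON null otherwise.
func (s NullString) MarshalJSON() ([]byte, error) {
	if s.Valid {
		return json.Marshal(s.String)
	}
	return []byte("null"), nil
}

func main() {
	record := map[string]interface{}{
		"title":       NullString{Valid: true, String: "hello"},
		"description": NullString{Valid: false}, // was NULL in the DB
	}
	out, _ := json.Marshal(record)
	fmt.Println(string(out)) // {"description":null,"title":"hello"}
}
```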
db_explorer.go
package main import ( "net/http" "net/url" "database/sql" "encoding/json" "fmt" "regexp" "strings" "strconv" "io/ioutil" "sort" ) type Handler struct { DB *sql.DB tables map[string]Table } var ( entriesPattern = regexp.MustCompile(`^\/[^\/]+\/?$`) entryPattern = regexp.MustCompile(`^\/[^\/]+\/.+`) ) type ApiErrorResponse struct { Error string `json:"error"` } type ApiSuccessResponse struct { Response interface{} `json:"response"` } type Table struct { Name string PrimaryKey string Fields map[string]Field } type Field struct { Name string Type string Nullable bool AutoIncrement bool Default interface{} } const ( DEFAULT_LIMIT = 5 DEFAULT_OFFSET = 0 ) func InternalServerError(err error, w http.ResponseWriter) { fmt.Println(err) w.WriteHeader(http.StatusInternalServerError) w.Write([]byte(err.Error())) } func errorResponse(status int, message string, w http.ResponseWriter) { res, err := json.Marshal(ApiErrorResponse{message}) if err != nil { InternalServerError(err, w) return } w.WriteHeader(status) w.Write(res) } func successResponse(status int, obj interface{}, w http.ResponseWriter) { res, err := json.Marshal(ApiSuccessResponse{obj}) if err != nil { InternalServerError(err, w) return } w.WriteHeader(status) w.Write(res) } func (h *Handler) ListTables(w http.ResponseWriter, r *http.Request) { var tables []string for k := range h.tables { tables = append(tables, k) } sort.Strings(tables) successResponse(http.StatusOK, map[string][]string{"tables": tables}, w) } func getLimitOffset(u *url.URL) (int, int) { q := u.Query() limit := DEFAULT_LIMIT offset := DEFAULT_OFFSET limitParam, _ := q["limit"] if len(limitParam) > 0 { limitInt, err := strconv.Atoi(limitParam[0]) if err == nil { limit = limitInt } } offsetParam, _ := q["offset"] if len(offsetParam) > 0 { offsetInt, err := strconv.Atoi(offsetParam[0]) if err == nil { offset = offsetInt } } return limit, offset } func (h *Handler) List(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusNotFound, err.Error(), w) return } limit, offset := getLimitOffset(r.URL) rows, err := h.DB.Query("SELECT * FROM " + table.Name + " LIMIT ? OFFSET ?", limit, offset) if err != nil { InternalServerError(err, w) return } defer rows.Close() result, err := parseResponse(rows, table) if err != nil { InternalServerError(err, w) return } successResponse(http.StatusOK, map[string]interface{}{"records": result}, w) } func parseResponse(rows *sql.Rows, table *Table) ([]map[string]interface{}, error) { columns, err := rows.Columns() if err != nil { return nil, err } count := len(columns) result := make([]map[string]interface{}, 0) for rows.Next() { values := make([]interface{}, count) valuePtrs := make([]interface{}, count) for i := range columns { valuePtrs[i] = &values[i] } err = rows.Scan(valuePtrs...) if err != nil { return nil, err } result = append(result, parseRow(table, columns, values)) } return result, nil } func (h *Handler) Show(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } key, err := getKey(r) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } rows, err := h.DB.Query("SELECT * FROM " + table.Name + " WHERE " + table.PrimaryKey + " = ? 
LIMIT 1", key) if err != nil { InternalServerError(err, w) return } defer rows.Close() result, err := parseResponse(rows, table) if err != nil { InternalServerError(err, w) return } if len(result) == 0 { errorResponse(http.StatusNotFound, "record not found", w) return } successResponse(http.StatusOK, map[string]interface{}{"record": result[0]}, w) } func (h *Handler) Create(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } body, err := ioutil.ReadAll(r.Body) defer r.Body.Close() if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var params map[string]interface{} err = json.Unmarshal(body, &params) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var fields []string var values []interface{} for _,field := range table.Fields { if field.AutoIncrement { continue } fields = append(fields, field.Name) value, _ := params[field.Name] if value == nil { values = append(values, field.Default) continue } values = append(values, value) } insertSQL := "INSERT INTO $table_name (`$fields`) VALUES ($values)" insertSQL = strings.Replace(insertSQL, "$table_name", table.Name, 1) insertSQL = strings.Replace(insertSQL, "$fields", strings.Join(fields, "`, `"), 1) insertSQL = strings.Replace(insertSQL, "$values", strings.Repeat("?, ", len(fields) - 1) + "?", 1) fmt.Println(insertSQL) _, err = h.DB.Exec(insertSQL, values...) if err != nil { fmt.Println(err) InternalServerError(err, w) return } rows, err := h.DB.Query("SELECT LAST_INSERT_ID()") if err != nil { InternalServerError(err, w) return } defer rows.Close() var key int for rows.Next() { err = rows.Scan(&key) if err != nil { InternalServerError(err, w) return } } successResponse(http.StatusOK, map[string]int{table.PrimaryKey: key}, w) } func (h *Handler) Edit(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } key, err := getKey(r) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } body, err := ioutil.ReadAll(r.Body) defer r.Body.Close() if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var params map[string]interface{} err = json.Unmarshal(body, &params) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } var fields []string var values []interface{} fmt.Printf("Params: %v",params) for key, value := range params { field, ok := table.Fields[key] if !ok { continue } // skip unknown fields fields = append(fields, field.Name) invalidTypeMessage := "field " + field.Name + " have invalid type" // update auto increment field does not allowed if field.AutoIncrement { errorResponse(http.StatusBadRequest, invalidTypeMessage, w) return } switch { case value == nil && field.Nullable: values = append(values, nil) case field.Type == "string": val, ok := value.(string) if !ok { errorResponse(http.StatusBadRequest, invalidTypeMessage, w) return } values = append(values, val) case field.Type == "int": val, ok := value.(int) if !ok { errorResponse(http.StatusBadRequest, invalidTypeMessage, w) return } values = append(values, val) } } values = append(values, key) fmt.Printf("\nFields: %v",fields) fmt.Printf("\nValues: %v",values) updateSQL := "UPDATE $table_name SET $fields = ? WHERE $primary_key = ?" 
updateSQL = strings.Replace(updateSQL, "$table_name", table.Name, 1) updateSQL = strings.Replace(updateSQL, "$primary_key", table.PrimaryKey, 1) updateSQL = strings.Replace(updateSQL, "$fields", strings.Join(fields, " = ?, "), 1) fmt.Println() fmt.Printf(updateSQL, values...) res, err := h.DB.Exec(updateSQL, values...) if err != nil { fmt.Println(err) InternalServerError(err, w) return } affected, err := res.RowsAffected() if err != nil { InternalServerError(err, w) return } successResponse(http.StatusOK, map[string]int64{"updated": affected}, w) } func getKey(r *http.Request) (int, error) { params := strings.Split(r.URL.Path, "/") key, err := strconv.Atoi(params[2]) if err != nil { return 0, fmt.Errorf("Primary key must be int") } return key, nil } func getTable(r *http.Request, h *Handler) (*Table, error) { params := strings.Split(r.URL.Path, "/") tableName := params[1] table, ok := h.tables[tableName] if !ok { return nil, fmt.Errorf("unknown table") } return &table, nil } func strToBool(in string) bool { if in == "YES" { return true } return false } type NullString struct { Valid bool String string } func (s NullString) MarshalJSON() ([]byte, error) { if s.Valid { return json.Marshal(s.String) } return []byte("null"), nil } func parseRow(table *Table, columns []string, vals []interface{}) map[string]interface{} { result := map[string]interface{}{} for i, val := range vals { fieldName := columns[i] field, ok := table.Fields[fieldName] if !ok { panic("Can't find field description") } switch { case field.Type == "string": b, ok := val.([]byte) if ok { result[fieldName] = NullString{true, string(b)} } else { result[fieldName] = NullString{false, ""} } case field.Type == "int": result[fieldName] = val } } return result } func (h *Handler)
(w http.ResponseWriter, r *http.Request) { table, err := getTable(r, h) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } id, err := getKey(r) if err != nil { errorResponse(http.StatusBadRequest, err.Error(), w) return } res, err := h.DB.Exec("DELETE FROM " + table.Name + " WHERE id = ?;", id) if err != nil { fmt.Println(err) InternalServerError(err, w) return } affected, err := res.RowsAffected() if err != nil { InternalServerError(err, w) return } successResponse(http.StatusOK, map[string]int64{"deleted": affected}, w) } func (h *Handler) LoadMetadata() { h.tables = map[string]Table{} rows, err := h.DB.Query("SHOW TABLES;") if err != nil { panic(err) } defer rows.Close() tables := make([]string, 0) for rows.Next() { var table string err = rows.Scan(&table) if err != nil { panic(err) } tables = append(tables, table) } for _, tableName := range tables { table := Table{ Name: tableName, } rows, err := h.DB.Query("SHOW FULL COLUMNS FROM " + tableName + ";") if err != nil { panic(err) } var skipColumn sql.NullString fields := map[string]Field{} for rows.Next() { var fieldInfo FieldInfo err = rows.Scan( &fieldInfo.Name, &fieldInfo.Type, &skipColumn, &fieldInfo.Nullable, &fieldInfo.Key, &fieldInfo.Default, &fieldInfo.Extra, &skipColumn, &skipColumn, ) if err != nil { panic(err) } if fieldInfo.Key.Valid && fieldInfo.Key.String == "PRI" { table.PrimaryKey = fieldInfo.Name } field := Field{ Name: fieldInfo.Name, Type: sqlTypeToGolangType(fieldInfo.Type), Nullable: strToBool(fieldInfo.Nullable), AutoIncrement: fieldInfo.Extra.Valid && fieldInfo.Extra.String == "auto_increment", } if (fieldInfo.Default.Valid) { field.Default = fieldInfo.Default.String } if field.Default == nil && !field.Nullable { switch field.Type { case "string": field.Default = "" case "int": field.Default = 0 } } fields[fieldInfo.Name] = field } table.Fields = fields h.tables[tableName] = table } } type FieldInfo struct { Name string Type string Nullable string Key sql.NullString Extra sql.NullString Default sql.NullString } func sqlTypeToGolangType(sqlType string) string { switch { case strings.Contains(sqlType, "int"): return "int" case sqlType == "text" || strings.Contains(sqlType, "varchar"): return "string" default: fmt.Println("Unknown type: ", sqlType) return "string" } } func (h Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { fmt.Println("Request: ", r.Method, r.URL.Path) switch { case r.Method == "GET" && r.URL.Path == "/": h.ListTables(w, r) case r.Method == "GET" && entriesPattern.MatchString(r.URL.Path): h.List(w, r) case r.Method == "GET" && entryPattern.MatchString(r.URL.Path): h.Show(w, r) case r.Method == "PUT" && entriesPattern.MatchString(r.URL.Path): h.Create(w, r) case r.Method == "POST" && entryPattern.MatchString(r.URL.Path): h.Edit(w, r) case r.Method == "DELETE" && entryPattern.MatchString(r.URL.Path): h.Delete(w, r) default: errorResponse(http.StatusNotFound, "unknown method", w) } } func NewDbExplorer(db *sql.DB) (http.Handler, error) { handler := Handler{DB: db} handler.LoadMetadata() return handler, nil }
Delete
identifier_name
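Across every copy of `ServeHTTP` above, routing is a switch over the HTTP method plus two precompiled regexes: `entriesPattern` matches a bare `/table` collection path and `entryPattern` matches `/table/id`. A stripped-down sketch of that dispatch style, with placeholder handlers and an assumed port (the regexes are the source's, minus the redundant `\/` escapes):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"regexp"
)

var (
	// /users or /users/ -> collection routes
	collectionRe = regexp.MustCompile(`^/[^/]+/?$`)
	// /users/42 -> single-entry routes
	entryRe = regexp.MustCompile(`^/[^/]+/.+`)
)

type router struct{}

func (router) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	switch {
	case r.Method == http.MethodGet && r.URL.Path == "/":
		fmt.Fprintln(w, "list tables")
	case r.Method == http.MethodGet && collectionRe.MatchString(r.URL.Path):
		fmt.Fprintln(w, "list records")
	case r.Method == http.MethodGet && entryRe.MatchString(r.URL.Path):
		fmt.Fprintln(w, "show record")
	default:
		http.Error(w, `{"error":"unknown method"}`, http.StatusNotFound)
	}
}

func main() {
	// Handlers above only echo which route matched; a real service would
	// delegate to List/Show/Create/Edit/Delete as in db_explorer.go.
	log.Fatal(http.ListenAndServe(":8080", router{}))
}
```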
DQN_1.py
import numpy as np import pandas as pd import time import tkinter as tk import tensorflow.compat.v1 as tf ''' 4*4 maze: --------------------------- | start | | | | --------------------------- | | | trap | | --------------------------- | | trap | goal | | --------------------------- | | | | | --------------------------- ''' UNIT = 40 # pixels MAZE_H = 4 # grid height MAZE_W = 4 # grid width class Maze(tk.Tk, object): def __init__(self): super(Maze, self).__init__() self.action_space = ['u', 'd', 'l', 'r'] self.n_actions = len(self.action_space) self.n_features = 2 self.title('maze') self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT)) self._build_maze() def _build_maze(self): self.canvas = tk.Canvas(self, bg='white', height=MAZE_H * UNIT, width=MAZE_W * UNIT) # create grids for c in range(0, MAZE_W * UNIT, UNIT): x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT self.canvas.create_line(x0, y0, x1, y1) for r in range(0, MAZE_H * UNIT, UNIT): x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r self.canvas.create_line(x0, y0, x1, y1) # create origin origin = np.array([20, 20]) # hell hell1_center = origin + np.array([UNIT * 2, UNIT]) self.hell1 = self.canvas.create_rectangle( hell1_center[0] - 15, hell1_center[1] - 15, hell1_center[0] + 15, hell1_center[1] + 15, fill='black') # hell # hell2_center = origin + np.array([UNIT, UNIT * 2]) # self.hell2 = self.canvas.create_rectangle( # hell2_center[0] - 15, hell2_center[1] - 15, # hell2_center[0] + 15, hell2_center[1] + 15, # fill='black') # create oval oval_center = origin + UNIT * 2 self.oval = self.canvas.create_oval( oval_center[0] - 15, oval_center[1] - 15, oval_center[0] + 15, oval_center[1] + 15, fill='yellow') # create red rect self.rect = self.canvas.create_rectangle( origin[0] - 15, origin[1] - 15, origin[0] + 15, origin[1] + 15, fill='red') # pack all self.canvas.pack() def reset(self): self.update() time.sleep(0.1) self.canvas.delete(self.rect) origin = np.array([20, 20]) self.rect = self.canvas.create_rectangle( origin[0] - 15, origin[1] - 15, origin[0] + 15, origin[1] + 15, fill='red') # return observation return (np.array(self.canvas.coords(self.rect)[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / ( MAZE_H * UNIT) def step(self, action): s = self.canvas.coords(self.rect) base_action = np.array([0, 0]) if action == 0: # up if s[1] > UNIT: base_action[1] -= UNIT elif action == 1: # down if s[1] < (MAZE_H - 1) * UNIT: base_action[1] += UNIT elif action == 2: # right if s[0] < (MAZE_W - 1) * UNIT: base_action[0] += UNIT elif action == 3: # left if s[0] > UNIT: base_action[0] -= UNIT self.canvas.move(self.rect, base_action[0], base_action[1]) # move agent next_coords = self.canvas.coords(self.rect) # next state # reward function if next_coords == self.canvas.coords(self.oval): reward = 1 done = True elif next_coords in [self.canvas.coords(self.hell1)]: reward = -1 done = True else: reward = 0 done = False s_ = (np.array(next_coords[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / (MAZE_H * UNIT) return s_, reward, done def render(self): #
leep(0.01) self.update() np.random.seed(1) tf.set_random_seed(1) class DeepQNetwork: # build the neural networks def _build_net(self): # -------------- build the eval net, whose parameters are updated every step -------------- tf.compat.v1.disable_eager_execution() self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # receives the observation self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target') # receives the q_target values, which are computed later with tf.variable_scope('eval_net'): # c_names (collections_names) are used when updating target_net's parameters c_names, n_l1, w_initializer, b_initializer = \ ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, \ tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1) # config of layers # first layer of eval_net. collections are used when updating target_net's parameters with tf.variable_scope('l1'): w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names) b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names) l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1) # second layer of eval_net. collections are used when updating target_net's parameters with tf.variable_scope('l2'): w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names) b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names) self.q_eval = tf.matmul(l1, w2) + b2 with tf.variable_scope('loss'): # compute the loss self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval)) with tf.variable_scope('train'): # gradient descent self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss) # ---------------- build the target net, which provides the target Q --------------------- self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # receives the next observation with tf.variable_scope('target_net'): # c_names (collections_names) are used when updating target_net's parameters c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES] # first layer of target_net. collections are used when updating target_net's parameters with tf.variable_scope('l1'): w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names) b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names) l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1) # second layer of target_net. collections are used when updating target_net's parameters with tf.variable_scope('l2'): w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names) b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names) self.q_next = tf.matmul(l1, w2) + b2 def __init__( self, n_actions, n_features, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9, replace_target_iter=300, memory_size=500, batch_size=32, e_greedy_increment=None, output_graph=False, ): self.n_actions = n_actions self.n_features = n_features self.lr = learning_rate self.gamma = reward_decay self.epsilon_max = e_greedy # maximum value of epsilon self.replace_target_iter = replace_target_iter # number of steps between target_net replacements self.memory_size = memory_size # replay memory capacity self.batch_size = batch_size # how many memories to sample from memory per update self.epsilon_increment = e_greedy_increment # increment of epsilon self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max # whether to run in exploration mode, gradually reducing exploration # counts learning steps (used to decide when to replace target_net's parameters) self.learn_step_counter = 0 # initialize the memory to all zeros: [s, a, r, s_] self.memory = np.zeros((self.memory_size, n_features * 2 + 2)) # unlike the video version: pandas is slow, so plain numpy is used here # build [target_net, evaluate_net] self._build_net() # ops that replace target_net's parameters t_params = tf.get_collection('target_net_params') # fetch target_net's parameters e_params = tf.get_collection('eval_net_params') # fetch eval_net's parameters self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)] # update target_net's parameters self.sess = tf.Session() # write a tensorboard file if output_graph: # $ tensorboard --logdir=logs tf.summary.FileWriter("logs/", self.sess.graph) self.sess.run(tf.global_variables_initializer()) self.cost_his = [] # record every cost so the curve can be plotted at the end def store_transition(self, s, a, r, s_): if not hasattr(self, 'memory_counter'): self.memory_counter = 0 # store one [s, a, r, s_] transition transition = np.hstack((s, [a, r], s_)) # total memory size is fixed; once it is exceeded, old memories are overwritten by new ones index = self.memory_counter % self.memory_size self.memory[index, :] = transition # overwrite self.memory_counter += 1 def choose_action(self, observation): # normalize the observation's shape to (1, size_of_observation) observation = observation[np.newaxis, :] if np.random.uniform() < self.epsilon: # have eval_net produce every action's value, then pick the action with the largest value actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation}) action = np.argmax(actions_value) else: action = np.random.randint(0, self.n_actions) # random choice return action def learn(self): # check whether to replace target_net's parameters if self.learn_step_counter % self.replace_target_iter == 0: self.sess.run(self.replace_target_op) print('\ntarget_params_replaced\n') # randomly sample batch_size memories from memory if self.memory_counter > self.memory_size: sample_index = np.random.choice(self.memory_size, size=self.batch_size) else: sample_index = np.random.choice(self.memory_counter, size=self.batch_size) batch_memory = self.memory[sample_index, :] # get q_next (the q produced by target_net) and q_eval (the q produced by eval_net) q_next, q_eval = self.sess.run( [self.q_next, self.q_eval], feed_dict={ self.s_: batch_memory[:, -self.n_features:], self.s: batch_memory[:, :self.n_features] }) # The next few steps matter a lot. q_next and q_eval contain values for every action, # but we only need the value of the action that was actually chosen; the rest are irrelevant. # So we set every other action's value to 0 and back-propagate only the chosen action's error as the update signal. # This is the shape we are after, e.g. q_target - q_eval = [1, 0, 0] - [-1, 0, 0] = [2, 0, 0] # q_eval = [-1, 0, 0] means this memory chose action 0, and action 0 yielded Q(s, a0) = -1, so the others are Q(s, a1) = Q(s, a2) = 0. # q_target = [1, 0, 0] means this memory's r+gamma*maxQ(s_) = 1, and no matter which action is taken at s_, # it has to line up with the chosen action's position in q_eval, so the 1 is placed at action 0's slot. # The code below reaches the same goal, but takes a slightly different route that is easier to compute. # It assigns q_eval wholesale to q_target, so q_target - q_eval is all zeros at first, # then uses the action column in batch_memory to overwrite the corresponding memory-action positions in q_target, # assigning reward + gamma * maxQ(s_) there, which turns q_target - q_eval into exactly the shape we need. # A concrete example follows below. q_target = q_eval.copy() batch_index = np.arange(self.batch_size, dtype=np.int32) eval_act_index = batch_memory[:, self.n_features].astype(int) reward = batch_memory[:, self.n_features + 1] q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1) """ Suppose this batch holds 2 sampled memories, and each memory produces values for 3 actions: q_eval = [[1, 2, 3], [4, 5, 6]] q_target = q_eval = [[1, 2, 3], [4, 5, 6]] Then q_target is modified at each memory's recorded action position: say memory 0's computed q_target value is -1 and it used action 0, while memory 1's computed value is -2 and it used action 2: q_target = [[-1, 2, 3], [4, 5, -2]] So (q_target - q_eval) becomes: [[(-1)-(1), 0, 0], [0, 0, (-2)-(6)]] Finally we treat this (q_target - q_eval) as the error and back-propagate it through the network. Every action value that is 0 belongs to an action that was not chosen at the time; only previously chosen actions carry non-zero values, and only those values are back-propagated. """ # train eval_net _, self.cost = self.sess.run([self._train_op, self.loss], feed_dict={self.s: batch_memory[:, :self.n_features], self.q_target: q_target}) self.cost_his.append(self.cost) # record the cost # gradually increase epsilon to reduce the randomness of behavior self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max self.learn_step_counter += 1 def plot_cost(self): import matplotlib.pyplot as plt plt.plot(np.arange(len(self.cost_his)), self.cost_his) plt.ylabel('Cost') plt.xlabel('training steps') plt.show() def run_maze(): step = 0 # controls when learning starts for episode in range(300): # reset the environment observation = env.reset() while True: # refresh the display env.render() # the DQN picks an action from the observation action = RL.choose_action(observation) # the environment returns the next state, the reward, and whether the episode is done observation_, reward, done = env.step(action) # the DQN stores the transition RL.store_transition(observation, action, reward, observation_) # control when and how often learning happens (accumulate some memories before learning starts) if (step > 200) and (step % 5 == 0): RL.learn() # the next state_ becomes the state of the next iteration observation = observation_ # break out of the loop on termination if done: break step += 1 # total steps # end of game print('game over') env.destroy() if __name__ == "__main__": env = Maze() RL = DeepQNetwork(env.n_actions, env.n_features, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9, replace_target_iter=200, # replace target_net's parameters every 200 steps memory_size=2000, # replay memory capacity # output_graph=True # whether to write a tensorboard file ) env.after(100, run_maze) env.mainloop() RL.plot_cost() # view the network's cost curve
time.s
identifier_name
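A minimal NumPy sketch of the q_target construction walked through in the learn() comments above; the batch values are illustrative, not taken from a real run:

import numpy as np

gamma = 0.9
q_eval = np.array([[1., 2., 3.], [4., 5., 6.]])        # eval_net output for 2 memories
q_next = np.array([[0.5, 1.0, 0.2], [0.1, 0.3, 0.9]])  # target_net output for s_
actions = np.array([0, 2])                             # actions actually taken
rewards = np.array([-1.0, -2.0])

q_target = q_eval.copy()
batch_index = np.arange(len(actions), dtype=np.int32)
# only the taken action's entry is overwritten, so (q_target - q_eval)
# is zero everywhere except at the chosen actions
q_target[batch_index, actions] = rewards + gamma * q_next.max(axis=1)
print(q_target - q_eval)   # non-zero only at (0, 0) and (1, 2)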
DQN_1.py
import numpy as np import pandas as pd import time import tkinter as tk import tensorflow.compat.v1 as tf ''' 4*4 的迷宫: --------------------------- | 入口 | | | | --------------------------- | | | 陷阱 | | --------------------------- | | 陷阱 | 终点 | | --------------------------- | | | | | --------------------------- ''' UNIT = 40 # pixels MAZE_H = 4 # grid height MAZE_W = 4 # grid width class Maze(tk.Tk, object): def __init__(self): super(Maze, self).__init__() self.action_space = ['u', 'd', 'l', 'r'] self.n_actions = len(self.action_space) self.n_features = 2 self.title('maze') self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT)) self._build_maze() def _build_maze(self): self.canvas = tk.Canvas(self, bg='white', height=MAZE_H * UNIT, width=MAZE_W * UNIT) # create grids for c in range(0, MAZE_W * UNIT, UNIT): x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT self.canvas.create_line(x0, y0, x1, y1) for r in range(0, MAZE_H * UNIT, UNIT): x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r self.canvas.create_line(x0, y0, x1, y1) # create origin origin = np.array([20, 20]) # hell hell1_center = origin + np.array([UNIT * 2, UNIT]) self.hell1 = self.canvas.create_rectangle( hell1_center[0] - 15, hell1_center[1] - 15, hell1_center[0] + 15, hell1_center[1] + 15, fill='black') # hell # hell2_center = origin + np.array([UNIT, UNIT * 2]) # self.hell2 = self.canvas.create_rectangle( # hell2_center[0] - 15, hell2_center[1] - 15, # hell2_center[0] + 15, hell2_center[1] + 15, # fill='black') # create oval oval_center = origin + UNIT * 2 self.oval = self.canvas.create_oval( oval_center[0] - 15, oval_center[1] - 15, oval_center[0] + 15, oval_center[1] + 15, fill='yellow') # create red rect self.rect = self.canvas.create_rectangle( origin[0] - 15, origin[1] - 15, origin[0] + 15, origin[1] + 15, fill='red') # pack all self.canvas.pack() def reset(self): self.update() time.sleep(0.1) self.canvas.delete(self.rect) origin = np.array([20, 20]) self.rect = self.canvas.create_rectangle( origin[0] - 15, origin[1] - 15, origin[0] + 15, origin[1] + 15, fill='red') # return observation return (np.array(self.canvas.coords(self.rect)[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / ( MAZE_H * UNIT) def step(self, action): s = self.canvas.coords(self.rect) base_action = np.array([0, 0]) if action == 0: # up if s[1] > UNIT: base_action[1] -= UNIT elif action == 1: # down if s[1] < (MAZE_H - 1) * UNIT: base_action[1] += UNIT elif action == 2: # right if s[0] < (MAZE_W - 1) * UNIT: base_action[0] += UNIT elif action == 3: # left if s[0] > UNIT: base_action[0] -= UNIT
ve(self.rect, base_action[0], base_action[1]) # move agent next_coords = self.canvas.coords(self.rect) # next state # reward function if next_coords == self.canvas.coords(self.oval): reward = 1 done = True elif next_coords in [self.canvas.coords(self.hell1)]: reward = -1 done = True else: reward = 0 done = False s_ = (np.array(next_coords[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / (MAZE_H * UNIT) return s_, reward, done def render(self): # time.sleep(0.01) self.update() np.random.seed(1) tf.set_random_seed(1) class DeepQNetwork: # 建立神经网络 def _build_net(self): # -------------- 创建 eval 神经网络, 及时提升参数 -------------- tf.compat.v1.disable_eager_execution() self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # 用来接收 observation self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target') # 用来接收 q_target 的值, 这个之后会通过计算得到 with tf.variable_scope('eval_net'): # c_names(collections_names) 是在更新 target_net 参数时会用到 c_names, n_l1, w_initializer, b_initializer = \ ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, \ tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1) # config of layers # eval_net 的第一层. collections 是在更新 target_net 参数时会用到 with tf.variable_scope('l1'): w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names) b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names) l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1) # eval_net 的第二层. collections 是在更新 target_net 参数时会用到 with tf.variable_scope('l2'): w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names) b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names) self.q_eval = tf.matmul(l1, w2) + b2 with tf.variable_scope('loss'): # 求误差 self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval)) with tf.variable_scope('train'): # 梯度下降 self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss) # ---------------- 创建 target 神经网络, 提供 target Q --------------------- self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # 接收下个 observation with tf.variable_scope('target_net'): # c_names(collections_names) 是在更新 target_net 参数时会用到 c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES] # target_net 的第一层. collections 是在更新 target_net 参数时会用到 with tf.variable_scope('l1'): w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names) b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names) l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1) # target_net 的第二层. 
collections 是在更新 target_net 参数时会用到 with tf.variable_scope('l2'): w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names) b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names) self.q_next = tf.matmul(l1, w2) + b2 def __init__( self, n_actions, n_features, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9, replace_target_iter=300, memory_size=500, batch_size=32, e_greedy_increment=None, output_graph=False, ): self.n_actions = n_actions self.n_features = n_features self.lr = learning_rate self.gamma = reward_decay self.epsilon_max = e_greedy # epsilon 的最大值 self.replace_target_iter = replace_target_iter # 更换 target_net 的步数 self.memory_size = memory_size # 记忆上限 self.batch_size = batch_size # 每次更新时从 memory 里面取多少记忆出来 self.epsilon_increment = e_greedy_increment # epsilon 的增量 self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max # 是否开启探索模式, 并逐步减少探索次数 # 记录学习次数 (用于判断是否更换 target_net 参数) self.learn_step_counter = 0 # 初始化全 0 记忆 [s, a, r, s_] self.memory = np.zeros((self.memory_size, n_features * 2 + 2)) # 和视频中不同, 因为 pandas 运算比较慢, 这里改为直接用 numpy # 创建 [target_net, evaluate_net] self._build_net() # 替换 target net 的参数 t_params = tf.get_collection('target_net_params') # 提取 target_net 的参数 e_params = tf.get_collection('eval_net_params') # 提取 eval_net 的参数 self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)] # 更新 target_net 参数 self.sess = tf.Session() # 输出 tensorboard 文件 if output_graph: # $ tensorboard --logdir=logs tf.summary.FileWriter("logs/", self.sess.graph) self.sess.run(tf.global_variables_initializer()) self.cost_his = [] # 记录所有 cost 变化, 用于最后 plot 出来观看 def store_transition(self, s, a, r, s_): if not hasattr(self, 'memory_counter'): self.memory_counter = 0 # 记录一条 [s, a, r, s_] 记录 transition = np.hstack((s, [a, r], s_)) # 总 memory 大小是固定的, 如果超出总大小, 旧 memory 就被新 memory 替换 index = self.memory_counter % self.memory_size self.memory[index, :] = transition # 替换过程 self.memory_counter += 1 def choose_action(self, observation): # 统一 observation 的 shape (1, size_of_observation) observation = observation[np.newaxis, :] if np.random.uniform() < self.epsilon: # 让 eval_net 神经网络生成所有 action 的值, 并选择值最大的 action actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation}) action = np.argmax(actions_value) else: action = np.random.randint(0, self.n_actions) # 随机选择 return action def learn(self): # 检查是否替换 target_net 参数 if self.learn_step_counter % self.replace_target_iter == 0: self.sess.run(self.replace_target_op) print('\ntarget_params_replaced\n') # 从 memory 中随机抽取 batch_size 这么多记忆 if self.memory_counter > self.memory_size: sample_index = np.random.choice(self.memory_size, size=self.batch_size) else: sample_index = np.random.choice(self.memory_counter, size=self.batch_size) batch_memory = self.memory[sample_index, :] # 获取 q_next (target_net 产生了 q) 和 q_eval(eval_net 产生的 q) q_next, q_eval = self.sess.run( [self.q_next, self.q_eval], feed_dict={ self.s_: batch_memory[:, -self.n_features:], self.s: batch_memory[:, :self.n_features] }) # 下面这几步十分重要. q_next, q_eval 包含所有 action 的值, # 而我们需要的只是已经选择好的 action 的值, 其他的并不需要. # 所以我们将其他的 action 值全变成 0, 将用到的 action 误差值 反向传递回去, 作为更新凭据. # 这是我们最终要达到的样子, 比如 q_target - q_eval = [1, 0, 0] - [-1, 0, 0] = [2, 0, 0] # q_eval = [-1, 0, 0] 表示这一个记忆中有我选用过 action 0, 而 action 0 带来的 Q(s, a0) = -1, 所以其他的 Q(s, a1) = Q(s, a2) = 0. # q_target = [1, 0, 0] 表示这个记忆中的 r+gamma*maxQ(s_) = 1, 而且不管在 s_ 上我们取了哪个 action, # 我们都需要对应上 q_eval 中的 action 位置, 所以就将 1 放在了 action 0 的位置. 
# 下面也是为了达到上面说的目的, 不过为了更方面让程序运算, 达到目的的过程有点不同. # 是将 q_eval 全部赋值给 q_target, 这时 q_target-q_eval 全为 0, # 不过 我们再根据 batch_memory 当中的 action 这个 column 来给 q_target 中的对应的 memory-action 位置来修改赋值. # 使新的赋值为 reward + gamma * maxQ(s_), 这样 q_target-q_eval 就可以变成我们所需的样子. # 具体在下面还有一个举例说明. q_target = q_eval.copy() batch_index = np.arange(self.batch_size, dtype=np.int32) eval_act_index = batch_memory[:, self.n_features].astype(int) reward = batch_memory[:, self.n_features + 1] q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1) """ 假如在这个 batch 中, 我们有2个提取的记忆, 根据每个记忆可以生产3个 action 的值: q_eval = [[1, 2, 3], [4, 5, 6]] q_target = q_eval = [[1, 2, 3], [4, 5, 6]] 然后根据 memory 当中的具体 action 位置来修改 q_target 对应 action 上的值: 比如在: 记忆 0 的 q_target 计算值是 -1, 而且我用了 action 0; 记忆 1 的 q_target 计算值是 -2, 而且我用了 action 2: q_target = [[-1, 2, 3], [4, 5, -2]] 所以 (q_target - q_eval) 就变成了: [[(-1)-(1), 0, 0], [0, 0, (-2)-(6)]] 最后我们将这个 (q_target - q_eval) 当成误差, 反向传递会神经网络. 所有为 0 的 action 值是当时没有选择的 action, 之前有选择的 action 才有不为0的值. 我们只反向传递之前选择的 action 的值, """ # 训练 eval_net _, self.cost = self.sess.run([self._train_op, self.loss], feed_dict={self.s: batch_memory[:, :self.n_features], self.q_target: q_target}) self.cost_his.append(self.cost) # 记录 cost 误差 # 逐渐增加 epsilon, 降低行为的随机性 self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max self.learn_step_counter += 1 def plot_cost(self): import matplotlib.pyplot as plt plt.plot(np.arange(len(self.cost_his)), self.cost_his) plt.ylabel('Cost') plt.xlabel('training steps') plt.show() def run_maze(): step = 0 # 用来控制什么时候学习 for episode in range(300): # 初始化环境 observation = env.reset() while True: # 刷新环境 env.render() # DQN 根据观测值选择行为 action = RL.choose_action(observation) # 环境根据行为给出下一个 state, reward, 是否终止 observation_, reward, done = env.step(action) # DQN 存储记忆 RL.store_transition(observation, action, reward, observation_) # 控制学习起始时间和频率 (先累积一些记忆再开始学习) if (step > 200) and (step % 5 == 0): RL.learn() # 将下一个 state_ 变为 下次循环的 state observation = observation_ # 如果终止, 就跳出循环 if done: break step += 1 # 总步数 # end of game print('game over') env.destroy() if __name__ == "__main__": env = Maze() RL = DeepQNetwork(env.n_actions, env.n_features, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9, replace_target_iter=200, # 每 200 步替换一次 target_net 的参数 memory_size=2000, # 记忆上限 # output_graph=True # 是否输出 tensorboard 文件 ) env.after(100, run_maze) env.mainloop() RL.plot_cost() # 观看神经网络的误差曲线
self.canvas.mo
conditional_block
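The reward logic in Maze.step above can be exercised without a Tk window. Note in passing that step() treats action 2 as "right" and action 3 as "left", while action_space lists them as ['u', 'd', 'l', 'r']; the labels are never used, so behaviour is unaffected. Below is a standalone sketch of the same conditional structure on plain grid cells; the trap and goal cells mirror hell1 at (2, 1) and the oval at (2, 2) in the 4x4 maze above:

GOAL, TRAP = (2, 2), (2, 1)   # cell coordinates of the oval and of hell1

def step(pos, action):
    # same index convention as the code above: 0=up, 1=down, 2=right, 3=left
    moves = {0: (0, -1), 1: (0, 1), 2: (1, 0), 3: (-1, 0)}
    dx, dy = moves[action]
    nxt = (min(max(pos[0] + dx, 0), 3), min(max(pos[1] + dy, 0), 3))
    if nxt == GOAL:
        return nxt, 1, True    # reached the goal
    if nxt == TRAP:
        return nxt, -1, True   # fell into the trap
    return nxt, 0, False

print(step((2, 3), 0))   # moving up from (2, 3) reaches the goal: ((2, 2), 1, True)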
DQN_1.py
import numpy as np import pandas as pd import time import tkinter as tk import tensorflow.compat.v1 as tf ''' 4*4 的迷宫: --------------------------- | 入口 | | | | --------------------------- | | | 陷阱 | | --------------------------- | | 陷阱 | 终点 | | --------------------------- | | | | | --------------------------- ''' UNIT = 40 # pixels MAZE_H = 4 # grid height MAZE_W = 4 # grid width class Maze(tk.Tk, object): def __init__(self): super(Maze, self).__init
lf): self.canvas = tk.Canvas(self, bg='white', height=MAZE_H * UNIT, width=MAZE_W * UNIT) # create grids for c in range(0, MAZE_W * UNIT, UNIT): x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT self.canvas.create_line(x0, y0, x1, y1) for r in range(0, MAZE_H * UNIT, UNIT): x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r self.canvas.create_line(x0, y0, x1, y1) # create origin origin = np.array([20, 20]) # hell hell1_center = origin + np.array([UNIT * 2, UNIT]) self.hell1 = self.canvas.create_rectangle( hell1_center[0] - 15, hell1_center[1] - 15, hell1_center[0] + 15, hell1_center[1] + 15, fill='black') # hell # hell2_center = origin + np.array([UNIT, UNIT * 2]) # self.hell2 = self.canvas.create_rectangle( # hell2_center[0] - 15, hell2_center[1] - 15, # hell2_center[0] + 15, hell2_center[1] + 15, # fill='black') # create oval oval_center = origin + UNIT * 2 self.oval = self.canvas.create_oval( oval_center[0] - 15, oval_center[1] - 15, oval_center[0] + 15, oval_center[1] + 15, fill='yellow') # create red rect self.rect = self.canvas.create_rectangle( origin[0] - 15, origin[1] - 15, origin[0] + 15, origin[1] + 15, fill='red') # pack all self.canvas.pack() def reset(self): self.update() time.sleep(0.1) self.canvas.delete(self.rect) origin = np.array([20, 20]) self.rect = self.canvas.create_rectangle( origin[0] - 15, origin[1] - 15, origin[0] + 15, origin[1] + 15, fill='red') # return observation return (np.array(self.canvas.coords(self.rect)[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / ( MAZE_H * UNIT) def step(self, action): s = self.canvas.coords(self.rect) base_action = np.array([0, 0]) if action == 0: # up if s[1] > UNIT: base_action[1] -= UNIT elif action == 1: # down if s[1] < (MAZE_H - 1) * UNIT: base_action[1] += UNIT elif action == 2: # right if s[0] < (MAZE_W - 1) * UNIT: base_action[0] += UNIT elif action == 3: # left if s[0] > UNIT: base_action[0] -= UNIT self.canvas.move(self.rect, base_action[0], base_action[1]) # move agent next_coords = self.canvas.coords(self.rect) # next state # reward function if next_coords == self.canvas.coords(self.oval): reward = 1 done = True elif next_coords in [self.canvas.coords(self.hell1)]: reward = -1 done = True else: reward = 0 done = False s_ = (np.array(next_coords[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / (MAZE_H * UNIT) return s_, reward, done def render(self): # time.sleep(0.01) self.update() np.random.seed(1) tf.set_random_seed(1) class DeepQNetwork: # 建立神经网络 def _build_net(self): # -------------- 创建 eval 神经网络, 及时提升参数 -------------- tf.compat.v1.disable_eager_execution() self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # 用来接收 observation self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target') # 用来接收 q_target 的值, 这个之后会通过计算得到 with tf.variable_scope('eval_net'): # c_names(collections_names) 是在更新 target_net 参数时会用到 c_names, n_l1, w_initializer, b_initializer = \ ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, \ tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1) # config of layers # eval_net 的第一层. collections 是在更新 target_net 参数时会用到 with tf.variable_scope('l1'): w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names) b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names) l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1) # eval_net 的第二层. 
collections 是在更新 target_net 参数时会用到 with tf.variable_scope('l2'): w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names) b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names) self.q_eval = tf.matmul(l1, w2) + b2 with tf.variable_scope('loss'): # 求误差 self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval)) with tf.variable_scope('train'): # 梯度下降 self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss) # ---------------- 创建 target 神经网络, 提供 target Q --------------------- self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # 接收下个 observation with tf.variable_scope('target_net'): # c_names(collections_names) 是在更新 target_net 参数时会用到 c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES] # target_net 的第一层. collections 是在更新 target_net 参数时会用到 with tf.variable_scope('l1'): w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names) b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names) l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1) # target_net 的第二层. collections 是在更新 target_net 参数时会用到 with tf.variable_scope('l2'): w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names) b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names) self.q_next = tf.matmul(l1, w2) + b2 def __init__( self, n_actions, n_features, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9, replace_target_iter=300, memory_size=500, batch_size=32, e_greedy_increment=None, output_graph=False, ): self.n_actions = n_actions self.n_features = n_features self.lr = learning_rate self.gamma = reward_decay self.epsilon_max = e_greedy # epsilon 的最大值 self.replace_target_iter = replace_target_iter # 更换 target_net 的步数 self.memory_size = memory_size # 记忆上限 self.batch_size = batch_size # 每次更新时从 memory 里面取多少记忆出来 self.epsilon_increment = e_greedy_increment # epsilon 的增量 self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max # 是否开启探索模式, 并逐步减少探索次数 # 记录学习次数 (用于判断是否更换 target_net 参数) self.learn_step_counter = 0 # 初始化全 0 记忆 [s, a, r, s_] self.memory = np.zeros((self.memory_size, n_features * 2 + 2)) # 和视频中不同, 因为 pandas 运算比较慢, 这里改为直接用 numpy # 创建 [target_net, evaluate_net] self._build_net() # 替换 target net 的参数 t_params = tf.get_collection('target_net_params') # 提取 target_net 的参数 e_params = tf.get_collection('eval_net_params') # 提取 eval_net 的参数 self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)] # 更新 target_net 参数 self.sess = tf.Session() # 输出 tensorboard 文件 if output_graph: # $ tensorboard --logdir=logs tf.summary.FileWriter("logs/", self.sess.graph) self.sess.run(tf.global_variables_initializer()) self.cost_his = [] # 记录所有 cost 变化, 用于最后 plot 出来观看 def store_transition(self, s, a, r, s_): if not hasattr(self, 'memory_counter'): self.memory_counter = 0 # 记录一条 [s, a, r, s_] 记录 transition = np.hstack((s, [a, r], s_)) # 总 memory 大小是固定的, 如果超出总大小, 旧 memory 就被新 memory 替换 index = self.memory_counter % self.memory_size self.memory[index, :] = transition # 替换过程 self.memory_counter += 1 def choose_action(self, observation): # 统一 observation 的 shape (1, size_of_observation) observation = observation[np.newaxis, :] if np.random.uniform() < self.epsilon: # 让 eval_net 神经网络生成所有 action 的值, 并选择值最大的 action actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation}) action = np.argmax(actions_value) else: action = 
np.random.randint(0, self.n_actions) # 随机选择 return action def learn(self): # 检查是否替换 target_net 参数 if self.learn_step_counter % self.replace_target_iter == 0: self.sess.run(self.replace_target_op) print('\ntarget_params_replaced\n') # 从 memory 中随机抽取 batch_size 这么多记忆 if self.memory_counter > self.memory_size: sample_index = np.random.choice(self.memory_size, size=self.batch_size) else: sample_index = np.random.choice(self.memory_counter, size=self.batch_size) batch_memory = self.memory[sample_index, :] # 获取 q_next (target_net 产生了 q) 和 q_eval(eval_net 产生的 q) q_next, q_eval = self.sess.run( [self.q_next, self.q_eval], feed_dict={ self.s_: batch_memory[:, -self.n_features:], self.s: batch_memory[:, :self.n_features] }) # 下面这几步十分重要. q_next, q_eval 包含所有 action 的值, # 而我们需要的只是已经选择好的 action 的值, 其他的并不需要. # 所以我们将其他的 action 值全变成 0, 将用到的 action 误差值 反向传递回去, 作为更新凭据. # 这是我们最终要达到的样子, 比如 q_target - q_eval = [1, 0, 0] - [-1, 0, 0] = [2, 0, 0] # q_eval = [-1, 0, 0] 表示这一个记忆中有我选用过 action 0, 而 action 0 带来的 Q(s, a0) = -1, 所以其他的 Q(s, a1) = Q(s, a2) = 0. # q_target = [1, 0, 0] 表示这个记忆中的 r+gamma*maxQ(s_) = 1, 而且不管在 s_ 上我们取了哪个 action, # 我们都需要对应上 q_eval 中的 action 位置, 所以就将 1 放在了 action 0 的位置. # 下面也是为了达到上面说的目的, 不过为了更方面让程序运算, 达到目的的过程有点不同. # 是将 q_eval 全部赋值给 q_target, 这时 q_target-q_eval 全为 0, # 不过 我们再根据 batch_memory 当中的 action 这个 column 来给 q_target 中的对应的 memory-action 位置来修改赋值. # 使新的赋值为 reward + gamma * maxQ(s_), 这样 q_target-q_eval 就可以变成我们所需的样子. # 具体在下面还有一个举例说明. q_target = q_eval.copy() batch_index = np.arange(self.batch_size, dtype=np.int32) eval_act_index = batch_memory[:, self.n_features].astype(int) reward = batch_memory[:, self.n_features + 1] q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1) """ 假如在这个 batch 中, 我们有2个提取的记忆, 根据每个记忆可以生产3个 action 的值: q_eval = [[1, 2, 3], [4, 5, 6]] q_target = q_eval = [[1, 2, 3], [4, 5, 6]] 然后根据 memory 当中的具体 action 位置来修改 q_target 对应 action 上的值: 比如在: 记忆 0 的 q_target 计算值是 -1, 而且我用了 action 0; 记忆 1 的 q_target 计算值是 -2, 而且我用了 action 2: q_target = [[-1, 2, 3], [4, 5, -2]] 所以 (q_target - q_eval) 就变成了: [[(-1)-(1), 0, 0], [0, 0, (-2)-(6)]] 最后我们将这个 (q_target - q_eval) 当成误差, 反向传递会神经网络. 所有为 0 的 action 值是当时没有选择的 action, 之前有选择的 action 才有不为0的值. 
我们只反向传递之前选择的 action 的值, """ # 训练 eval_net _, self.cost = self.sess.run([self._train_op, self.loss], feed_dict={self.s: batch_memory[:, :self.n_features], self.q_target: q_target}) self.cost_his.append(self.cost) # 记录 cost 误差 # 逐渐增加 epsilon, 降低行为的随机性 self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max self.learn_step_counter += 1 def plot_cost(self): import matplotlib.pyplot as plt plt.plot(np.arange(len(self.cost_his)), self.cost_his) plt.ylabel('Cost') plt.xlabel('training steps') plt.show() def run_maze(): step = 0 # 用来控制什么时候学习 for episode in range(300): # 初始化环境 observation = env.reset() while True: # 刷新环境 env.render() # DQN 根据观测值选择行为 action = RL.choose_action(observation) # 环境根据行为给出下一个 state, reward, 是否终止 observation_, reward, done = env.step(action) # DQN 存储记忆 RL.store_transition(observation, action, reward, observation_) # 控制学习起始时间和频率 (先累积一些记忆再开始学习) if (step > 200) and (step % 5 == 0): RL.learn() # 将下一个 state_ 变为 下次循环的 state observation = observation_ # 如果终止, 就跳出循环 if done: break step += 1 # 总步数 # end of game print('game over') env.destroy() if __name__ == "__main__": env = Maze() RL = DeepQNetwork(env.n_actions, env.n_features, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9, replace_target_iter=200, # 每 200 步替换一次 target_net 的参数 memory_size=2000, # 记忆上限 # output_graph=True # 是否输出 tensorboard 文件 ) env.after(100, run_maze) env.mainloop() RL.plot_cost() # 观看神经网络的误差曲线
__() self.action_space = ['u', 'd', 'l', 'r'] self.n_actions = len(self.action_space) self.n_features = 2 self.title('maze') self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT)) self._build_maze() def _build_maze(se
identifier_body
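In the __init__ above, epsilon starts at 0 and is annealed toward epsilon_max only when e_greedy_increment is given; with the default None (as in the __main__ block of this script) epsilon is pinned at e_greedy from the start. A small sketch of the annealing schedule, with 0.125 as an illustrative increment:

def epsilon_schedule(e_greedy=0.9, increment=0.125, steps=5):
    eps = 0.0 if increment is not None else e_greedy
    history = []
    for _ in range(steps):
        history.append(eps)
        eps = min(eps + (increment or 0.0), e_greedy)   # mirrors the clamp in learn()
    return history

print(epsilon_schedule())   # [0.0, 0.125, 0.25, 0.375, 0.5]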
DQN_1.py
import numpy as np import pandas as pd import time import tkinter as tk import tensorflow.compat.v1 as tf ''' 4*4 的迷宫: --------------------------- | 入口 | | | | --------------------------- | | | 陷阱 | | --------------------------- | | 陷阱 | 终点 | | --------------------------- | | | | | --------------------------- ''' UNIT = 40 # pixels MAZE_H = 4 # grid height MAZE_W = 4 # grid width class Maze(tk.Tk, object): def __init__(self): super(Maze, self).__init__() self.action_space = ['u', 'd', 'l', 'r'] self.n_actions = len(self.action_space) self.n_features = 2 self.title('maze') self.geometry('{0}x{1}'.format(MAZE_H * UNIT, MAZE_H * UNIT)) self._build_maze() def _build_maze(self): self.canvas = tk.Canvas(self, bg='white', height=MAZE_H * UNIT, width=MAZE_W * UNIT) # create grids for c in range(0, MAZE_W * UNIT, UNIT): x0, y0, x1, y1 = c, 0, c, MAZE_H * UNIT self.canvas.create_line(x0, y0, x1, y1) for r in range(0, MAZE_H * UNIT, UNIT): x0, y0, x1, y1 = 0, r, MAZE_W * UNIT, r self.canvas.create_line(x0, y0, x1, y1) # create origin origin = np.array([20, 20]) # hell hell1_center = origin + np.array([UNIT * 2, UNIT]) self.hell1 = self.canvas.create_rectangle( hell1_center[0] - 15, hell1_center[1] - 15, hell1_center[0] + 15, hell1_center[1] + 15, fill='black') # hell # hell2_center = origin + np.array([UNIT, UNIT * 2]) # self.hell2 = self.canvas.create_rectangle( # hell2_center[0] - 15, hell2_center[1] - 15, # hell2_center[0] + 15, hell2_center[1] + 15, # fill='black') # create oval oval_center = origin + UNIT * 2 self.oval = self.canvas.create_oval( oval_center[0] - 15, oval_center[1] - 15, oval_center[0] + 15, oval_center[1] + 15, fill='yellow') # create red rect self.rect = self.canvas.create_rectangle( origin[0] - 15, origin[1] - 15, origin[0] + 15, origin[1] + 15, fill='red') # pack all self.canvas.pack() def reset(self): self.update() time.sleep(0.1) self.canvas.delete(self.rect) origin = np.array([20, 20]) self.rect = self.canvas.create_rectangle( origin[0] - 15, origin[1] - 15, origin[0] + 15, origin[1] + 15, fill='red') # return observation return (np.array(self.canvas.coords(self.rect)[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / ( MAZE_H * UNIT) def step(self, action): s = self.canvas.coords(self.rect) base_action = np.array([0, 0]) if action == 0: # up if s[1] > UNIT: base_action[1] -= UNIT elif action == 1: # down if s[1] < (MAZE_H - 1) * UNIT: base_action[1] += UNIT elif action == 2: # right if s[0] < (MAZE_W - 1) * UNIT: base_action[0] += UNIT elif action == 3: # left if s[0] > UNIT: base_action[0] -= UNIT self.canvas.move(self.rect, base_action[0], base_action[1]) # move agent next_coords = self.canvas.coords(self.rect) # next state # reward function if next_coords == self.canvas.coords(self.oval): reward = 1 done = True elif next_coords in [self.canvas.coords(self.hell1)]: reward = -1 done = True else: reward = 0 done = False s_ = (np.array(next_coords[:2]) - np.array(self.canvas.coords(self.oval)[:2])) / (MAZE_H * UNIT) return s_, reward, done def render(self): # time.sleep(0.01) self.update() np.random.seed(1) tf.set_random_seed(1) class DeepQNetwork: # 建立神经网络 def _build_net(self): # -------------- 创建 eval 神经网络, 及时提升参数 -------------- tf.compat.v1.disable_eager_execution() self.s = tf.placeholder(tf.float32, [None, self.n_features], name='s') # 用来接收 observation self.q_target = tf.placeholder(tf.float32, [None, self.n_actions], name='Q_target') # 用来接收 q_target 的值, 这个之后会通过计算得到 with tf.variable_scope('eval_net'): # c_names(collections_names) 是在更新 target_net 参数时会用到 c_names, 
n_l1, w_initializer, b_initializer = \ ['eval_net_params', tf.GraphKeys.GLOBAL_VARIABLES], 10, \ tf.random_normal_initializer(0., 0.3), tf.constant_initializer(0.1) # config of layers # eval_net 的第一层. collections 是在更新 target_net 参数时会用到 with tf.variable_scope('l1'): w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names) b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names) l1 = tf.nn.relu(tf.matmul(self.s, w1) + b1) # eval_net 的第二层. collections 是在更新 target_net 参数时会用到 with tf.variable_scope('l2'): w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names) b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names) self.q_eval = tf.matmul(l1, w2) + b2 with tf.variable_scope('loss'): # 求误差 self.loss = tf.reduce_mean(tf.squared_difference(self.q_target, self.q_eval)) with tf.variable_scope('train'): # 梯度下降 self._train_op = tf.train.RMSPropOptimizer(self.lr).minimize(self.loss) # ---------------- 创建 target 神经网络, 提供 target Q --------------------- self.s_ = tf.placeholder(tf.float32, [None, self.n_features], name='s_') # 接收下个 observation with tf.variable_scope('target_net'): # c_names(collections_names) 是在更新 target_net 参数时会用到 c_names = ['target_net_params', tf.GraphKeys.GLOBAL_VARIABLES] # target_net 的第一层. collections 是在更新 target_net 参数时会用到 with tf.variable_scope('l1'): w1 = tf.get_variable('w1', [self.n_features, n_l1], initializer=w_initializer, collections=c_names) b1 = tf.get_variable('b1', [1, n_l1], initializer=b_initializer, collections=c_names) l1 = tf.nn.relu(tf.matmul(self.s_, w1) + b1) # target_net 的第二层. collections 是在更新 target_net 参数时会用到 with tf.variable_scope('l2'): w2 = tf.get_variable('w2', [n_l1, self.n_actions], initializer=w_initializer, collections=c_names) b2 = tf.get_variable('b2', [1, self.n_actions], initializer=b_initializer, collections=c_names) self.q_next = tf.matmul(l1, w2) + b2 def __init__( self, n_actions, n_features, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9, replace_target_iter=300, memory_size=500, batch_size=32, e_greedy_increment=None, output_graph=False, ): self.n_actions = n_actions self.n_features = n_features self.lr = learning_rate self.gamma = reward_decay self.epsilon_max = e_greedy # epsilon 的最大值 self.replace_target_iter = replace_target_iter # 更换 target_net 的步数 self.memory_size = memory_size # 记忆上限 self.batch_size = batch_size # 每次更新时从 memory 里面取多少记忆出来 self.epsilon_increment = e_greedy_increment # epsilon 的增量 self.epsilon = 0 if e_greedy_increment is not None else self.epsilon_max # 是否开启探索模式, 并逐步减少探索次数 # 记录学习次数 (用于判断是否更换 target_net 参数) self.learn_step_counter = 0 # 初始化全 0 记忆 [s, a, r, s_] self.memory = np.zeros((self.memory_size, n_features * 2 + 2)) # 和视频中不同, 因为 pandas 运算比较慢, 这里改为直接用 numpy # 创建 [target_net, evaluate_net] self._build_net() # 替换 target net 的参数 t_params = tf.get_collection('target_net_params') # 提取 target_net 的参数 e_params = tf.get_collection('eval_net_params') # 提取 eval_net 的参数 self.replace_target_op = [tf.assign(t, e) for t, e in zip(t_params, e_params)] # 更新 target_net 参数 self.sess = tf.Session() # 输出 tensorboard 文件 if output_graph: # $ tensorboard --logdir=logs tf.summary.FileWriter("logs/", self.sess.graph) self.sess.run(tf.global_variables_initializer()) self.cost_his = [] # 记录所有 cost 变化, 用于最后 plot 出来观看 def store_transition(self, s, a, r, s_): if not hasattr(self, 'memory_counter'): self.memory_counter = 0 # 记录一条 [s, a, r, s_] 记录 transition = np.hstack((s, [a, r], s_)) # 总 
memory 大小是固定的, 如果超出总大小, 旧 memory 就被新 memory 替换 index = self.memory_counter % self.memory_size self.memory[index, :] = transition # 替换过程 self.memory_counter += 1 def choose_action(self, observation): # 统一 observation 的 shape (1, size_of_observation) observation = observation[np.newaxis, :] if np.random.uniform() < self.epsilon: # 让 eval_net 神经网络生成所有 action 的值, 并选择值最大的 action actions_value = self.sess.run(self.q_eval, feed_dict={self.s: observation}) action = np.argmax(actions_value) else: action = np.random.randint(0, self.n_actions) # 随机选择 return action def learn(self): # 检查是否替换 target_net 参数 if self.learn_step_counter % self.replace_target_iter == 0: self.sess.run(self.replace_target_op) print('\ntarget_params_replaced\n') # 从 memory 中随机抽取 batch_size 这么多记忆 if self.memory_counter > self.memory_size: sample_index = np.random.choice(self.memory_size, size=self.batch_size) else: sample_index = np.random.choice(self.memory_counter, size=self.batch_size) batch_memory = self.memory[sample_index, :] # 获取 q_next (target_net 产生了 q) 和 q_eval(eval_net 产生的 q) q_next, q_eval = self.sess.run( [self.q_next, self.q_eval], feed_dict={ self.s_: batch_memory[:, -self.n_features:], self.s: batch_memory[:, :self.n_features] }) # 下面这几步十分重要. q_next, q_eval 包含所有 action 的值, # 而我们需要的只是已经选择好的 action 的值, 其他的并不需要. # 所以我们将其他的 action 值全变成 0, 将用到的 action 误差值 反向传递回去, 作为更新凭据. # 这是我们最终要达到的样子, 比如 q_target - q_eval = [1, 0, 0] - [-1, 0, 0] = [2, 0, 0] # q_eval = [-1, 0, 0] 表示这一个记忆中有我选用过 action 0, 而 action 0 带来的 Q(s, a0) = -1, 所以其他的 Q(s, a1) = Q(s, a2) = 0. # q_target = [1, 0, 0] 表示这个记忆中的 r+gamma*maxQ(s_) = 1, 而且不管在 s_ 上我们取了哪个 action, # 我们都需要对应上 q_eval 中的 action 位置, 所以就将 1 放在了 action 0 的位置. # 下面也是为了达到上面说的目的, 不过为了更方面让程序运算, 达到目的的过程有点不同. # 是将 q_eval 全部赋值给 q_target, 这时 q_target-q_eval 全为 0, # 不过 我们再根据 batch_memory 当中的 action 这个 column 来给 q_target 中的对应的 memory-action 位置来修改赋值. # 使新的赋值为 reward + gamma * maxQ(s_), 这样 q_target-q_eval 就可以变成我们所需的样子. # 具体在下面还有一个举例说明. q_target = q_eval.copy() batch_index = np.arange(self.batch_size, dtype=np.int32) eval_act_index = batch_memory[:, self.n_features].astype(int) reward = batch_memory[:, self.n_features + 1] q_target[batch_index, eval_act_index] = reward + self.gamma * np.max(q_next, axis=1) """ 假如在这个 batch 中, 我们有2个提取的记忆, 根据每个记忆可以生产3个 action 的值: q_eval = [[1, 2, 3], [4, 5, 6]] q_target = q_eval = [[1, 2, 3], [4, 5, 6]] 然后根据 memory 当中的具体 action 位置来修改 q_target 对应 action 上的值: 比如在: 记忆 0 的 q_target 计算值是 -1, 而且我用了 action 0; 记忆 1 的 q_target 计算值是 -2, 而且我用了 action 2: q_target = [[-1, 2, 3], [4, 5, -2]] 所以 (q_target - q_eval) 就变成了: [[(-1)-(1), 0, 0], [0, 0, (-2)-(6)]] 最后我们将这个 (q_target - q_eval) 当成误差, 反向传递会神经网络. 所有为 0 的 action 值是当时没有选择的 action, 之前有选择的 action 才有不为0的值. 我们只反向传递之前选择的 action 的值, """ # 训练 eval_net _, self.cost = self.sess.run([self._train_op, self.loss], feed_dict={self.s: batch_memory[:, :self.n_features], self.q_target: q_target}) self.cost_his.append(self.cost) # 记录 cost 误差 # 逐渐增加 epsilon, 降低行为的随机性 self.epsilon = self.epsilon + self.epsilon_increment if self.epsilon < self.epsilon_max else self.epsilon_max
        plt.plot(np.arange(len(self.cost_his)), self.cost_his)
        plt.ylabel('Cost')
        plt.xlabel('training steps')
        plt.show()


def run_maze():
    step = 0  # total step counter, used to decide when learning starts
    for episode in range(300):
        # reset the environment
        observation = env.reset()
        while True:
            # refresh the environment
            env.render()
            # the DQN picks an action from the current observation
            action = RL.choose_action(observation)
            # the environment returns the next state, the reward and a done flag
            observation_, reward, done = env.step(action)
            # the DQN stores the transition
            RL.store_transition(observation, action, reward, observation_)
            # accumulate some memories first, then learn every 5 steps
            if (step > 200) and (step % 5 == 0):
                RL.learn()
            # the next state becomes the state of the next iteration
            observation = observation_
            # leave the loop once the episode terminates
            if done:
                break
            step += 1  # total step count
    # end of game
    print('game over')
    env.destroy()


if __name__ == "__main__":
    env = Maze()
    RL = DeepQNetwork(env.n_actions, env.n_features,
                      learning_rate=0.01,
                      reward_decay=0.9,
                      e_greedy=0.9,
                      replace_target_iter=200,  # replace target_net parameters every 200 steps
                      memory_size=2000,         # replay memory capacity
                      # output_graph=True       # write a tensorboard graph file
                      )
    env.after(100, run_maze)
    env.mainloop()
    RL.plot_cost()  # plot the network's cost curve
self.learn_step_counter += 1 def plot_cost(self): import matplotlib.pyplot as plt
random_line_split
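replace_target_op above hard-copies every eval_net parameter into target_net once per replace_target_iter learning steps; conceptually it is just the following, shown framework-free with made-up parameter dicts (no soft/Polyak averaging is involved):

import numpy as np

eval_params = {'w1': np.ones((2, 10)), 'b1': np.zeros((1, 10))}
target_params = {'w1': np.zeros((2, 10)), 'b1': np.ones((1, 10))}

def replace_target(target, source):
    for name, value in source.items():
        target[name] = value.copy()   # hard copy, like tf.assign(t, e)

replace_target(target_params, eval_params)
assert np.array_equal(target_params['w1'], eval_params['w1'])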
utils.py
import datetime import time import logging import dropbox import json import StringIO import hashlib import os import re import cProfile from jose import jws from dropbox.rest import ErrorResponse from django.contrib import messages from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.contrib.auth import get_user_model from core import defaults import sys import _ast from pyflakes import checker from pyflakes import reporter as modReporter from pyflakes.messages import Message from django.core.urlresolvers import reverse from django.test import RequestFactory class UnAuthorized(Exception): pass class NotFound(Exception):
class NoBasesFound(Exception): pass logger = logging.getLogger(__name__) class Connection(object): def __init__(self, dropbox_access_token): self.client = dropbox.client.DropboxClient(dropbox_access_token) super(Connection, self).__init__() def info(self): account_info = self.client.account_info() email = account_info['email'] name = account_info['display_name'] return email, name def listing(self): bases = [] for base in self._call('metadata', '/')['contents']: bases.append(base['path'].lstrip('/')) if len(bases) == 0: raise NoBasesFound() return bases def get_file(self, path): logger.debug("get file %s" % path) return self._call('get_file', path) def get_file_content_and_rev(self, path): file, metadata = self._call('get_file_and_metadata', path) content = file.read() file.close() rev = metadata['rev'] return content, rev def get_file_content(self, path): logger.debug("return content %s" % path) return self.get_file(path).read() def put_file(self, path, content): f = StringIO.StringIO(content) return self._call('put_file', path, f, True) def delete_file(self, path): return self._call('file_delete', path) def create_folder(self, path): return self._call('file_create_folder', path) def delta(self, cursor): return self._call('delta', cursor) def _call(self, ms, *args): try: m = getattr(self.client, ms) return m(*args) except ErrorResponse, e: if e.__dict__['status'] == 401: raise UnAuthorized(e.__dict__['body']['error']) if e.__dict__['status'] == 404: raise NotFound(e.__dict__['body']['error']) raise e except Exception, e: raise e def metadata(self, path): return self._call('metadata', path) def directory_zip(self, path, zf): logger.info("download "+path) try: f_metadata = self.metadata(path) if f_metadata['is_dir']: for content in f_metadata['contents']: logger.info("download "+content['path']) if content['is_dir']: self.directory_zip(content['path'], zf) else: # get the file filepath = content['path'] try: file = self.get_file(filepath) filepath_new = re.sub(r"(.*?)/(.+?)(\/.*)", r"\2", filepath) logger.debug("Add file '%s' as '%s' to zip" % (filepath, filepath_new)) zf.writestr(os.path.relpath(filepath_new, "/"), file.read()) file.close() except ErrorResponse, e: logger.error(e) except ErrorResponse, e: logger.error(e) return zf def message(request, level, message): dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") if level == logging.ERROR: tag = "alert-danger" elif level == logging.INFO: tag = "alert-info" elif level == logging.WARN: tag = "alert-info" messages.error(request, dt + " " + str(message)[:1000], extra_tags="%s safe" % tag) def sign(data): m = hashlib.md5() m.update(data) m.update(settings.SECRET_KEY) return "%s-%s" % (data, m.hexdigest()[:10]) def check_code(code, name): errors = [] class CustomMessage(object): pass reporter = modReporter._makeDefaultReporter() try: tree = compile(code, name, "exec", _ast.PyCF_ONLY_AST) except SyntaxError: value = sys.exc_info()[1] msg = value.args[0] (lineno, offset, text) = value.lineno, value.offset, value.text # If there's an encoding problem with the file, the text is None. if text is None: # Avoid using msg, since for the only known case, it contains a # bogus message that claims the encoding the file declared was # unknown. 
reporter.unexpectedError(name, 'problem decoding source') else: reporter.syntaxError(name, msg, lineno, offset, text) loc = CustomMessage() loc.lineno = lineno loc.offset = offset msg = Message(name, loc) msg.message = "SyntaxError" errors.append(msg) except Exception, e: loc = CustomMessage() loc.lineno = lineno loc.offset = offset msg = Message(name, loc) msg.message = "Problem decoding source" errors.append(msg) reporter.unexpectedError(name, 'problem decoding source') logger.error("problem decoding source") logger.exception() r = [] try: w = checker.Checker(tree, name) r = w.messages for message in w.messages: logger.info(str(message)) except UnboundLocalError, e: pass return not (len(r) > 0 or len(errors) > 0), r, errors def load_setting(name, fail=True): v = None default = getattr(defaults, name, None) setting = getattr(settings, name, None) if setting: v = setting logger.debug("Loaded setting from settings %s with value: %s" % (name, v)) elif default: v = default logger.debug("Loaded setting from defaults %s with value: %s" % (name, v)) if not v and fail: logger.error("Could not load setting %s" % name) raise ImproperlyConfigured(name) return v def load_var_to_file(var): path = "/tmp/" fq_file = os.path.join(path, var) content = os.environ[var] if not os.path.exists(path): os.mkdir(path) if not os.path.exists(fq_file): f = open(fq_file, 'w') f.write(content) f.close() if sys.platform == "darwin": os.popen4("echo $(cat %s) > %s" % (fq_file, fq_file)) else: os.popen4("echo -e $(cat %s) > %s" % (fq_file, fq_file)) return fq_file def call_apy(base_name, apy_name): logger.info("START call_apy") try: from core.models import Apy apy = Apy.objects.get(name=apy_name, base__name=base_name) logger.info("START call_apy %s" % apy.name) url = reverse('exec', kwargs={'base': apy.base.name, 'id': apy.id}) request_factory = RequestFactory() request = request_factory.get(url, data={'json': "", 'base': apy.base.name, 'id': apy.id}) # TODO: fails if user admin is not created, and must have a authprofile, knockknock request.user = get_user_model().objects.get(username='admin') request.META['HTTP_ACCEPT'] = "text/html" from core.views import ExecView view = ExecView() response = view.get(request, base=apy.base.name, id=apy.id) logger.info("method called for base %s, response_code: %s" % (apy.base.name, response.status_code)) logger.info("END call_apy %s" % apy.name) except Exception, e: logger.error("ERROR call_apy") logger.exception(e) def profileit(func): """ Taken from http://stackoverflow.com/questions/5375624/a-decorator-that-profiles-a-method-call-and-logs-the-profiling-result """ def wrapper(*args, **kwargs): prof = cProfile.Profile() # if not os.environ.has_key("PROFILE_DO_FUNC"): # return func(*args, **kwargs) retval = prof.runcall(func, *args, **kwargs) # Note use of name from outer scope # prof.dump_stats(name) import pstats s = pstats.Stats(prof).sort_stats('time') s.print_stats(8) return retval return wrapper def totimestamp(t): logger.debug("totimestamp: %s" % t) return (t-datetime.datetime(1970, 1, 1)).total_seconds() def fromtimestamp(t): logger.debug("fromtimestamp: %s" % t) return datetime.datetime.fromtimestamp(t) def create_jwt(user, secret): """The above token need to be saved in database, and a one-to-one relation should exist with the username/user_pk.""" logger.debug("Create JWT with secret %s" % secret) # username = request.POST['username'] # password = request.POST['password' expiry = datetime.datetime.now() + datetime.timedelta(seconds=30) expiry_s = 
time.mktime(expiry.timetuple()) if user.is_authenticated(): internalid = user.authprofile.internalid payload = {'username': user.username, 'expiry': expiry_s, 'type': "AuthenticatedUser", 'internalid': internalid, 'email': user.email} token = jws.sign(payload, secret, algorithm='HS256') else: payload = {'expiry':expiry_s, 'type': "AnonymousUser", 'internalid': None, 'email': None} token = jws.sign(payload, secret, algorithm='HS256') logger.debug("Payload: %s" % payload) # logger.info("Token: %s" % token) return token def read_jwt(payload, secret): logger.debug("Read JWT with secret %s" % secret) logger.debug("Payload: %s" % payload) decoded_dict = json.loads(jws.verify(payload, secret, algorithms=['HS256'])) logger.info("Identity: %s" % decoded_dict) # print decoded_dict # print type(decoded_dict) username = decoded_dict.get('username', None) expiry = decoded_dict.get('expiry', None) # if datetime.datetime.utcfromtimestamp(expiry) < datetime.datetime.now(): # raise Exception("AuthenticationFailed: (_('Token Expired.')") return (username, decoded_dict)
pass
identifier_body
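create_jwt/read_jwt above delegate to python-jose's JWS primitives; the HS256 round trip in isolation (Python 3 syntax for the sketch, with an illustrative payload and secret):

import json
from jose import jws

secret = "example-secret"   # illustrative only, not a real key
payload = {"username": "alice", "type": "AuthenticatedUser"}

token = jws.sign(payload, secret, algorithm='HS256')
decoded = json.loads(jws.verify(token, secret, algorithms=['HS256']))
assert decoded["username"] == "alice"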
utils.py
import datetime import time import logging import dropbox import json import StringIO import hashlib import os import re import cProfile from jose import jws from dropbox.rest import ErrorResponse from django.contrib import messages from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.contrib.auth import get_user_model from core import defaults import sys import _ast from pyflakes import checker from pyflakes import reporter as modReporter from pyflakes.messages import Message from django.core.urlresolvers import reverse from django.test import RequestFactory class UnAuthorized(Exception): pass class NotFound(Exception): pass class NoBasesFound(Exception): pass logger = logging.getLogger(__name__) class Connection(object): def __init__(self, dropbox_access_token): self.client = dropbox.client.DropboxClient(dropbox_access_token) super(Connection, self).__init__() def info(self): account_info = self.client.account_info() email = account_info['email'] name = account_info['display_name'] return email, name def listing(self): bases = [] for base in self._call('metadata', '/')['contents']: bases.append(base['path'].lstrip('/')) if len(bases) == 0: raise NoBasesFound() return bases def get_file(self, path): logger.debug("get file %s" % path) return self._call('get_file', path) def get_file_content_and_rev(self, path): file, metadata = self._call('get_file_and_metadata', path) content = file.read() file.close() rev = metadata['rev'] return content, rev def get_file_content(self, path): logger.debug("return content %s" % path) return self.get_file(path).read() def put_file(self, path, content): f = StringIO.StringIO(content) return self._call('put_file', path, f, True) def delete_file(self, path): return self._call('file_delete', path) def create_folder(self, path): return self._call('file_create_folder', path) def delta(self, cursor): return self._call('delta', cursor) def _call(self, ms, *args): try: m = getattr(self.client, ms) return m(*args) except ErrorResponse, e: if e.__dict__['status'] == 401: raise UnAuthorized(e.__dict__['body']['error']) if e.__dict__['status'] == 404: raise NotFound(e.__dict__['body']['error']) raise e except Exception, e: raise e def metadata(self, path): return self._call('metadata', path) def directory_zip(self, path, zf): logger.info("download "+path) try: f_metadata = self.metadata(path) if f_metadata['is_dir']: for content in f_metadata['contents']: logger.info("download "+content['path']) if content['is_dir']: self.directory_zip(content['path'], zf) else: # get the file filepath = content['path'] try: file = self.get_file(filepath) filepath_new = re.sub(r"(.*?)/(.+?)(\/.*)", r"\2", filepath) logger.debug("Add file '%s' as '%s' to zip" % (filepath, filepath_new)) zf.writestr(os.path.relpath(filepath_new, "/"), file.read()) file.close() except ErrorResponse, e: logger.error(e) except ErrorResponse, e: logger.error(e) return zf def message(request, level, message): dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") if level == logging.ERROR: tag = "alert-danger" elif level == logging.INFO: tag = "alert-info" elif level == logging.WARN: tag = "alert-info" messages.error(request, dt + " " + str(message)[:1000], extra_tags="%s safe" % tag) def sign(data): m = hashlib.md5() m.update(data) m.update(settings.SECRET_KEY) return "%s-%s" % (data, m.hexdigest()[:10]) def
(code, name): errors = [] class CustomMessage(object): pass reporter = modReporter._makeDefaultReporter() try: tree = compile(code, name, "exec", _ast.PyCF_ONLY_AST) except SyntaxError: value = sys.exc_info()[1] msg = value.args[0] (lineno, offset, text) = value.lineno, value.offset, value.text # If there's an encoding problem with the file, the text is None. if text is None: # Avoid using msg, since for the only known case, it contains a # bogus message that claims the encoding the file declared was # unknown. reporter.unexpectedError(name, 'problem decoding source') else: reporter.syntaxError(name, msg, lineno, offset, text) loc = CustomMessage() loc.lineno = lineno loc.offset = offset msg = Message(name, loc) msg.message = "SyntaxError" errors.append(msg) except Exception, e: loc = CustomMessage() loc.lineno = lineno loc.offset = offset msg = Message(name, loc) msg.message = "Problem decoding source" errors.append(msg) reporter.unexpectedError(name, 'problem decoding source') logger.error("problem decoding source") logger.exception() r = [] try: w = checker.Checker(tree, name) r = w.messages for message in w.messages: logger.info(str(message)) except UnboundLocalError, e: pass return not (len(r) > 0 or len(errors) > 0), r, errors def load_setting(name, fail=True): v = None default = getattr(defaults, name, None) setting = getattr(settings, name, None) if setting: v = setting logger.debug("Loaded setting from settings %s with value: %s" % (name, v)) elif default: v = default logger.debug("Loaded setting from defaults %s with value: %s" % (name, v)) if not v and fail: logger.error("Could not load setting %s" % name) raise ImproperlyConfigured(name) return v def load_var_to_file(var): path = "/tmp/" fq_file = os.path.join(path, var) content = os.environ[var] if not os.path.exists(path): os.mkdir(path) if not os.path.exists(fq_file): f = open(fq_file, 'w') f.write(content) f.close() if sys.platform == "darwin": os.popen4("echo $(cat %s) > %s" % (fq_file, fq_file)) else: os.popen4("echo -e $(cat %s) > %s" % (fq_file, fq_file)) return fq_file def call_apy(base_name, apy_name): logger.info("START call_apy") try: from core.models import Apy apy = Apy.objects.get(name=apy_name, base__name=base_name) logger.info("START call_apy %s" % apy.name) url = reverse('exec', kwargs={'base': apy.base.name, 'id': apy.id}) request_factory = RequestFactory() request = request_factory.get(url, data={'json': "", 'base': apy.base.name, 'id': apy.id}) # TODO: fails if user admin is not created, and must have a authprofile, knockknock request.user = get_user_model().objects.get(username='admin') request.META['HTTP_ACCEPT'] = "text/html" from core.views import ExecView view = ExecView() response = view.get(request, base=apy.base.name, id=apy.id) logger.info("method called for base %s, response_code: %s" % (apy.base.name, response.status_code)) logger.info("END call_apy %s" % apy.name) except Exception, e: logger.error("ERROR call_apy") logger.exception(e) def profileit(func): """ Taken from http://stackoverflow.com/questions/5375624/a-decorator-that-profiles-a-method-call-and-logs-the-profiling-result """ def wrapper(*args, **kwargs): prof = cProfile.Profile() # if not os.environ.has_key("PROFILE_DO_FUNC"): # return func(*args, **kwargs) retval = prof.runcall(func, *args, **kwargs) # Note use of name from outer scope # prof.dump_stats(name) import pstats s = pstats.Stats(prof).sort_stats('time') s.print_stats(8) return retval return wrapper def totimestamp(t): logger.debug("totimestamp: %s" % t) return 
(t-datetime.datetime(1970, 1, 1)).total_seconds()


def fromtimestamp(t):
    logger.debug("fromtimestamp: %s" % t)
    return datetime.datetime.fromtimestamp(t)


def create_jwt(user, secret):
    """Create a signed JWT for the user.

    The returned token needs to be saved in the database, with a
    one-to-one relation to the username/user_pk.
    """
    logger.debug("Create JWT with secret %s" % secret)
    expiry = datetime.datetime.now() + datetime.timedelta(seconds=30)
    expiry_s = time.mktime(expiry.timetuple())
    if user.is_authenticated():
        internalid = user.authprofile.internalid
        payload = {'username': user.username, 'expiry': expiry_s,
                   'type': "AuthenticatedUser", 'internalid': internalid,
                   'email': user.email}
    else:
        payload = {'expiry': expiry_s, 'type': "AnonymousUser",
                   'internalid': None, 'email': None}
    token = jws.sign(payload, secret, algorithm='HS256')
    logger.debug("Payload: %s" % payload)
    return token


def read_jwt(payload, secret):
    logger.debug("Read JWT with secret %s" % secret)
    logger.debug("Payload: %s" % payload)
    decoded_dict = json.loads(jws.verify(payload, secret, algorithms=['HS256']))
    logger.info("Identity: %s" % decoded_dict)
    username = decoded_dict.get('username', None)
    expiry = decoded_dict.get('expiry', None)
    # NOTE: expiry is read but not enforced; the commented check below would
    # reject expired tokens.
    # if datetime.datetime.utcfromtimestamp(expiry) < datetime.datetime.now():
    #     raise Exception("AuthenticationFailed: (_('Token Expired.')")
    return (username, decoded_dict)
check_code
identifier_name
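check_code above compiles the source to an AST and hands it to pyflakes' Checker; a trimmed usage sketch (assumes pyflakes is installed, Python 3 syntax):

import ast
from pyflakes import checker

source = "import os\nx = 1\n"   # 'os' is imported but never used
tree = compile(source, "example.py", "exec", ast.PyCF_ONLY_AST)
warnings = checker.Checker(tree, "example.py")
for message in warnings.messages:
    print(message)              # e.g. example.py:1: 'os' imported but unused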
utils.py
import datetime
import time
import logging
import dropbox
import json
import StringIO
import hashlib
import os
import re
import cProfile

from jose import jws
from dropbox.rest import ErrorResponse

from django.contrib import messages
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth import get_user_model

from core import defaults

import sys
import _ast
from pyflakes import checker
from pyflakes import reporter as modReporter
from pyflakes.messages import Message

from django.core.urlresolvers import reverse
from django.test import RequestFactory


class UnAuthorized(Exception):
    pass


class NotFound(Exception):
    pass


class NoBasesFound(Exception):
    pass


logger = logging.getLogger(__name__)


class Connection(object):

    def __init__(self, dropbox_access_token):
        self.client = dropbox.client.DropboxClient(dropbox_access_token)
        super(Connection, self).__init__()

    def info(self):
        account_info = self.client.account_info()
        email = account_info['email']
        name = account_info['display_name']
        return email, name

    def listing(self):
        bases = []
        for base in self._call('metadata', '/')['contents']:
            bases.append(base['path'].lstrip('/'))
        if len(bases) == 0:
            raise NoBasesFound()
        return bases

    def get_file(self, path):
        logger.debug("get file %s" % path)
        return self._call('get_file', path)

    def get_file_content_and_rev(self, path):
        file, metadata = self._call('get_file_and_metadata', path)
        content = file.read()
        file.close()
        rev = metadata['rev']
        return content, rev

    def get_file_content(self, path):
        logger.debug("return content %s" % path)
        return self.get_file(path).read()

    def put_file(self, path, content):
        f = StringIO.StringIO(content)
        return self._call('put_file', path, f, True)

    def delete_file(self, path):
        return self._call('file_delete', path)

    def create_folder(self, path):
        return self._call('file_create_folder', path)

    def delta(self, cursor):
        return self._call('delta', cursor)

    def _call(self, ms, *args):
        try:
            m = getattr(self.client, ms)
            return m(*args)
        except ErrorResponse, e:
            if e.__dict__['status'] == 401:
                raise UnAuthorized(e.__dict__['body']['error'])
            if e.__dict__['status'] == 404:
                raise NotFound(e.__dict__['body']['error'])
            raise e
        except Exception, e:
            raise e

    def metadata(self, path):
        return self._call('metadata', path)

    def directory_zip(self, path, zf):
        logger.info("download " + path)
        try:
            f_metadata = self.metadata(path)
            if f_metadata['is_dir']:
                for content in f_metadata['contents']:
                    logger.info("download " + content['path'])
                    if content['is_dir']:
                        self.directory_zip(content['path'], zf)
                    else:
                        # get the file
                        filepath = content['path']
                        try:
                            file = self.get_file(filepath)
                            filepath_new = re.sub(r"(.*?)/(.+?)(\/.*)", r"\2", filepath)
                            logger.debug("Add file '%s' as '%s' to zip" % (filepath, filepath_new))
                            zf.writestr(os.path.relpath(filepath_new, "/"), file.read())
                            file.close()
                        except ErrorResponse, e:
                            logger.error(e)
        except ErrorResponse, e:
            logger.error(e)
        return zf


def message(request, level, message):
    dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    if level == logging.ERROR:
        tag = "alert-danger"
    elif level == logging.INFO:
        tag = "alert-info"
    else:
        # logging.WARN and any other level share the info style, so that
        # tag is always bound.
        tag = "alert-info"
    messages.error(request, dt + " " + str(message)[:1000], extra_tags="%s safe" % tag)


def sign(data):
    m = hashlib.md5()
    m.update(data)
    m.update(settings.SECRET_KEY)
    return "%s-%s" % (data, m.hexdigest()[:10])


def check_code(code, name):
    errors = []

    class CustomMessage(object):
        pass

    reporter = modReporter._makeDefaultReporter()
    try:
        tree = compile(code, name, "exec", _ast.PyCF_ONLY_AST)
    except SyntaxError:
        value = sys.exc_info()[1]
        msg = value.args[0]
        (lineno, offset, text) = value.lineno, value.offset, value.text
        # If there's an encoding problem with the file, the text is None.
        if text is None:
            # Avoid using msg, since for the only known case, it contains a
            # bogus message that claims the encoding the file declared was
            # unknown.
            reporter.unexpectedError(name, 'problem decoding source')
        else:
            reporter.syntaxError(name, msg, lineno, offset, text)
        loc = CustomMessage()
        loc.lineno = lineno
        loc.offset = offset
        msg = Message(name, loc)
        msg.message = "SyntaxError"
        errors.append(msg)
    except Exception, e:
        # Generic errors carry no position information, so default to 0
        # instead of referencing the unbound lineno/offset.
        loc = CustomMessage()
        loc.lineno = getattr(e, 'lineno', 0)
        loc.offset = getattr(e, 'offset', 0)
        msg = Message(name, loc)
        msg.message = "Problem decoding source"
        errors.append(msg)
        reporter.unexpectedError(name, 'problem decoding source')
        logger.exception("problem decoding source")
    r = []
    try:
        w = checker.Checker(tree, name)
        r = w.messages
        for message in w.messages:
            logger.info(str(message))
    except UnboundLocalError, e:
        pass
    return not (len(r) > 0 or len(errors) > 0), r, errors


def load_setting(name, fail=True):
    v = None
    default = getattr(defaults, name, None)
    setting = getattr(settings, name, None)
    if setting:
        v = setting
        logger.debug("Loaded setting from settings %s with value: %s" % (name, v))
    elif default:
        v = default
        logger.debug("Loaded setting from defaults %s with value: %s" % (name, v))
    if not v and fail:
        logger.error("Could not load setting %s" % name)
        raise ImproperlyConfigured(name)
    return v


def load_var_to_file(var):
    path = "/tmp/"
    fq_file = os.path.join(path, var)
    content = os.environ[var]

    if not os.path.exists(path):
        os.mkdir(path)

    if not os.path.exists(fq_file):
        f = open(fq_file, 'w')
        f.write(content)
        f.close()
        if sys.platform == "darwin":
            os.popen4("echo $(cat %s) > %s" % (fq_file, fq_file))
        else:
            os.popen4("echo -e $(cat %s) > %s" % (fq_file, fq_file))
    return fq_file


def call_apy(base_name, apy_name):
    logger.info("START call_apy")
    try:
        from core.models import Apy
        apy = Apy.objects.get(name=apy_name, base__name=base_name)
        logger.info("START call_apy %s" % apy.name)

        url = reverse('exec', kwargs={'base': apy.base.name, 'id': apy.id})
        request_factory = RequestFactory()
        request = request_factory.get(url, data={'json': "", 'base': apy.base.name, 'id': apy.id})
        # TODO: fails if user admin is not created, and must have an authprofile, knockknock
        request.user = get_user_model().objects.get(username='admin')
        request.META['HTTP_ACCEPT'] = "text/html"

        from core.views import ExecView
        view = ExecView()
        response = view.get(request, base=apy.base.name, id=apy.id)
        logger.info("method called for base %s, response_code: %s" % (apy.base.name, response.status_code))
        logger.info("END call_apy %s" % apy.name)
    except Exception, e:
        logger.error("ERROR call_apy")
        logger.exception(e)


def profileit(func):
    """
    Taken from http://stackoverflow.com/questions/5375624/a-decorator-that-profiles-a-method-call-and-logs-the-profiling-result
    """
    def wrapper(*args, **kwargs):
        prof = cProfile.Profile()
        # if not os.environ.has_key("PROFILE_DO_FUNC"):
        #     return func(*args, **kwargs)
        retval = prof.runcall(func, *args, **kwargs)
        # Note use of name from outer scope
        # prof.dump_stats(name)
        import pstats
        s = pstats.Stats(prof).sort_stats('time')
        s.print_stats(8)
        return retval
    return wrapper


def totimestamp(t):
    logger.debug("totimestamp: %s" % t)
    return (t - datetime.datetime(1970, 1, 1)).total_seconds()


def fromtimestamp(t):
    logger.debug("fromtimestamp: %s" % t)
    return datetime.datetime.fromtimestamp(t)

def create_jwt(user, secret):
    """Create a signed JWT for the user.

    The token needs to be saved in the database, and a one-to-one relation
    should exist with the username/user_pk.
    """
    logger.debug("Create JWT with secret %s" % secret)
    # username = request.POST['username']
    # password = request.POST['password']
    expiry = datetime.datetime.now() + datetime.timedelta(seconds=30)
    expiry_s = time.mktime(expiry.timetuple())
    if user.is_authenticated():
        internalid = user.authprofile.internalid
        payload = {'username': user.username, 'expiry': expiry_s,
                   'type': "AuthenticatedUser", 'internalid': internalid,
                   'email': user.email}
        token = jws.sign(payload, secret, algorithm='HS256')
    else:
        payload = {'expiry': expiry_s, 'type': "AnonymousUser",
                   'internalid': None, 'email': None}
        token = jws.sign(payload, secret, algorithm='HS256')
    logger.debug("Payload: %s" % payload)
    # logger.info("Token: %s" % token)
    logger.debug("Read JWT with secret %s" % secret)
    logger.debug("Payload: %s" % payload)
    decoded_dict = json.loads(jws.verify(payload, secret, algorithms=['HS256']))
    logger.info("Identity: %s" % decoded_dict)
    # print decoded_dict
    # print type(decoded_dict)
    username = decoded_dict.get('username', None)
    expiry = decoded_dict.get('expiry', None)
    # if datetime.datetime.utcfromtimestamp(expiry) < datetime.datetime.now():
    #     raise Exception("AuthenticationFailed: (_('Token Expired.')")
    return (username, decoded_dict)
    return token


def read_jwt(payload, secret):
random_line_split
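The two JWT helpers split across this record lean on python-jose's HS256 sign/verify pair. Below is a minimal round-trip sketch of that API, assuming python-jose is installed; the secret and claims are placeholders, not values from the source.

import json

from jose import jws

# Sign a claims dict, then verify and parse it back, mirroring the
# create_jwt/read_jwt helpers above.
token = jws.sign({'username': 'alice', 'type': 'AuthenticatedUser'},
                 'placeholder-secret', algorithm='HS256')
claims = json.loads(jws.verify(token, 'placeholder-secret',
                               algorithms=['HS256']))
assert claims['username'] == 'alice'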
utils.py
import datetime
import time
import logging
import dropbox
import json
import StringIO
import hashlib
import os
import re
import cProfile

from jose import jws
from dropbox.rest import ErrorResponse

from django.contrib import messages
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.contrib.auth import get_user_model

from core import defaults

import sys
import _ast
from pyflakes import checker
from pyflakes import reporter as modReporter
from pyflakes.messages import Message

from django.core.urlresolvers import reverse
from django.test import RequestFactory


class UnAuthorized(Exception):
    pass


class NotFound(Exception):
    pass


class NoBasesFound(Exception):
    pass


logger = logging.getLogger(__name__)


class Connection(object):

    def __init__(self, dropbox_access_token):
        self.client = dropbox.client.DropboxClient(dropbox_access_token)
        super(Connection, self).__init__()

    def info(self):
        account_info = self.client.account_info()
        email = account_info['email']
        name = account_info['display_name']
        return email, name

    def listing(self):
        bases = []
        for base in self._call('metadata', '/')['contents']:
            bases.append(base['path'].lstrip('/'))
        if len(bases) == 0:
            raise NoBasesFound()
        return bases

    def get_file(self, path):
        logger.debug("get file %s" % path)
        return self._call('get_file', path)

    def get_file_content_and_rev(self, path):
        file, metadata = self._call('get_file_and_metadata', path)
        content = file.read()
        file.close()
        rev = metadata['rev']
        return content, rev

    def get_file_content(self, path):
        logger.debug("return content %s" % path)
        return self.get_file(path).read()

    def put_file(self, path, content):
        f = StringIO.StringIO(content)
        return self._call('put_file', path, f, True)

    def delete_file(self, path):
        return self._call('file_delete', path)

    def create_folder(self, path):
        return self._call('file_create_folder', path)

    def delta(self, cursor):
        return self._call('delta', cursor)

    def _call(self, ms, *args):
        try:
            m = getattr(self.client, ms)
            return m(*args)
        except ErrorResponse, e:
            if e.__dict__['status'] == 401:
                raise UnAuthorized(e.__dict__['body']['error'])
            if e.__dict__['status'] == 404:
                raise NotFound(e.__dict__['body']['error'])
            raise e
        except Exception, e:
            raise e

    def metadata(self, path):
        return self._call('metadata', path)

    def directory_zip(self, path, zf):
        logger.info("download " + path)
        try:
            f_metadata = self.metadata(path)
            if f_metadata['is_dir']:
                for content in f_metadata['contents']:
                    logger.info("download " + content['path'])
                    if content['is_dir']:
                        self.directory_zip(content['path'], zf)
                    else:
                        # get the file
                        filepath = content['path']
                        try:
                            file = self.get_file(filepath)
                            filepath_new = re.sub(r"(.*?)/(.+?)(\/.*)", r"\2", filepath)
                            logger.debug("Add file '%s' as '%s' to zip" % (filepath, filepath_new))
                            zf.writestr(os.path.relpath(filepath_new, "/"), file.read())
                            file.close()
                        except ErrorResponse, e:
                            logger.error(e)
        except ErrorResponse, e:
            logger.error(e)
        return zf


def message(request, level, message):
    dt = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    if level == logging.ERROR:
        tag = "alert-danger"
    elif level == logging.INFO:
        tag = "alert-info"
    else:
        # logging.WARN and any other level share the info style, so that
        # tag is always bound.
        tag = "alert-info"
    messages.error(request, dt + " " + str(message)[:1000], extra_tags="%s safe" % tag)


def sign(data):
    m = hashlib.md5()
    m.update(data)
    m.update(settings.SECRET_KEY)
    return "%s-%s" % (data, m.hexdigest()[:10])


def check_code(code, name):
    errors = []

    class CustomMessage(object):
        pass

    reporter = modReporter._makeDefaultReporter()
    try:
        tree = compile(code, name, "exec", _ast.PyCF_ONLY_AST)
    except SyntaxError:
        value = sys.exc_info()[1]
        msg = value.args[0]
        (lineno, offset, text) = value.lineno, value.offset, value.text
        # If there's an encoding problem with the file, the text is None.
        if text is None:
            # Avoid using msg, since for the only known case, it contains a
            # bogus message that claims the encoding the file declared was
            # unknown.
            reporter.unexpectedError(name, 'problem decoding source')
        else:
            reporter.syntaxError(name, msg, lineno, offset, text)
        loc = CustomMessage()
        loc.lineno = lineno
        loc.offset = offset
        msg = Message(name, loc)
        msg.message = "SyntaxError"
        errors.append(msg)
    except Exception, e:
        # Generic errors carry no position information, so default to 0
        # instead of referencing the unbound lineno/offset.
        loc = CustomMessage()
        loc.lineno = getattr(e, 'lineno', 0)
        loc.offset = getattr(e, 'offset', 0)
        msg = Message(name, loc)
        msg.message = "Problem decoding source"
        errors.append(msg)
        reporter.unexpectedError(name, 'problem decoding source')
        logger.exception("problem decoding source")
    r = []
    try:
        w = checker.Checker(tree, name)
        r = w.messages
        for message in w.messages:
            logger.info(str(message))
    except UnboundLocalError, e:
        pass
    return not (len(r) > 0 or len(errors) > 0), r, errors


def load_setting(name, fail=True):
    v = None
    default = getattr(defaults, name, None)
    setting = getattr(settings, name, None)
    if setting:
        v = setting
        logger.debug("Loaded setting from settings %s with value: %s" % (name, v))
    elif default:
        v = default
        logger.debug("Loaded setting from defaults %s with value: %s" % (name, v))
    if not v and fail:
        logger.error("Could not load setting %s" % name)
        raise ImproperlyConfigured(name)
    return v


def load_var_to_file(var):
    path = "/tmp/"
    fq_file = os.path.join(path, var)
    content = os.environ[var]

    if not os.path.exists(path):
    if not os.path.exists(fq_file):
        f = open(fq_file, 'w')
        f.write(content)
        f.close()
        if sys.platform == "darwin":
            os.popen4("echo $(cat %s) > %s" % (fq_file, fq_file))
        else:
            os.popen4("echo -e $(cat %s) > %s" % (fq_file, fq_file))
    return fq_file


def call_apy(base_name, apy_name):
    logger.info("START call_apy")
    try:
        from core.models import Apy
        apy = Apy.objects.get(name=apy_name, base__name=base_name)
        logger.info("START call_apy %s" % apy.name)

        url = reverse('exec', kwargs={'base': apy.base.name, 'id': apy.id})
        request_factory = RequestFactory()
        request = request_factory.get(url, data={'json': "", 'base': apy.base.name, 'id': apy.id})
        # TODO: fails if user admin is not created, and must have an authprofile, knockknock
        request.user = get_user_model().objects.get(username='admin')
        request.META['HTTP_ACCEPT'] = "text/html"

        from core.views import ExecView
        view = ExecView()
        response = view.get(request, base=apy.base.name, id=apy.id)
        logger.info("method called for base %s, response_code: %s" % (apy.base.name, response.status_code))
        logger.info("END call_apy %s" % apy.name)
    except Exception, e:
        logger.error("ERROR call_apy")
        logger.exception(e)


def profileit(func):
    """
    Taken from http://stackoverflow.com/questions/5375624/a-decorator-that-profiles-a-method-call-and-logs-the-profiling-result
    """
    def wrapper(*args, **kwargs):
        prof = cProfile.Profile()
        # if not os.environ.has_key("PROFILE_DO_FUNC"):
        #     return func(*args, **kwargs)
        retval = prof.runcall(func, *args, **kwargs)
        # Note use of name from outer scope
        # prof.dump_stats(name)
        import pstats
        s = pstats.Stats(prof).sort_stats('time')
        s.print_stats(8)
        return retval
    return wrapper


def totimestamp(t):
    logger.debug("totimestamp: %s" % t)
    return (t - datetime.datetime(1970, 1, 1)).total_seconds()


def fromtimestamp(t):
    logger.debug("fromtimestamp: %s" % t)
    return datetime.datetime.fromtimestamp(t)


def create_jwt(user, secret):
    """Create a signed JWT for the user.

    The token needs to be saved in the database, and a one-to-one relation
    should exist with the username/user_pk.
    """
    logger.debug("Create JWT with secret %s" % secret)
    # username = request.POST['username']
    # password = request.POST['password']
    expiry = datetime.datetime.now() + datetime.timedelta(seconds=30)
    expiry_s = time.mktime(expiry.timetuple())
    if user.is_authenticated():
        internalid = user.authprofile.internalid
        payload = {'username': user.username, 'expiry': expiry_s,
                   'type': "AuthenticatedUser", 'internalid': internalid,
                   'email': user.email}
        token = jws.sign(payload, secret, algorithm='HS256')
    else:
        payload = {'expiry': expiry_s, 'type': "AnonymousUser",
                   'internalid': None, 'email': None}
        token = jws.sign(payload, secret, algorithm='HS256')
    logger.debug("Payload: %s" % payload)
    # logger.info("Token: %s" % token)
    return token


def read_jwt(payload, secret):
    logger.debug("Read JWT with secret %s" % secret)
    logger.debug("Payload: %s" % payload)
    decoded_dict = json.loads(jws.verify(payload, secret, algorithms=['HS256']))
    logger.info("Identity: %s" % decoded_dict)
    # print decoded_dict
    # print type(decoded_dict)
    username = decoded_dict.get('username', None)
    expiry = decoded_dict.get('expiry', None)
    # if datetime.datetime.utcfromtimestamp(expiry) < datetime.datetime.now():
    #     raise Exception("AuthenticationFailed: (_('Token Expired.')")
    return (username, decoded_dict)
        os.mkdir(path)
conditional_block
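This record's fim_type is conditional_block: the held-out middle is the body of the if statement in load_var_to_file (os.mkdir(path)). Below is a minimal sketch of how any of these records reassembles into the original file, assuming the column layout shown in this dump; the helper name is illustrative.

def reassemble(record):
    # prefix + middle + suffix reproduces the source file; a
    # fill-in-the-middle model is trained to predict record['middle']
    # given the surrounding prefix and suffix.
    return record['prefix'] + record['middle'] + record['suffix']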
courses.py
""" quartzscrapers.scrapers.courses.courses ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module contains the classes for the worker threads, mangement of worker threads, and scrapers for course data. """ import re from queue import Queue from threading import Thread from collections import OrderedDict # Adds chromedriver_binary to path. import chromedriver_binary # noqa import pendulum from selenium import webdriver from selenium.webdriver.chrome.options import Options from ..utils import Scraper from ..utils.config import QUEENS_USERNAME, QUEENS_PASSWORD from .courses_helpers import ( setup_logging, parse_datetime, make_course_id, save_department_data, save_course_data, save_section_data, ) class Courses: """A scraper for Queen's courses on SOLUS. The Courses scraper creates 26 threads, one for each letter, to scrape several departments and their courses. It instantiates 26 Course workers, each of which creates a course session that handles a SOLUS login to grab its credentials via the cookies returned from a login. """ scraper_key = 'courses' location = './dumps/{}'.format(scraper_key) LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' @staticmethod def scrape(location='', *args, **kwargs): """Manage worker scrapers to parse information custom to SOLUS. Args: location (optional): String location of output files. """ if not location: location = Courses.location logger = setup_logging() logger.info('Starting Courses scrape') queue = Queue() for _ in Courses.LETTERS: course_worker = CourseWorker(queue, location) course_worker.daemon = True course_worker.start() for letter in Courses.LETTERS: queue.put(letter) queue.join() logger.info('Completed Courses scrape') class CourseWorker(Thread): """Worker thread for courses scraper.""" def __init__(self, queue, location): Thread.__init__(self) self.queue = queue self.location = location def run(self): """Instantiate CourseSession class to execute scraping.""" while True: letter = self.queue.get() course_scraper = CourseSession(self.location) course_scraper.scrape(letter) self.queue.task_done() class CourseSession: """A sub-scraper for Queen's courses.""" host = ('https://saself.ps.queensu.ca/psc/saself/EMPLOYEE/SA/c/' 'SA_LEARNER_SERVICES.SSS_BROWSE_CATLG_P.GBL') def __init__(self, location): self.scraper = Scraper() self.location = location self.logger = self.scraper.logger self.cookies = self._login() def scrape(self, letter): """Scrape information custom to SOLUS. Args: letter: A string of a letter related to course catalog. """ soup = self._request_page() departments = self._get_departments(soup, letter) self.logger.debug('Letter %s has %s depts.', letter, len(departments)) # For each department under a certain letter search. for department in departments: try: dept_data = self._parse_department_data(department) save_department_data(dept_data, self.scraper, self.location) courses = department.find_all( 'tr', id=re.compile('trCOURSE_LIST')) # For each course under a certain department. for course in courses: return_state = 'DERIVED_SAA_CRS_RETURN_PB$163$' try: course_number = course.find( 'a', id=re.compile(r'CRSE_NBR\$'))['id'] course_name = course.find( 'span', id=re.compile(r'CRSE_TITLE\$')).text if not course_number: self.logger.debug('Skipping non-existent course') continue if 'unspecified' in course_name.lower(): self.logger.debug('Skipping unspecified course') continue # Note: Selecting course only takes one parameter, # which is the ICAction. 
ic_action = {'ICAction': course_number} soup = self._request_page(ic_action) # Some courses have multiple offerings of the same # course, E.g: MATH121 offered on campus and online. # Check if table representing academic levels exists. if not self._has_multiple_course_offerings(soup): title = '' title_temp = soup.find( 'span', id='DERIVED_CRSECAT_DESCR200' ) if title_temp: title = title_temp.text.strip() self.logger.debug('Course title: %s', title) self._navigate_and_parse_course(soup) else: return_state = 'DERIVED_SSS_SEL_RETURN_PB$181$' title = soup.find( 'span', id='DERIVED_SSS_SEL_DESCR200' ).text.strip() self.logger.debug('Course title: %s', title) self.logger.debug('Multiple offerings found') academic_levels = self._get_academic_levels(soup) for academic_level in academic_levels: try: cr_number = academic_level['id'] cr_name = academic_level.text.strip() self.logger.debug('Career: %s', cr_name) # Go from a certain academic level to basic # course page. ic_action = {'ICAction': cr_number} soup = self._request_page(ic_action) self._navigate_and_parse_course(soup) except Exception: self.scraper.handle_error() self.logger.debug('Done careers.') except Exception: self.scraper.handle_error() # Go back to course listing. self.logger.debug('Returning to course list') ic_action = {'ICAction': return_state} self._request_page(ic_action) self.logger.debug('Done department') except Exception: self.scraper.handle_error() self.logger.debug('Done letter %s', letter) def _navigate_and_parse_course(self, soup): try: # Course parse. course_data = self._parse_course_data(soup) save_course_data(course_data, self.scraper, self.location) # Section(s) parse. if not self._has_course_sections(soup): self.logger.debug('No course sections. Skipping deep scrape') else: # Go to sections page. ic_action = {'ICAction': 'DERIVED_SAA_CRS_SSR_PB_GO'} soup = self._request_page(ic_action) terms = soup.find( 'select', id='DERIVED_SAA_CRS_TERM_ALT').find_all('option') self.logger.debug('%s terms available.', len(terms)) for term in terms: try: term_number = int(term['value']) self.logger.debug('Starting term: %s (%s)', term.text.strip(), term_number) payload = { 'ICAction': 'DERIVED_SAA_CRS_SSR_PB_GO$3$', 'DERIVED_SAA_CRS_TERM_ALT': term_number, } soup = self._request_page(payload) # NOTE: PeopleSoft maintains state of 'View All' for # sections per every other new section you select. # This means it only needs to be expanded ONCE. if self._is_view_sections_closed(soup): self.logger.debug( "'View All' tab is minimized. " "Requesting 'View All' for current term...") payload.update( {'ICAction': 'CLASS_TBL_VW5$hviewall$0'}) soup = self._request_page(payload) self.logger.debug("'View All' request complete.") sections = self._get_sections(soup) self.logger.debug('Total sections: %s', len(sections)) for section in sections: try: section_name = soup.find( 'a', id=section).text.strip().split(' ')[0] self.logger.debug( 'Section name: %s', section_name) # Go to sections page. payload.update({'ICAction': section}) section_soup = self._request_page(payload) section_base_data, section_data = ( self._parse_course_section_data( section_soup, course_data, section_name, ) ) save_section_data( section_base_data, section_data, self.scraper, self.location ) except Exception: self.scraper.handle_error() # Go back to sections. 
ic_action = { 'ICAction': 'CLASS_SRCH_WRK2_SSR_PB_CLOSE' } self._request_page(ic_action) self.logger.debug('Done term') except Exception: self.scraper.handle_error() self.logger.debug('Done course') except Exception: self.scraper.handle_error() ic_action = {'ICAction': 'DERIVED_SAA_CRS_RETURN_PB$163$'} self._request_page(ic_action) def _login(self): # Emulate a SOLUS login via a Selenium webdriver. Mainly used for user # authentication. Returns session cookies, which are retrieved and used # for the remainder of this scraping session. def
(func): """Execute Selenium task and retry upon failure.""" retries = 0 while retries < 3: try: return func() except Exception as ex: self.logger.error( 'Selenium error #%s: %s', retries + 1, ex, exc_info=True) retries += 1 continue self.logger.info('Running webdriver for authentication...') chrome_options = Options() # Prevent images from loading. prefs = {'profile.managed_default_content_settings.images': 2} chrome_options.add_argument('--headless') chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--disable-dev-shm-usage') chrome_options.add_experimental_option('prefs', prefs) driver = webdriver.Chrome(chrome_options=chrome_options) # Timeout to for an element to be found. driver.implicitly_wait(30) driver.set_page_load_timeout(30) driver.get('https://my.queensu.ca') # Sometimes, Selenium errors out when searching for certain fields. # Retry this routine until it succeeds. run_selenium_routine( lambda: driver.find_element_by_id('username').send_keys( QUEENS_USERNAME ) ) run_selenium_routine( lambda: driver.find_element_by_id('password').send_keys( QUEENS_PASSWORD ) ) run_selenium_routine( lambda: driver.find_element_by_class_name('form-button').click() ) run_selenium_routine( lambda: driver.find_element_by_class_name('solus-tab').click() ) iframe = run_selenium_routine( lambda: driver.find_element_by_id('ptifrmtgtframe') ) driver.switch_to_frame(iframe) run_selenium_routine( lambda: driver.find_element_by_link_text('Search').click() ) session_cookies = {} for cookie in driver.get_cookies(): session_cookies[cookie['name']] = cookie['value'] driver.close() self.logger.info('Webdriver authentication complete') return session_cookies def _request_page(self, params=None): return self.scraper.http_request( url=self.host, params=params, cookies=self.cookies ) def _get_hidden_params(self, soup): # Parses HTML for hidden values that represent SOLUS parameters. SOLUS # uses dynamic parameters to represent user state given certain actions # taken. params = {} hidden = soup.find('div', id=re.compile(r'win\ddivPSHIDDENFIELDS')) if not hidden: hidden = soup.find( 'field', id=re.compile(r'win\ddivPSHIDDENFIELDS')) params.update({ x.get('name'): x.get('value') for x in hidden.find_all('input') }) return params def _get_departments(self, soup, letter): # Click and expand a certain letter to see departments. # E.g.: 'A' has AGHE, ANAT, 'B' has BIOL, BCMP, etc. def update_params_and_make_request(soup, ic_action): """Update payload with hidden params and request page.""" payload = self._get_hidden_params(soup) payload.update(ic_action) soup = self._request_page(payload) return soup # Get all departments for a certain letter. ic_action = { 'ICAction': 'DERIVED_SSS_BCC_SSR_ALPHANUM_{}'.format(letter) } soup = update_params_and_make_request(soup, ic_action) # Expand all department courses. 
ic_action = {'ICAction': 'DERIVED_SSS_BCC_SSS_EXPAND_ALL$97$'} soup = update_params_and_make_request(soup, ic_action) departments = soup.find_all( 'table', id=re.compile('ACE_DERIVED_SSS_BCC_GROUP_BOX_1') ) return departments def _get_sections(self, soup): return [sec['id'] for sec in soup.find_all( 'a', id=re.compile(r'CLASS_SECTION\$'))] def _has_multiple_course_offerings(self, soup): return soup.find('table', id='CRSE_OFFERINGS$scroll$0') def _has_course_sections(self, soup): return soup.find('input', id='DERIVED_SAA_CRS_SSR_PB_GO') def _is_view_sections_closed(self, soup): view_all_tab = soup.find('a', id='CLASS_TBL_VW5$hviewall$0') return view_all_tab and 'View All' in view_all_tab def _get_academic_levels(self, soup): return [url for url in soup.find_all('a', id=re.compile(r'CAREER\$'))] def _parse_department_data(self, department): regex_title = re.compile(r'DERIVED_SSS_BCC_GROUP_BOX_1\$147\$\$span\$') dept_str = department.find('span', id=regex_title).text.strip() self.logger.debug('Department: %s', dept_str) # Some departments have more than one hypen, such as # "MEI - Entrepreneur & Innov - Masters". # Find first index of '-' to split code from name. name_idx = dept_str.find('-') code = dept_str[:name_idx].strip() name = dept_str[name_idx + 2:].strip() data = { 'id': code, 'code': code, 'name': name, } return data def _parse_course_data(self, soup): # All HTML IDs used via regular expressions. regex_title = re.compile('DERIVED_CRSECAT_DESCR200') regex_campus = re.compile('CAMPUS_TBL_DESCR') regex_desc = re.compile('SSR_CRSE_OFF_VW_DESCRLONG') regex_units = re.compile('DERIVED_CRSECAT_UNITS_RANGE') regex_basis = re.compile('SSR_CRSE_OFF_VW_GRADING_BASIS') regex_ac_lvl = re.compile('SSR_CRSE_OFF_VW_ACAD_CAREER') regex_ac_grp = re.compile('ACAD_GROUP_TBL_DESCR') regex_ac_org = re.compile('ACAD_ORG_TBL_DESCR') regex_crse_cmps = re.compile('ACE_SSR_DUMMY_RECVW') regex_enroll_tbl = re.compile('ACE_DERIVED_CRSECAT_SSR_GROUP2') regex_enroll_div = re.compile('win0div') regex_ceab = re.compile('ACE_DERIVED_CLSRCH') def filter_course_name(soup): """Preprocess and reformat course name.""" course_title = soup.find('span', id=regex_title).text.strip() name_idx = course_title.find('-') dept_raw, course_code_raw = course_title[:name_idx - 1].split(' ') course_name = course_title[name_idx + 1:].strip() dept = dept_raw.encode('ascii', 'ignore').decode().strip() course_code = course_code_raw.encode( 'ascii', 'ignore').decode().strip() return dept, course_code, course_name def filter_description(soup): """Filter description for the course description text only.""" # TODO: Filter different text sections from description, such as # 'NOTE', 'LEARNING HOURS', etc. descr_raw = soup.find('span', id=regex_desc) if not descr_raw: return '' # If <br/> tags exist, there will be additional information other # than the description. Filter for description only. if descr_raw.find_all('br'): return descr_raw.find_all('br')[0].previous_sibling return descr_raw.text.encode('ascii', 'ignore').decode().strip() def create_dict(rows, tag, tag_id=None, start=0, enroll=False): """Create dictionary out of BeautifulSoup objects. Args: rows: List of BeautifulSoup element tags. tag: String of certain element tag to find with BeautifulSoup. tag_id: String of an element tag's ID to search for. start: Numerical index of where to preprocess string name. enroll: Boolean to determine if for the enrollment section. Returns: Dictionary of data. 
""" enrollment_info_map = { 'Enrollment Requirement': 'requirements', 'Add Consent': 'add_consent', 'Drop Consent': 'drop_consent', } data = {} for row in rows: name_raw, desc_raw = row.find_all(tag, id=tag_id)[start:] name = name_raw.text.strip() desc = desc_raw.text.encode('ascii', 'ignore').decode().strip() if enroll: name = enrollment_info_map[name] else: name = name.lower().replace(' / ', '_') data.update({name: desc}) return data def create_ceab_dict(soup): """Create dictionary out of CEAB BeautifulSoup HTML object.""" ceab_map = { 'Basic Sci': 'basic_sci', 'Comp St': 'comp_st', 'End Des': 'end_des', 'Eng Sci': 'eng_sci', 'Math': 'math', } ceab_data = {} ceab_units = ( soup.find('table', id=regex_ceab) # CEAB table. .find_all('tr')[1] # Data is only in 2nd row. .find_all('td')[1:] # First cell is metadata. ) # Iteration by twos. Format: Name, Units. for i in range(0, len(ceab_units), 2): name = ceab_units[i].text.strip().strip(':') units = ceab_units[i + 1].text.strip().strip(':') ceab_data.update( {ceab_map[name]: float(units) if units else 0} ) return ceab_data department, course_code, course_name = filter_course_name(soup) # =========================== Course Detail =========================== academic_level = soup.find('span', id=regex_ac_lvl).text.strip() # Note: Anomaly scenario of LAW 696 having a range of units, such as # "2.00 - 8.00". This is handled by splitting and taking the larger # number. units = float( soup.find('span', id=regex_units).text.strip().split(' - ')[-1]) grading_basis = soup.find('span', id=regex_basis).text.strip() academic_group = soup.find('span', id=regex_ac_grp).text.strip() academic_org = soup.find('span', id=regex_ac_org).text.strip() # Some sections have no campus listed. campus_raw = soup.find('span', id=regex_campus) campus = campus_raw.text.strip() if campus_raw else 'None' # Course_components is a dict of data. course_components_rows = soup.find( 'table', id=regex_crse_cmps).find_all('tr')[1:] course_components = create_dict(course_components_rows, 'td', start=1) # NOTE: The following fields potentially could be missing data. # ======================= Enrollment Information ====================== enrollment_table = soup.find('table', id=regex_enroll_tbl) enrollment_info_rows = enrollment_table.find_all( 'tr')[1:] if enrollment_table else [] # Will not exist for 2nd half of full-year courses, like MATH 121B. enroll_info = create_dict( enrollment_info_rows, 'div', tag_id=regex_enroll_div, enroll=True) # ============================ Description ============================ description = filter_description(soup) # ============================ CEAB Units ============================= ceab_data = create_ceab_dict(soup) data = { 'id': '{}-{}'.format(department, course_code), 'department': department, 'course_code': course_code, 'course_name': course_name, 'campus': campus, 'description': description, 'grading_basis': grading_basis, 'course_components': course_components, 'requirements': enroll_info.get('requirements', ''), 'add_consent': enroll_info.get('add_consent', ''), 'drop_consent': enroll_info.get('drop_consent', ''), 'academic_level': academic_level, 'academic_group': academic_group, 'academic_org': academic_org, 'units': units, 'CEAB': ceab_data, } # Retain key-value order of dictionary. 
return OrderedDict(data) def _parse_course_section_data(self, soup, basic_data, section_name): day_map = { 'Mo': 'Monday', 'Tu': 'Tuesday', 'We': 'Wednesday', 'Th': 'Thursday', 'Fr': 'Friday', 'Sa': 'Saturday', 'Su': 'Sunday', } # =========================== Class Details =========================== _, year_term, section_type = soup.find( 'span', id='DERIVED_CLSRCH_SSS_PAGE_KEYDESCR').text.strip().split(' | ') # Trim spaces in 'Lecture / Discussion'. section_type = section_type.replace(' ', '') year, term = year_term.split(' ') section_number = soup.find( 'span', id='DERIVED_CLSRCH_DESCR200').text.strip().split(' - ')[1][:3] class_number = soup.find( 'span', id='SSR_CLS_DTL_WRK_CLASS_NBR').text.strip() # ======================== Meeting Information ======================== course_dates = [] # See how many rows of class times there are. date_rows = soup.find_all( 'tr', id=re.compile(r'trSSR_CLSRCH_MTG\$[0-9]+_row')) # Note: Some rows have dates such as "MoTu 9:30AM - 10:30AM". for date_row in date_rows: days = [] # NOTE: Some (incorrect) sections will have a missing day, such as # listings like "12:00AM - 12:00AM" instead of "Mo 8:30AM - 9:30AM" # Filter out hyphen to ensure the ordering of start/end indices # are consistent. date_times = date_row.find( 'span', id=re.compile(r'MTG_SCHED\$') ).text.strip().replace(' - ', ' ').split(' ') if 'TBA' in date_times: start_time = end_time = 'TBA' else: # No day is listed. Mark as null. day_str = date_times[0] if len(date_times) > 2 else 'n/a' start_time = parse_datetime(date_times[-2])[1][:5] end_time = parse_datetime(date_times[-1])[1][:5] for day_short, day_long in day_map.items(): if day_short in day_str: days.append(day_long) # If no day_str exists, mark day as n/a to be flagged later. if not days: days.append(day_str) location = soup.find( 'span', id=re.compile(r'MTG_LOC\$')).text.strip() instructors_raw = soup.find( 'span', id=re.compile(r'MTG_INSTR\$') ).text.strip().split(', \r') # Turn "Last,First" into "Last, First". instructors = [ins.replace(',', ', ') for ins in instructors_raw] # Start/end dates for a partcular SECTION. meeting_dates = soup.find( 'span', id=re.compile(r'MTG_DATE\$') ).text.strip().split(' - ') if 'TBA' in meeting_dates: start_date = end_date = 'TBA' else: start_date, end_date = [ parse_datetime(date)[0] for date in soup.find('span', id=re.compile(r'MTG_DATE\$')) .text.strip().split(' - ') ] course_date = { 'day': 'TBA' if 'TBA' in date_times else None, 'start_time': start_time, 'end_time': end_time, 'start_date': start_date, 'end_date': end_date, 'location': location, 'instructors': instructors, } if course_date['day'] == 'TBA': course_dates.append(OrderedDict(course_date)) else: for day in days: # Flag non-existent day as empty string. 
course_date['day'] = '' if day == 'n/a' else day course_dates.append(OrderedDict(course_date)) # ========================= Class Availability ======================== enrollment_capacity = int(soup.find( 'div', id='win0divSSR_CLS_DTL_WRK_ENRL_CAP').text.strip()) enrollment_total = int(soup.find( 'div', id='win0divSSR_CLS_DTL_WRK_ENRL_TOT').text.strip()) waitlist_capacity = int(soup.find( 'div', id='win0divSSR_CLS_DTL_WRK_WAIT_CAP').text.strip()) waitlist_total = int(soup.find( 'div', id='win0divSSR_CLS_DTL_WRK_WAIT_TOT').text.strip()) # ========================== Combined Section ========================= combined_with = [] combined_rows = soup.find_all( 'tr', id=re.compile(r'trSCTN_CMBND\$[0-9]+_row')) or [] for combined_row in combined_rows: combined_section_number = (combined_row.find( 'span', id=re.compile(r'CLASS_NAME\$') ).text.split('(')[1][:-1]) if combined_section_number != class_number: combined_with.append(combined_section_number) # Used for creating unique ID. code = basic_data.get('course_code', '') dept = basic_data.get('department', '') a_lvl = basic_data.get('academic_level', '') campus = basic_data.get('campus', '') cid = make_course_id(year, term, a_lvl, campus, dept, code, '-', False) course_data = { 'id': cid, 'year': year, 'term': term, 'department': dept, 'course_code': code, 'course_name': basic_data.get('course_name', ''), 'units': basic_data.get('units', ''), 'campus': campus, 'academic_level': a_lvl, } section_data = { 'section_name': section_name, 'section_type': section_type, 'section_number': section_number, 'class_number': class_number, 'dates': course_dates, 'combined_with': combined_with, 'enrollment_capacity': enrollment_capacity, 'enrollment_total': enrollment_total, 'waitlist_capacity': waitlist_capacity, 'waitlist_total': waitlist_total, 'last_updated': pendulum.now().isoformat(), } return OrderedDict(course_data), OrderedDict(section_data)
run_selenium_routine
identifier_name
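Here the held-out span is only an identifier: the name of the nested retry helper (run_selenium_routine) that wraps flaky Selenium lookups, retrying up to three times and logging each failure. Below is a standalone sketch of that retry pattern under illustrative names; note that, like the original, it falls through and returns None once the attempts are exhausted.

def retry_call(func, attempts=3):
    # Re-invoke func until it succeeds or the attempts run out.
    for attempt in range(attempts):
        try:
            return func()
        except Exception as ex:
            print('attempt %d failed: %s' % (attempt + 1, ex))
    return None

retry_call(lambda: 1 / 0)  # prints three failures, then returns None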
courses.py
""" quartzscrapers.scrapers.courses.courses ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module contains the classes for the worker threads, mangement of worker threads, and scrapers for course data. """ import re from queue import Queue from threading import Thread from collections import OrderedDict # Adds chromedriver_binary to path. import chromedriver_binary # noqa import pendulum from selenium import webdriver from selenium.webdriver.chrome.options import Options from ..utils import Scraper from ..utils.config import QUEENS_USERNAME, QUEENS_PASSWORD from .courses_helpers import ( setup_logging, parse_datetime, make_course_id, save_department_data, save_course_data, save_section_data, ) class Courses: """A scraper for Queen's courses on SOLUS. The Courses scraper creates 26 threads, one for each letter, to scrape several departments and their courses. It instantiates 26 Course workers, each of which creates a course session that handles a SOLUS login to grab its credentials via the cookies returned from a login. """ scraper_key = 'courses' location = './dumps/{}'.format(scraper_key) LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' @staticmethod def scrape(location='', *args, **kwargs): """Manage worker scrapers to parse information custom to SOLUS. Args: location (optional): String location of output files. """ if not location: location = Courses.location logger = setup_logging() logger.info('Starting Courses scrape') queue = Queue() for _ in Courses.LETTERS: course_worker = CourseWorker(queue, location) course_worker.daemon = True course_worker.start() for letter in Courses.LETTERS: queue.put(letter) queue.join() logger.info('Completed Courses scrape') class CourseWorker(Thread): """Worker thread for courses scraper.""" def __init__(self, queue, location): Thread.__init__(self) self.queue = queue self.location = location def run(self): """Instantiate CourseSession class to execute scraping.""" while True: letter = self.queue.get() course_scraper = CourseSession(self.location) course_scraper.scrape(letter) self.queue.task_done() class CourseSession: """A sub-scraper for Queen's courses.""" host = ('https://saself.ps.queensu.ca/psc/saself/EMPLOYEE/SA/c/' 'SA_LEARNER_SERVICES.SSS_BROWSE_CATLG_P.GBL') def __init__(self, location): self.scraper = Scraper() self.location = location self.logger = self.scraper.logger self.cookies = self._login() def scrape(self, letter): """Scrape information custom to SOLUS. Args: letter: A string of a letter related to course catalog. """ soup = self._request_page() departments = self._get_departments(soup, letter) self.logger.debug('Letter %s has %s depts.', letter, len(departments)) # For each department under a certain letter search. for department in departments: try: dept_data = self._parse_department_data(department) save_department_data(dept_data, self.scraper, self.location) courses = department.find_all( 'tr', id=re.compile('trCOURSE_LIST')) # For each course under a certain department. for course in courses: return_state = 'DERIVED_SAA_CRS_RETURN_PB$163$' try: course_number = course.find( 'a', id=re.compile(r'CRSE_NBR\$'))['id'] course_name = course.find( 'span', id=re.compile(r'CRSE_TITLE\$')).text if not course_number: self.logger.debug('Skipping non-existent course') continue if 'unspecified' in course_name.lower(): self.logger.debug('Skipping unspecified course') continue # Note: Selecting course only takes one parameter, # which is the ICAction. 
ic_action = {'ICAction': course_number} soup = self._request_page(ic_action) # Some courses have multiple offerings of the same # course, E.g: MATH121 offered on campus and online. # Check if table representing academic levels exists. if not self._has_multiple_course_offerings(soup): title = '' title_temp = soup.find( 'span', id='DERIVED_CRSECAT_DESCR200' ) if title_temp: title = title_temp.text.strip() self.logger.debug('Course title: %s', title) self._navigate_and_parse_course(soup) else: return_state = 'DERIVED_SSS_SEL_RETURN_PB$181$' title = soup.find( 'span', id='DERIVED_SSS_SEL_DESCR200' ).text.strip() self.logger.debug('Course title: %s', title) self.logger.debug('Multiple offerings found') academic_levels = self._get_academic_levels(soup) for academic_level in academic_levels: try: cr_number = academic_level['id'] cr_name = academic_level.text.strip() self.logger.debug('Career: %s', cr_name) # Go from a certain academic level to basic # course page. ic_action = {'ICAction': cr_number} soup = self._request_page(ic_action) self._navigate_and_parse_course(soup) except Exception: self.scraper.handle_error() self.logger.debug('Done careers.') except Exception: self.scraper.handle_error() # Go back to course listing. self.logger.debug('Returning to course list') ic_action = {'ICAction': return_state} self._request_page(ic_action) self.logger.debug('Done department') except Exception: self.scraper.handle_error() self.logger.debug('Done letter %s', letter) def _navigate_and_parse_course(self, soup): try: # Course parse. course_data = self._parse_course_data(soup) save_course_data(course_data, self.scraper, self.location) # Section(s) parse. if not self._has_course_sections(soup): self.logger.debug('No course sections. Skipping deep scrape') else: # Go to sections page. ic_action = {'ICAction': 'DERIVED_SAA_CRS_SSR_PB_GO'} soup = self._request_page(ic_action) terms = soup.find( 'select', id='DERIVED_SAA_CRS_TERM_ALT').find_all('option') self.logger.debug('%s terms available.', len(terms)) for term in terms: try: term_number = int(term['value']) self.logger.debug('Starting term: %s (%s)', term.text.strip(), term_number) payload = { 'ICAction': 'DERIVED_SAA_CRS_SSR_PB_GO$3$', 'DERIVED_SAA_CRS_TERM_ALT': term_number, } soup = self._request_page(payload) # NOTE: PeopleSoft maintains state of 'View All' for # sections per every other new section you select. # This means it only needs to be expanded ONCE. if self._is_view_sections_closed(soup): self.logger.debug( "'View All' tab is minimized. " "Requesting 'View All' for current term...") payload.update( {'ICAction': 'CLASS_TBL_VW5$hviewall$0'}) soup = self._request_page(payload) self.logger.debug("'View All' request complete.") sections = self._get_sections(soup) self.logger.debug('Total sections: %s', len(sections)) for section in sections: try: section_name = soup.find( 'a', id=section).text.strip().split(' ')[0] self.logger.debug( 'Section name: %s', section_name) # Go to sections page. payload.update({'ICAction': section}) section_soup = self._request_page(payload) section_base_data, section_data = ( self._parse_course_section_data( section_soup, course_data, section_name, ) ) save_section_data( section_base_data, section_data, self.scraper, self.location ) except Exception: self.scraper.handle_error() # Go back to sections. 
ic_action = { 'ICAction': 'CLASS_SRCH_WRK2_SSR_PB_CLOSE' } self._request_page(ic_action) self.logger.debug('Done term') except Exception: self.scraper.handle_error() self.logger.debug('Done course') except Exception: self.scraper.handle_error() ic_action = {'ICAction': 'DERIVED_SAA_CRS_RETURN_PB$163$'} self._request_page(ic_action) def _login(self): # Emulate a SOLUS login via a Selenium webdriver. Mainly used for user # authentication. Returns session cookies, which are retrieved and used # for the remainder of this scraping session. def run_selenium_routine(func): """Execute Selenium task and retry upon failure.""" retries = 0 while retries < 3: try: return func() except Exception as ex: self.logger.error( 'Selenium error #%s: %s', retries + 1, ex, exc_info=True) retries += 1 continue self.logger.info('Running webdriver for authentication...') chrome_options = Options() # Prevent images from loading. prefs = {'profile.managed_default_content_settings.images': 2} chrome_options.add_argument('--headless') chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--disable-dev-shm-usage') chrome_options.add_experimental_option('prefs', prefs) driver = webdriver.Chrome(chrome_options=chrome_options) # Timeout to for an element to be found. driver.implicitly_wait(30) driver.set_page_load_timeout(30) driver.get('https://my.queensu.ca') # Sometimes, Selenium errors out when searching for certain fields. # Retry this routine until it succeeds. run_selenium_routine( lambda: driver.find_element_by_id('username').send_keys( QUEENS_USERNAME ) ) run_selenium_routine( lambda: driver.find_element_by_id('password').send_keys( QUEENS_PASSWORD ) ) run_selenium_routine( lambda: driver.find_element_by_class_name('form-button').click() ) run_selenium_routine( lambda: driver.find_element_by_class_name('solus-tab').click() ) iframe = run_selenium_routine( lambda: driver.find_element_by_id('ptifrmtgtframe') ) driver.switch_to_frame(iframe) run_selenium_routine( lambda: driver.find_element_by_link_text('Search').click() ) session_cookies = {} for cookie in driver.get_cookies(): session_cookies[cookie['name']] = cookie['value'] driver.close() self.logger.info('Webdriver authentication complete') return session_cookies def _request_page(self, params=None): return self.scraper.http_request( url=self.host, params=params, cookies=self.cookies ) def _get_hidden_params(self, soup): # Parses HTML for hidden values that represent SOLUS parameters. SOLUS # uses dynamic parameters to represent user state given certain actions # taken. params = {} hidden = soup.find('div', id=re.compile(r'win\ddivPSHIDDENFIELDS')) if not hidden: hidden = soup.find( 'field', id=re.compile(r'win\ddivPSHIDDENFIELDS')) params.update({ x.get('name'): x.get('value') for x in hidden.find_all('input') }) return params def _get_departments(self, soup, letter): # Click and expand a certain letter to see departments. # E.g.: 'A' has AGHE, ANAT, 'B' has BIOL, BCMP, etc. def update_params_and_make_request(soup, ic_action): """Update payload with hidden params and request page.""" payload = self._get_hidden_params(soup) payload.update(ic_action) soup = self._request_page(payload) return soup # Get all departments for a certain letter. ic_action = { 'ICAction': 'DERIVED_SSS_BCC_SSR_ALPHANUM_{}'.format(letter) } soup = update_params_and_make_request(soup, ic_action) # Expand all department courses. 
ic_action = {'ICAction': 'DERIVED_SSS_BCC_SSS_EXPAND_ALL$97$'} soup = update_params_and_make_request(soup, ic_action) departments = soup.find_all( 'table', id=re.compile('ACE_DERIVED_SSS_BCC_GROUP_BOX_1') ) return departments def _get_sections(self, soup): return [sec['id'] for sec in soup.find_all( 'a', id=re.compile(r'CLASS_SECTION\$'))] def _has_multiple_course_offerings(self, soup): return soup.find('table', id='CRSE_OFFERINGS$scroll$0') def _has_course_sections(self, soup): return soup.find('input', id='DERIVED_SAA_CRS_SSR_PB_GO') def _is_view_sections_closed(self, soup): view_all_tab = soup.find('a', id='CLASS_TBL_VW5$hviewall$0') return view_all_tab and 'View All' in view_all_tab def _get_academic_levels(self, soup): return [url for url in soup.find_all('a', id=re.compile(r'CAREER\$'))] def _parse_department_data(self, department): regex_title = re.compile(r'DERIVED_SSS_BCC_GROUP_BOX_1\$147\$\$span\$') dept_str = department.find('span', id=regex_title).text.strip() self.logger.debug('Department: %s', dept_str) # Some departments have more than one hypen, such as # "MEI - Entrepreneur & Innov - Masters". # Find first index of '-' to split code from name. name_idx = dept_str.find('-') code = dept_str[:name_idx].strip() name = dept_str[name_idx + 2:].strip() data = { 'id': code, 'code': code, 'name': name, } return data def _parse_course_data(self, soup): # All HTML IDs used via regular expressions. regex_title = re.compile('DERIVED_CRSECAT_DESCR200') regex_campus = re.compile('CAMPUS_TBL_DESCR') regex_desc = re.compile('SSR_CRSE_OFF_VW_DESCRLONG') regex_units = re.compile('DERIVED_CRSECAT_UNITS_RANGE') regex_basis = re.compile('SSR_CRSE_OFF_VW_GRADING_BASIS') regex_ac_lvl = re.compile('SSR_CRSE_OFF_VW_ACAD_CAREER') regex_ac_grp = re.compile('ACAD_GROUP_TBL_DESCR') regex_ac_org = re.compile('ACAD_ORG_TBL_DESCR') regex_crse_cmps = re.compile('ACE_SSR_DUMMY_RECVW') regex_enroll_tbl = re.compile('ACE_DERIVED_CRSECAT_SSR_GROUP2') regex_enroll_div = re.compile('win0div') regex_ceab = re.compile('ACE_DERIVED_CLSRCH') def filter_course_name(soup):
def filter_description(soup): """Filter description for the course description text only.""" # TODO: Filter different text sections from description, such as # 'NOTE', 'LEARNING HOURS', etc. descr_raw = soup.find('span', id=regex_desc) if not descr_raw: return '' # If <br/> tags exist, there will be additional information other # than the description. Filter for description only. if descr_raw.find_all('br'): return descr_raw.find_all('br')[0].previous_sibling return descr_raw.text.encode('ascii', 'ignore').decode().strip() def create_dict(rows, tag, tag_id=None, start=0, enroll=False): """Create dictionary out of BeautifulSoup objects. Args: rows: List of BeautifulSoup element tags. tag: String of certain element tag to find with BeautifulSoup. tag_id: String of an element tag's ID to search for. start: Numerical index of where to preprocess string name. enroll: Boolean to determine if for the enrollment section. Returns: Dictionary of data. """ enrollment_info_map = { 'Enrollment Requirement': 'requirements', 'Add Consent': 'add_consent', 'Drop Consent': 'drop_consent', } data = {} for row in rows: name_raw, desc_raw = row.find_all(tag, id=tag_id)[start:] name = name_raw.text.strip() desc = desc_raw.text.encode('ascii', 'ignore').decode().strip() if enroll: name = enrollment_info_map[name] else: name = name.lower().replace(' / ', '_') data.update({name: desc}) return data def create_ceab_dict(soup): """Create dictionary out of CEAB BeautifulSoup HTML object.""" ceab_map = { 'Basic Sci': 'basic_sci', 'Comp St': 'comp_st', 'End Des': 'end_des', 'Eng Sci': 'eng_sci', 'Math': 'math', } ceab_data = {} ceab_units = ( soup.find('table', id=regex_ceab) # CEAB table. .find_all('tr')[1] # Data is only in 2nd row. .find_all('td')[1:] # First cell is metadata. ) # Iteration by twos. Format: Name, Units. for i in range(0, len(ceab_units), 2): name = ceab_units[i].text.strip().strip(':') units = ceab_units[i + 1].text.strip().strip(':') ceab_data.update( {ceab_map[name]: float(units) if units else 0} ) return ceab_data department, course_code, course_name = filter_course_name(soup) # =========================== Course Detail =========================== academic_level = soup.find('span', id=regex_ac_lvl).text.strip() # Note: Anomaly scenario of LAW 696 having a range of units, such as # "2.00 - 8.00". This is handled by splitting and taking the larger # number. units = float( soup.find('span', id=regex_units).text.strip().split(' - ')[-1]) grading_basis = soup.find('span', id=regex_basis).text.strip() academic_group = soup.find('span', id=regex_ac_grp).text.strip() academic_org = soup.find('span', id=regex_ac_org).text.strip() # Some sections have no campus listed. campus_raw = soup.find('span', id=regex_campus) campus = campus_raw.text.strip() if campus_raw else 'None' # Course_components is a dict of data. course_components_rows = soup.find( 'table', id=regex_crse_cmps).find_all('tr')[1:] course_components = create_dict(course_components_rows, 'td', start=1) # NOTE: The following fields potentially could be missing data. # ======================= Enrollment Information ====================== enrollment_table = soup.find('table', id=regex_enroll_tbl) enrollment_info_rows = enrollment_table.find_all( 'tr')[1:] if enrollment_table else [] # Will not exist for 2nd half of full-year courses, like MATH 121B. 
enroll_info = create_dict( enrollment_info_rows, 'div', tag_id=regex_enroll_div, enroll=True) # ============================ Description ============================ description = filter_description(soup) # ============================ CEAB Units ============================= ceab_data = create_ceab_dict(soup) data = { 'id': '{}-{}'.format(department, course_code), 'department': department, 'course_code': course_code, 'course_name': course_name, 'campus': campus, 'description': description, 'grading_basis': grading_basis, 'course_components': course_components, 'requirements': enroll_info.get('requirements', ''), 'add_consent': enroll_info.get('add_consent', ''), 'drop_consent': enroll_info.get('drop_consent', ''), 'academic_level': academic_level, 'academic_group': academic_group, 'academic_org': academic_org, 'units': units, 'CEAB': ceab_data, } # Retain key-value order of dictionary. return OrderedDict(data) def _parse_course_section_data(self, soup, basic_data, section_name): day_map = { 'Mo': 'Monday', 'Tu': 'Tuesday', 'We': 'Wednesday', 'Th': 'Thursday', 'Fr': 'Friday', 'Sa': 'Saturday', 'Su': 'Sunday', } # =========================== Class Details =========================== _, year_term, section_type = soup.find( 'span', id='DERIVED_CLSRCH_SSS_PAGE_KEYDESCR').text.strip().split(' | ') # Trim spaces in 'Lecture / Discussion'. section_type = section_type.replace(' ', '') year, term = year_term.split(' ') section_number = soup.find( 'span', id='DERIVED_CLSRCH_DESCR200').text.strip().split(' - ')[1][:3] class_number = soup.find( 'span', id='SSR_CLS_DTL_WRK_CLASS_NBR').text.strip() # ======================== Meeting Information ======================== course_dates = [] # See how many rows of class times there are. date_rows = soup.find_all( 'tr', id=re.compile(r'trSSR_CLSRCH_MTG\$[0-9]+_row')) # Note: Some rows have dates such as "MoTu 9:30AM - 10:30AM". for date_row in date_rows: days = [] # NOTE: Some (incorrect) sections will have a missing day, such as # listings like "12:00AM - 12:00AM" instead of "Mo 8:30AM - 9:30AM" # Filter out hyphen to ensure the ordering of start/end indices # are consistent. date_times = date_row.find( 'span', id=re.compile(r'MTG_SCHED\$') ).text.strip().replace(' - ', ' ').split(' ') if 'TBA' in date_times: start_time = end_time = 'TBA' else: # No day is listed. Mark as null. day_str = date_times[0] if len(date_times) > 2 else 'n/a' start_time = parse_datetime(date_times[-2])[1][:5] end_time = parse_datetime(date_times[-1])[1][:5] for day_short, day_long in day_map.items(): if day_short in day_str: days.append(day_long) # If no day_str exists, mark day as n/a to be flagged later. if not days: days.append(day_str) location = soup.find( 'span', id=re.compile(r'MTG_LOC\$')).text.strip() instructors_raw = soup.find( 'span', id=re.compile(r'MTG_INSTR\$') ).text.strip().split(', \r') # Turn "Last,First" into "Last, First". instructors = [ins.replace(',', ', ') for ins in instructors_raw] # Start/end dates for a partcular SECTION. 
meeting_dates = soup.find( 'span', id=re.compile(r'MTG_DATE\$') ).text.strip().split(' - ') if 'TBA' in meeting_dates: start_date = end_date = 'TBA' else: start_date, end_date = [ parse_datetime(date)[0] for date in soup.find('span', id=re.compile(r'MTG_DATE\$')) .text.strip().split(' - ') ] course_date = { 'day': 'TBA' if 'TBA' in date_times else None, 'start_time': start_time, 'end_time': end_time, 'start_date': start_date, 'end_date': end_date, 'location': location, 'instructors': instructors, } if course_date['day'] == 'TBA': course_dates.append(OrderedDict(course_date)) else: for day in days: # Flag non-existent day as empty string. course_date['day'] = '' if day == 'n/a' else day course_dates.append(OrderedDict(course_date)) # ========================= Class Availability ======================== enrollment_capacity = int(soup.find( 'div', id='win0divSSR_CLS_DTL_WRK_ENRL_CAP').text.strip()) enrollment_total = int(soup.find( 'div', id='win0divSSR_CLS_DTL_WRK_ENRL_TOT').text.strip()) waitlist_capacity = int(soup.find( 'div', id='win0divSSR_CLS_DTL_WRK_WAIT_CAP').text.strip()) waitlist_total = int(soup.find( 'div', id='win0divSSR_CLS_DTL_WRK_WAIT_TOT').text.strip()) # ========================== Combined Section ========================= combined_with = [] combined_rows = soup.find_all( 'tr', id=re.compile(r'trSCTN_CMBND\$[0-9]+_row')) or [] for combined_row in combined_rows: combined_section_number = (combined_row.find( 'span', id=re.compile(r'CLASS_NAME\$') ).text.split('(')[1][:-1]) if combined_section_number != class_number: combined_with.append(combined_section_number) # Used for creating unique ID. code = basic_data.get('course_code', '') dept = basic_data.get('department', '') a_lvl = basic_data.get('academic_level', '') campus = basic_data.get('campus', '') cid = make_course_id(year, term, a_lvl, campus, dept, code, '-', False) course_data = { 'id': cid, 'year': year, 'term': term, 'department': dept, 'course_code': code, 'course_name': basic_data.get('course_name', ''), 'units': basic_data.get('units', ''), 'campus': campus, 'academic_level': a_lvl, } section_data = { 'section_name': section_name, 'section_type': section_type, 'section_number': section_number, 'class_number': class_number, 'dates': course_dates, 'combined_with': combined_with, 'enrollment_capacity': enrollment_capacity, 'enrollment_total': enrollment_total, 'waitlist_capacity': waitlist_capacity, 'waitlist_total': waitlist_total, 'last_updated': pendulum.now().isoformat(), } return OrderedDict(course_data), OrderedDict(section_data)
"""Preprocess and reformat course name.""" course_title = soup.find('span', id=regex_title).text.strip() name_idx = course_title.find('-') dept_raw, course_code_raw = course_title[:name_idx - 1].split(' ') course_name = course_title[name_idx + 1:].strip() dept = dept_raw.encode('ascii', 'ignore').decode().strip() course_code = course_code_raw.encode( 'ascii', 'ignore').decode().strip() return dept, course_code, course_name
identifier_body
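This record holds out a whole function body: filter_course_name, which splits a SOLUS course title of the form 'DEPT CODE - Name' around the first hyphen. A worked example of that slicing with a made-up title:

course_title = 'ANAT 100 - Introductory Human Anatomy'
name_idx = course_title.find('-')                      # 9
dept, code = course_title[:name_idx - 1].split(' ')    # 'ANAT', '100'
name = course_title[name_idx + 1:].strip()             # 'Introductory Human Anatomy'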
courses.py
""" quartzscrapers.scrapers.courses.courses ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ This module contains the classes for the worker threads, mangement of worker threads, and scrapers for course data. """ import re from queue import Queue from threading import Thread from collections import OrderedDict # Adds chromedriver_binary to path. import chromedriver_binary # noqa import pendulum from selenium import webdriver from selenium.webdriver.chrome.options import Options from ..utils import Scraper from ..utils.config import QUEENS_USERNAME, QUEENS_PASSWORD from .courses_helpers import ( setup_logging, parse_datetime, make_course_id, save_department_data, save_course_data, save_section_data, ) class Courses: """A scraper for Queen's courses on SOLUS. The Courses scraper creates 26 threads, one for each letter, to scrape several departments and their courses. It instantiates 26 Course workers, each of which creates a course session that handles a SOLUS login to grab its credentials via the cookies returned from a login. """ scraper_key = 'courses' location = './dumps/{}'.format(scraper_key) LETTERS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' @staticmethod def scrape(location='', *args, **kwargs): """Manage worker scrapers to parse information custom to SOLUS. Args: location (optional): String location of output files. """ if not location: location = Courses.location logger = setup_logging() logger.info('Starting Courses scrape') queue = Queue() for _ in Courses.LETTERS: course_worker = CourseWorker(queue, location) course_worker.daemon = True course_worker.start() for letter in Courses.LETTERS: queue.put(letter) queue.join() logger.info('Completed Courses scrape') class CourseWorker(Thread): """Worker thread for courses scraper.""" def __init__(self, queue, location): Thread.__init__(self) self.queue = queue self.location = location def run(self): """Instantiate CourseSession class to execute scraping.""" while True: letter = self.queue.get() course_scraper = CourseSession(self.location) course_scraper.scrape(letter) self.queue.task_done() class CourseSession: """A sub-scraper for Queen's courses.""" host = ('https://saself.ps.queensu.ca/psc/saself/EMPLOYEE/SA/c/' 'SA_LEARNER_SERVICES.SSS_BROWSE_CATLG_P.GBL') def __init__(self, location): self.scraper = Scraper() self.location = location self.logger = self.scraper.logger self.cookies = self._login() def scrape(self, letter): """Scrape information custom to SOLUS. Args: letter: A string of a letter related to course catalog. """ soup = self._request_page() departments = self._get_departments(soup, letter) self.logger.debug('Letter %s has %s depts.', letter, len(departments)) # For each department under a certain letter search. for department in departments: try: dept_data = self._parse_department_data(department) save_department_data(dept_data, self.scraper, self.location) courses = department.find_all( 'tr', id=re.compile('trCOURSE_LIST')) # For each course under a certain department. for course in courses: return_state = 'DERIVED_SAA_CRS_RETURN_PB$163$' try: course_number = course.find( 'a', id=re.compile(r'CRSE_NBR\$'))['id'] course_name = course.find( 'span', id=re.compile(r'CRSE_TITLE\$')).text if not course_number: self.logger.debug('Skipping non-existent course') continue if 'unspecified' in course_name.lower(): self.logger.debug('Skipping unspecified course') continue # Note: Selecting course only takes one parameter, # which is the ICAction. 
ic_action = {'ICAction': course_number} soup = self._request_page(ic_action) # Some courses have multiple offerings of the same # course, E.g: MATH121 offered on campus and online. # Check if table representing academic levels exists. if not self._has_multiple_course_offerings(soup): title = '' title_temp = soup.find( 'span', id='DERIVED_CRSECAT_DESCR200' ) if title_temp: title = title_temp.text.strip() self.logger.debug('Course title: %s', title) self._navigate_and_parse_course(soup) else: return_state = 'DERIVED_SSS_SEL_RETURN_PB$181$' title = soup.find( 'span', id='DERIVED_SSS_SEL_DESCR200' ).text.strip() self.logger.debug('Course title: %s', title) self.logger.debug('Multiple offerings found') academic_levels = self._get_academic_levels(soup) for academic_level in academic_levels: try: cr_number = academic_level['id'] cr_name = academic_level.text.strip() self.logger.debug('Career: %s', cr_name) # Go from a certain academic level to basic # course page. ic_action = {'ICAction': cr_number} soup = self._request_page(ic_action) self._navigate_and_parse_course(soup) except Exception: self.scraper.handle_error() self.logger.debug('Done careers.') except Exception: self.scraper.handle_error() # Go back to course listing. self.logger.debug('Returning to course list') ic_action = {'ICAction': return_state} self._request_page(ic_action) self.logger.debug('Done department') except Exception: self.scraper.handle_error() self.logger.debug('Done letter %s', letter) def _navigate_and_parse_course(self, soup): try: # Course parse. course_data = self._parse_course_data(soup) save_course_data(course_data, self.scraper, self.location) # Section(s) parse. if not self._has_course_sections(soup): self.logger.debug('No course sections. Skipping deep scrape') else: # Go to sections page. ic_action = {'ICAction': 'DERIVED_SAA_CRS_SSR_PB_GO'} soup = self._request_page(ic_action) terms = soup.find( 'select', id='DERIVED_SAA_CRS_TERM_ALT').find_all('option') self.logger.debug('%s terms available.', len(terms)) for term in terms: try: term_number = int(term['value']) self.logger.debug('Starting term: %s (%s)', term.text.strip(), term_number) payload = { 'ICAction': 'DERIVED_SAA_CRS_SSR_PB_GO$3$', 'DERIVED_SAA_CRS_TERM_ALT': term_number, } soup = self._request_page(payload) # NOTE: PeopleSoft maintains state of 'View All' for # sections per every other new section you select. # This means it only needs to be expanded ONCE. if self._is_view_sections_closed(soup): self.logger.debug( "'View All' tab is minimized. " "Requesting 'View All' for current term...") payload.update( {'ICAction': 'CLASS_TBL_VW5$hviewall$0'}) soup = self._request_page(payload) self.logger.debug("'View All' request complete.") sections = self._get_sections(soup) self.logger.debug('Total sections: %s', len(sections)) for section in sections: try: section_name = soup.find( 'a', id=section).text.strip().split(' ')[0] self.logger.debug( 'Section name: %s', section_name) # Go to sections page. payload.update({'ICAction': section}) section_soup = self._request_page(payload) section_base_data, section_data = ( self._parse_course_section_data( section_soup, course_data, section_name, ) ) save_section_data( section_base_data, section_data, self.scraper, self.location ) except Exception: self.scraper.handle_error() # Go back to sections. 
ic_action = { 'ICAction': 'CLASS_SRCH_WRK2_SSR_PB_CLOSE' } self._request_page(ic_action) self.logger.debug('Done term') except Exception: self.scraper.handle_error() self.logger.debug('Done course') except Exception: self.scraper.handle_error() ic_action = {'ICAction': 'DERIVED_SAA_CRS_RETURN_PB$163$'} self._request_page(ic_action) def _login(self): # Emulate a SOLUS login via a Selenium webdriver. Mainly used for user # authentication. Returns session cookies, which are retrieved and used # for the remainder of this scraping session. def run_selenium_routine(func): """Execute Selenium task and retry upon failure.""" retries = 0 while retries < 3: try: return func() except Exception as ex: self.logger.error( 'Selenium error #%s: %s', retries + 1, ex, exc_info=True) retries += 1 continue self.logger.info('Running webdriver for authentication...') chrome_options = Options() # Prevent images from loading. prefs = {'profile.managed_default_content_settings.images': 2} chrome_options.add_argument('--headless') chrome_options.add_argument('--no-sandbox') chrome_options.add_argument('--disable-dev-shm-usage') chrome_options.add_experimental_option('prefs', prefs) driver = webdriver.Chrome(chrome_options=chrome_options) # Timeout for an element to be found. driver.implicitly_wait(30) driver.set_page_load_timeout(30) driver.get('https://my.queensu.ca') # Sometimes, Selenium errors out when searching for certain fields. # Retry this routine until it succeeds. run_selenium_routine( lambda: driver.find_element_by_id('username').send_keys( QUEENS_USERNAME ) ) run_selenium_routine( lambda: driver.find_element_by_id('password').send_keys( QUEENS_PASSWORD ) ) run_selenium_routine( lambda: driver.find_element_by_class_name('form-button').click() ) run_selenium_routine( lambda: driver.find_element_by_class_name('solus-tab').click() ) iframe = run_selenium_routine( lambda: driver.find_element_by_id('ptifrmtgtframe') ) driver.switch_to.frame(iframe) run_selenium_routine( lambda: driver.find_element_by_link_text('Search').click() ) session_cookies = {} for cookie in driver.get_cookies(): session_cookies[cookie['name']] = cookie['value'] driver.close() self.logger.info('Webdriver authentication complete') return session_cookies def _request_page(self, params=None): return self.scraper.http_request( url=self.host, params=params, cookies=self.cookies ) def _get_hidden_params(self, soup): # Parses HTML for hidden values that represent SOLUS parameters. SOLUS # uses dynamic parameters to represent user state given certain actions # taken. params = {} hidden = soup.find('div', id=re.compile(r'win\ddivPSHIDDENFIELDS')) if not hidden: hidden = soup.find( 'field', id=re.compile(r'win\ddivPSHIDDENFIELDS')) params.update({ x.get('name'): x.get('value') for x in hidden.find_all('input') }) return params def _get_departments(self, soup, letter): # Click and expand a certain letter to see departments. # E.g.: 'A' has AGHE, ANAT, 'B' has BIOL, BCMP, etc. def update_params_and_make_request(soup, ic_action): """Update payload with hidden params and request page.""" payload = self._get_hidden_params(soup) payload.update(ic_action) soup = self._request_page(payload) return soup # Get all departments for a certain letter. ic_action = { 'ICAction': 'DERIVED_SSS_BCC_SSR_ALPHANUM_{}'.format(letter) } soup = update_params_and_make_request(soup, ic_action) # Expand all department courses.
ic_action = {'ICAction': 'DERIVED_SSS_BCC_SSS_EXPAND_ALL$97$'} soup = update_params_and_make_request(soup, ic_action) departments = soup.find_all( 'table', id=re.compile('ACE_DERIVED_SSS_BCC_GROUP_BOX_1') ) return departments def _get_sections(self, soup): return [sec['id'] for sec in soup.find_all( 'a', id=re.compile(r'CLASS_SECTION\$'))] def _has_multiple_course_offerings(self, soup): return soup.find('table', id='CRSE_OFFERINGS$scroll$0') def _has_course_sections(self, soup): return soup.find('input', id='DERIVED_SAA_CRS_SSR_PB_GO') def _is_view_sections_closed(self, soup): view_all_tab = soup.find('a', id='CLASS_TBL_VW5$hviewall$0') return view_all_tab and 'View All' in view_all_tab.text def _get_academic_levels(self, soup): return [url for url in soup.find_all('a', id=re.compile(r'CAREER\$'))] def _parse_department_data(self, department): regex_title = re.compile(r'DERIVED_SSS_BCC_GROUP_BOX_1\$147\$\$span\$') dept_str = department.find('span', id=regex_title).text.strip() self.logger.debug('Department: %s', dept_str) # Some departments have more than one hyphen, such as # "MEI - Entrepreneur & Innov - Masters". # Find first index of '-' to split code from name. name_idx = dept_str.find('-') code = dept_str[:name_idx].strip() name = dept_str[name_idx + 2:].strip() data = { 'id': code, 'code': code, 'name': name, } return data def _parse_course_data(self, soup): # All HTML IDs used via regular expressions. regex_title = re.compile('DERIVED_CRSECAT_DESCR200') regex_campus = re.compile('CAMPUS_TBL_DESCR') regex_desc = re.compile('SSR_CRSE_OFF_VW_DESCRLONG') regex_units = re.compile('DERIVED_CRSECAT_UNITS_RANGE') regex_basis = re.compile('SSR_CRSE_OFF_VW_GRADING_BASIS') regex_ac_lvl = re.compile('SSR_CRSE_OFF_VW_ACAD_CAREER') regex_ac_grp = re.compile('ACAD_GROUP_TBL_DESCR') regex_ac_org = re.compile('ACAD_ORG_TBL_DESCR') regex_crse_cmps = re.compile('ACE_SSR_DUMMY_RECVW') regex_enroll_tbl = re.compile('ACE_DERIVED_CRSECAT_SSR_GROUP2') regex_enroll_div = re.compile('win0div') regex_ceab = re.compile('ACE_DERIVED_CLSRCH') def filter_course_name(soup): """Preprocess and reformat course name.""" course_title = soup.find('span', id=regex_title).text.strip() name_idx = course_title.find('-') dept_raw, course_code_raw = course_title[:name_idx - 1].split(' ') course_name = course_title[name_idx + 1:].strip() dept = dept_raw.encode('ascii', 'ignore').decode().strip() course_code = course_code_raw.encode( 'ascii', 'ignore').decode().strip() return dept, course_code, course_name def filter_description(soup): """Filter description for the course description text only.""" # TODO: Filter different text sections from description, such as # 'NOTE', 'LEARNING HOURS', etc. descr_raw = soup.find('span', id=regex_desc) if not descr_raw: return '' # If <br/> tags exist, there will be additional information other # than the description. Filter for description only.
return descr_raw.text.encode('ascii', 'ignore').decode().strip() def create_dict(rows, tag, tag_id=None, start=0, enroll=False): """Create dictionary out of BeautifulSoup objects. Args: rows: List of BeautifulSoup element tags. tag: String of certain element tag to find with BeautifulSoup. tag_id: String of an element tag's ID to search for. start: Numerical index of where to preprocess string name. enroll: Boolean to determine if for the enrollment section. Returns: Dictionary of data. """ enrollment_info_map = { 'Enrollment Requirement': 'requirements', 'Add Consent': 'add_consent', 'Drop Consent': 'drop_consent', } data = {} for row in rows: name_raw, desc_raw = row.find_all(tag, id=tag_id)[start:] name = name_raw.text.strip() desc = desc_raw.text.encode('ascii', 'ignore').decode().strip() if enroll: name = enrollment_info_map[name] else: name = name.lower().replace(' / ', '_') data.update({name: desc}) return data def create_ceab_dict(soup): """Create dictionary out of CEAB BeautifulSoup HTML object.""" ceab_map = { 'Basic Sci': 'basic_sci', 'Comp St': 'comp_st', 'End Des': 'end_des', 'Eng Sci': 'eng_sci', 'Math': 'math', } ceab_data = {} ceab_units = ( soup.find('table', id=regex_ceab) # CEAB table. .find_all('tr')[1] # Data is only in 2nd row. .find_all('td')[1:] # First cell is metadata. ) # Iteration by twos. Format: Name, Units. for i in range(0, len(ceab_units), 2): name = ceab_units[i].text.strip().strip(':') units = ceab_units[i + 1].text.strip().strip(':') ceab_data.update( {ceab_map[name]: float(units) if units else 0} ) return ceab_data department, course_code, course_name = filter_course_name(soup) # =========================== Course Detail =========================== academic_level = soup.find('span', id=regex_ac_lvl).text.strip() # Note: Anomaly scenario of LAW 696 having a range of units, such as # "2.00 - 8.00". This is handled by splitting and taking the larger # number. units = float( soup.find('span', id=regex_units).text.strip().split(' - ')[-1]) grading_basis = soup.find('span', id=regex_basis).text.strip() academic_group = soup.find('span', id=regex_ac_grp).text.strip() academic_org = soup.find('span', id=regex_ac_org).text.strip() # Some sections have no campus listed. campus_raw = soup.find('span', id=regex_campus) campus = campus_raw.text.strip() if campus_raw else 'None' # Course_components is a dict of data. course_components_rows = soup.find( 'table', id=regex_crse_cmps).find_all('tr')[1:] course_components = create_dict(course_components_rows, 'td', start=1) # NOTE: The following fields potentially could be missing data. # ======================= Enrollment Information ====================== enrollment_table = soup.find('table', id=regex_enroll_tbl) enrollment_info_rows = enrollment_table.find_all( 'tr')[1:] if enrollment_table else [] # Will not exist for 2nd half of full-year courses, like MATH 121B. 
enroll_info = create_dict( enrollment_info_rows, 'div', tag_id=regex_enroll_div, enroll=True) # ============================ Description ============================ description = filter_description(soup) # ============================ CEAB Units ============================= ceab_data = create_ceab_dict(soup) data = { 'id': '{}-{}'.format(department, course_code), 'department': department, 'course_code': course_code, 'course_name': course_name, 'campus': campus, 'description': description, 'grading_basis': grading_basis, 'course_components': course_components, 'requirements': enroll_info.get('requirements', ''), 'add_consent': enroll_info.get('add_consent', ''), 'drop_consent': enroll_info.get('drop_consent', ''), 'academic_level': academic_level, 'academic_group': academic_group, 'academic_org': academic_org, 'units': units, 'CEAB': ceab_data, } # Retain key-value order of dictionary. return OrderedDict(data) def _parse_course_section_data(self, soup, basic_data, section_name): day_map = { 'Mo': 'Monday', 'Tu': 'Tuesday', 'We': 'Wednesday', 'Th': 'Thursday', 'Fr': 'Friday', 'Sa': 'Saturday', 'Su': 'Sunday', } # =========================== Class Details =========================== _, year_term, section_type = soup.find( 'span', id='DERIVED_CLSRCH_SSS_PAGE_KEYDESCR').text.strip().split(' | ') # Trim spaces in 'Lecture / Discussion'. section_type = section_type.replace(' ', '') year, term = year_term.split(' ') section_number = soup.find( 'span', id='DERIVED_CLSRCH_DESCR200').text.strip().split(' - ')[1][:3] class_number = soup.find( 'span', id='SSR_CLS_DTL_WRK_CLASS_NBR').text.strip() # ======================== Meeting Information ======================== course_dates = [] # See how many rows of class times there are. date_rows = soup.find_all( 'tr', id=re.compile(r'trSSR_CLSRCH_MTG\$[0-9]+_row')) # Note: Some rows have dates such as "MoTu 9:30AM - 10:30AM". for date_row in date_rows: days = [] # NOTE: Some (incorrect) sections will have a missing day, such as # listings like "12:00AM - 12:00AM" instead of "Mo 8:30AM - 9:30AM" # Filter out hyphen to ensure the ordering of start/end indices # is consistent. date_times = date_row.find( 'span', id=re.compile(r'MTG_SCHED\$') ).text.strip().replace(' - ', ' ').split(' ') if 'TBA' in date_times: start_time = end_time = 'TBA' else: # No day is listed. Mark as null. day_str = date_times[0] if len(date_times) > 2 else 'n/a' start_time = parse_datetime(date_times[-2])[1][:5] end_time = parse_datetime(date_times[-1])[1][:5] for day_short, day_long in day_map.items(): if day_short in day_str: days.append(day_long) # If no day_str exists, mark day as n/a to be flagged later. if not days: days.append(day_str) location = soup.find( 'span', id=re.compile(r'MTG_LOC\$')).text.strip() instructors_raw = soup.find( 'span', id=re.compile(r'MTG_INSTR\$') ).text.strip().split(', \r') # Turn "Last,First" into "Last, First". instructors = [ins.replace(',', ', ') for ins in instructors_raw] # Start/end dates for a particular SECTION.
meeting_dates = soup.find( 'span', id=re.compile(r'MTG_DATE\$') ).text.strip().split(' - ') if 'TBA' in meeting_dates: start_date = end_date = 'TBA' else: start_date, end_date = [ parse_datetime(date)[0] for date in soup.find('span', id=re.compile(r'MTG_DATE\$')) .text.strip().split(' - ') ] course_date = { 'day': 'TBA' if 'TBA' in date_times else None, 'start_time': start_time, 'end_time': end_time, 'start_date': start_date, 'end_date': end_date, 'location': location, 'instructors': instructors, } if course_date['day'] == 'TBA': course_dates.append(OrderedDict(course_date)) else: for day in days: # Flag non-existent day as empty string. course_date['day'] = '' if day == 'n/a' else day course_dates.append(OrderedDict(course_date)) # ========================= Class Availability ======================== enrollment_capacity = int(soup.find( 'div', id='win0divSSR_CLS_DTL_WRK_ENRL_CAP').text.strip()) enrollment_total = int(soup.find( 'div', id='win0divSSR_CLS_DTL_WRK_ENRL_TOT').text.strip()) waitlist_capacity = int(soup.find( 'div', id='win0divSSR_CLS_DTL_WRK_WAIT_CAP').text.strip()) waitlist_total = int(soup.find( 'div', id='win0divSSR_CLS_DTL_WRK_WAIT_TOT').text.strip()) # ========================== Combined Section ========================= combined_with = [] combined_rows = soup.find_all( 'tr', id=re.compile(r'trSCTN_CMBND\$[0-9]+_row')) or [] for combined_row in combined_rows: combined_section_number = (combined_row.find( 'span', id=re.compile(r'CLASS_NAME\$') ).text.split('(')[1][:-1]) if combined_section_number != class_number: combined_with.append(combined_section_number) # Used for creating unique ID. code = basic_data.get('course_code', '') dept = basic_data.get('department', '') a_lvl = basic_data.get('academic_level', '') campus = basic_data.get('campus', '') cid = make_course_id(year, term, a_lvl, campus, dept, code, '-', False) course_data = { 'id': cid, 'year': year, 'term': term, 'department': dept, 'course_code': code, 'course_name': basic_data.get('course_name', ''), 'units': basic_data.get('units', ''), 'campus': campus, 'academic_level': a_lvl, } section_data = { 'section_name': section_name, 'section_type': section_type, 'section_number': section_number, 'class_number': class_number, 'dates': course_dates, 'combined_with': combined_with, 'enrollment_capacity': enrollment_capacity, 'enrollment_total': enrollment_total, 'waitlist_capacity': waitlist_capacity, 'waitlist_total': waitlist_total, 'last_updated': pendulum.now().isoformat(), } return OrderedDict(course_data), OrderedDict(section_data)
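(The section parser above expands compact day strings such as "MoTu" by testing each `day_map` key for substring membership. A minimal Go sketch of that expansion; the sample input is invented, and a slice of pairs replaces the Python dict so the output order stays deterministic, since Go map iteration order is randomized.)

package main

import (
	"fmt"
	"strings"
)

// expandDays mirrors the day_map loop in _parse_course_section_data:
// every two-letter key contained in the raw day string contributes its
// full day name.
func expandDays(dayStr string) []string {
	dayMap := []struct{ short, long string }{
		{"Mo", "Monday"}, {"Tu", "Tuesday"}, {"We", "Wednesday"},
		{"Th", "Thursday"}, {"Fr", "Friday"}, {"Sa", "Saturday"},
		{"Su", "Sunday"},
	}
	var days []string
	for _, d := range dayMap {
		if strings.Contains(dayStr, d.short) {
			days = append(days, d.long)
		}
	}
	return days
}

func main() {
	fmt.Println(expandDays("MoTuTh")) // [Monday Tuesday Thursday]
}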
if descr_raw.find_all('br'): return descr_raw.find_all('br')[0].previous_sibling
random_line_split
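(The `Courses.scrape` staticmethod in this row fans the 26 letters out to 26 daemon workers over a `queue.Queue` and then blocks on `queue.join()`. A minimal Go sketch of the same fan-out pattern, assuming a channel in place of the queue and `sync.WaitGroup` in place of `join`; the scraping itself is stubbed out.)

package main

import (
	"fmt"
	"sync"
)

func main() {
	letters := make(chan rune)
	var wg sync.WaitGroup

	// One worker per letter, as in the Courses scraper.
	for i := 0; i < 26; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for letter := range letters {
				// A real worker would log in to SOLUS and scrape here.
				fmt.Printf("scraping letter %c\n", letter)
			}
		}()
	}

	for l := 'A'; l <= 'Z'; l++ {
		letters <- l
	}
	close(letters) // no more work; workers drain and exit.
	wg.Wait()      // queue.join() equivalent.
}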
leetcodeFunc.go
package main import ( "fmt" "sort" "strconv" ) /** * Definition for singly-linked list. * type ListNode struct { * Val int * Next *ListNode * } */ /** 445. Add Two Numbers II */ func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode { nums := changeToNums(l1) nums1 := changeToNums(l2) fmt.Println(nums, nums1) r := []int{} lg := len(nums) lg1 := len(nums1) jw := 0 if lg >= lg1 { for i := 0; i < lg; i++ { if i < lg1 { te
temp := nums[i] + jw if jw > 0 { jw-- } if temp >= 10 { temp = temp - 10 jw++ } r = append(r, temp) } } } else { for i := 0; i < lg1; i++ { if i < lg { temp := nums[i] + nums1[i] + jw if jw > 0 { jw-- } if temp >= 10 { temp = temp - 10 jw++ } r = append(r, temp) } else { temp := nums1[i] + jw if jw > 0 { jw-- } if temp >= 10 { temp = temp - 10 jw++ } r = append(r, temp) } } } if jw > 0 { r = append(r, jw) jw-- } return SetRListNode(r) } type ListNode struct { Val int Next *ListNode } func SetRListNode(nums []int) (n *ListNode) { n = &ListNode{ Val: nums[len(nums)-1], Next: nil, } for i := len(nums) - 2; i >= 0; i-- { insertEndNode(n, nums[i]) } return } func SetListNode(nums []int) (n *ListNode) { n = &ListNode{ Val: nums[0], Next: nil, } for i := 1; i < len(nums); i++ { insertEndNode(n, nums[i]) } return } func insertEndNode(l *ListNode, val int) { if l.Next == nil { l.Next = &ListNode{ Val: val, Next: nil, } return } insertEndNode(l.Next, val) } func changeToNums(n *ListNode) (nums []int) { if n.Next == nil { nums = append(nums, n.Val) return } for { nums = append(nums, findIntFromListNode(n)) if n.Next == nil { nums = append(nums, n.Val) break } } return } func findIntFromListNode(n *ListNode) int { if n.Next == nil { return n.Val } if n.Next.Next == nil { a := n.Next.Val n.Next = nil return a } return findIntFromListNode(n.Next) } /** 剑指 Offer 61. Straight in a deck of playing cards */ func isStraight(nums []int) bool { l := len(nums) count := 0 if l != 5 { return false } sort.Ints(nums) for i := 0; i < l-1; i++ { if nums[i] == 0 { count++ } else { if nums[i+1]-nums[i] > 1 { count = count - (nums[i+1] - nums[i] - 1) } if nums[i+1]-nums[i] == 0 { return false } } } if count < 0 { return false } return true } /** 1343. Number of sub-arrays of size K and average greater than or equal to threshold */ func numOfSubarrays(arr []int, k int, threshold int) int { count := 0 sum := 0 num := k * threshold for i := 0; i < len(arr); i++ { if i < k { sum += arr[i] } else { if i == k { if sum >= num { count++ } } sum = sum - arr[i-k] + arr[i] if sum >= num { count++ } } } if k == len(arr) { if sum >= num { count++ } } return count } /** 1330. Reverse subarray to maximize array value. On a number line, a-----b c------d: the gain is |c-b|*2, so b must be as small and c as large as possible to maximize the value */ func maxValueAfterReverse(nums []int) int { sum := 0 length := len(nums) a := -100000 // max over adjacent pairs of the smaller endpoint b := 100000 // min over adjacent pairs of the larger endpoint for i := 0; i < length-1; i++ { sum += IntAbs(nums[i] - nums[i+1]) a = IntMax(a, IntMin(nums[i], nums[i+1])) b = IntMin(b, IntMax(nums[i], nums[i+1])) } ans := sum ans = IntMax(ans, 2*(a-b)+sum) for i := 0; i < length-1; i++ { if i > 0 { minus := IntAbs(nums[0]-nums[i+1]) - IntAbs(nums[i]-nums[i+1]) ans = IntMax(ans, sum+minus) minus = IntAbs(nums[i-1]-nums[length-1]) - IntAbs(nums[i-1]-nums[i]) ans = IntMax(ans, sum+minus) } //for j:=i+1;j<length-1;j++ { // minus:= IntAbs(nums[i]-nums[j])+IntAbs(nums[i+1]-nums[j+1])-(IntAbs(nums[i]-nums[i+1])+IntAbs(nums[j]-nums[j+1])) // ans = IntMax(ans,sum+minus) //} } return ans } func IntAbs(a int) int { if a < 0 { return -a } return a } func IntMin(a, b int) int { if a > b { return b } return a } func IntMax(a, b int) int { if a < b { return b } return a } /** 1258. Find numbers with an even number of digits */ func findNumbers(nums []int) int { count := 0 for _, value := range nums { valueStr := strconv.Itoa(value) if len(valueStr)%2 == 0 { count++ } } return count } /* 998.
Maximum Binary Tree II */ func insertIntoMaxTree(root *TreeNode, val int) *TreeNode { // subtree is nil: return a new node for val if root == nil { return &TreeNode{ Val: val, Left: nil, Right: nil, } } // val is greater than the tree: the old tree becomes the left child and the new node becomes the root if root.Val < val { return &TreeNode{ Val: val, Left: root, Right: nil, } } root.Right = insertIntoMaxTree(root.Right, val) return root } type TreeNode struct { Val int Left *TreeNode Right *TreeNode } /** 654. Maximum Binary Tree, recursive solution */ func constructMaximumBinaryTree(nums []int) *TreeNode { return construct(nums, 0, len(nums)) } func construct(nums []int, l, r int) *TreeNode { if l == r { return nil } max_index := max(nums, l, r) root := &TreeNode{ Val: nums[max_index], Left: construct(nums, l, max_index), Right: construct(nums, max_index+1, r), } return root } func max(nums []int, l, r int) int { max_index := l for i := l; i < r; i++ { if nums[i] > nums[max_index] { max_index = i } } return max_index } /* 1300. Sum of mutated array closest to target */ func findBestValue(arr []int, target int) int { // sort ascending sort.Ints(arr) length := len(arr) presum := 0 endLen := length for index, value := range arr { k := endLen - index // compare (sum of unchanged items + value * remaining count) against target d := presum + value*k - target if d >= 0 { //fmt.Println(d,value,endLen,index) // fraction <= 0.5 rounds down, > 0.5 rounds up c := value - (d+k/2)/k return c } presum += value } return arr[length-1] } /** 1052. Grumpy Bookstore Owner */ func maxSatisfied(customers []int, grumpy []int, X int) int { count := 0 // baseline: customers already satisfied for i := 0; i < len(customers); i++ { if grumpy[i] == 0 { count += customers[i] // zero out so the sliding window only counts grumpy minutes customers[i] = 0 } } max := 0 temp := 0 for i := 0; i < len(customers); i++ { //for j:=0 ;j<X;j++{ // if grumpy[i+j]==1{ // temp+=customers[i+j] // } //} if i < X { max += customers[i] if temp < max { temp = max } } else { temp = temp + customers[i] - customers[i-X] if temp > max { max = temp } } } return count + max } /** 747. Largest number at least twice of others */ func dominantIndex(nums []int) int { if len(nums) <= 1 { return 0 } big := nums[0] secdbig := nums[1] if nums[1] >= nums[0] { big = nums[0] secdbig = nums[1] } count := 0 for i := 1; i < len(nums); i++ { if big < nums[i] { secdbig = big big = nums[i] count = i } if big > nums[i] && nums[i] > secdbig { secdbig = nums[i] } } if secdbig*2 <= big { return count } return -1 } // 221. Maximal Square func maximalSquare(matrix [][]byte) int { side := 0 for i := 0; i < len(matrix); i++ { for j := 0; j < len(matrix[i]); j++ { matrix[i][j] = byte(int(matrix[i][j]) % 48) if i < 1 || j < 1 { if matrix[i][j] == 1 && side < 1 { side = 1 } continue } else { if matrix[i][j] == 1 { temp := min(min(int(matrix[i-1][j]), int(matrix[i][j-1])), int(matrix[i-1][j-1])) + 1 matrix[i][j] = byte(temp) if temp > side { side = temp } } } } } return side * side } //1201.
Ugly Number III // design a program to find the n-th ugly number // here an ugly number is a positive integer divisible by a, b, or c. // count(x) = x/a + x/b + x/c - x/ab - x/ac - x/bc + x/abc func nthUglyNumber3(n int, a int, b int, c int) int { ab := int64(lcm(a, b)) ac := int64(lcm(a, c)) bc := int64(lcm(b, c)) abc := int64(lcm(lcm(a, b), c)) l := int64(min(a, min(b, c))) r := int64(2 * 10e9) //while (l < r) for { if l >= r { break } // probe the midpoint; each step halves the remaining range m := l + (r-l)/2 count := m/int64(a) + m/int64(b) + m/int64(c) - m/ab - m/ac - m/bc + m/abc // if the count is less than n, move l up to m+1 if count < int64(n) { l = m + 1 } else { // otherwise keep bisecting the lower half r = m } } return int(l) } func min(i int, j int) int { if i <= j { return i } return j } /* *formula: lcm = product of the two numbers / gcd */ func lcm(x, y int) int { return x * y / gcd(x, y) } /* *Euclidean algorithm for the gcd *recursive version; on entry both x and y are nonzero */ func gcd(x, y int) int { tmp := x % y if tmp > 0 { return gcd(y, tmp) } else { return y } } func canCompleteCircuit(gas []int, cost []int) int { // greedy algorithm start := 0 total := 0 last := 0 for i := 0; i < len(gas); i++ { total += gas[i] - cost[i] if last < 0 { last = gas[i] - cost[i] start = i } else { last += gas[i] - cost[i] } } if total >= 0 { return start } return -1 // gas := []int{5,1,2,3,4} // cost :=[]int{4,4,1,5,1} // for i:=0;i< len(gas);i++{ // g :=gas[i] // j :=i // if g<cost[j]{ // continue // }else { // for { // if j+1>=len(cost){ // g = g - cost[j] +gas[0] // if( g<cost[0]){ // break // } // // }else{ // g = g - cost[j] +gas[j+1] // if( g<cost[j+1]){ // break // } // } // j++ // if(j >=len(cost)){ // j=0 // } // if(j==i ){ // return i // } // } // } // } //return -1 } // 264. Ugly Number II // dynamic programming: append min(s[j]*2, s[k]*3, s[h]*5) to the slice, then advance the index that produced it and continue func nthUglyNumber(n int) int { s := []int{} s = append(s, 1) j := 0 k := 0 h := 0 for i := 0; i < n; i++ { min := min(min(s[j]*2, s[k]*3), s[h]*5) s = append(s, min) if min == s[j]*2 { j++ } if min == s[k]*3 { k++ } if min == s[h]*5 { h++ } if i == (n - 1) { return s[i] } } return s[n-1] } // 263. Ugly Number func isUgly(num int) bool { if num == 0 { return false } if num == 2 || num == 3 || num == 5 || num == 1 { return true } else if num%2 == 0 { return isUgly(num / 2) } else if num%3 == 0 { return isUgly(num / 3) } else if num%5 == 0 { return isUgly(num / 5) } return false } // 20. Valid Parentheses func isValid(s string) bool { var m = make(map[byte]byte) m['}'] = '{' m[']'] = '[' m[')'] = '(' var slice []byte for i := 0; i < len(s); i++ { if s[i] == '{' || s[i] == '(' || s[i] == '[' { slice = append(slice, s[i]) } else { if len(slice) > 0 { if (slice[len(slice)-1]) != m[s[i]] { return false } else { slice = slice[:len(slice)-1] } } else { return false } } } return len(slice) == 0 } // 367. Valid Perfect Square func isPerfectSquare(num int) bool { // a better solution: //sumnum := 1 //for //{ // num -= sumnum; // sumnum += 2; // if(num<=0){ // break // } //} //return num==0 n := getnum(num, num) for i := n; i < 2*n; i++ { if i*i == num { return true } } return false } func getnum(n int, num int) int { if n*n > num { n = getnum(n/2, num) } return n } //953.
Verifying an Alien Dictionary func isAlienSorted(words []string, order string) bool { mapStr := make(map[byte]int) count := len(order) // assign a rank to each letter for i := count; i > 0; i-- { mapStr[order[count-i]] = i } mapWords := []map[int]int{} for i := 0; i < len(words); i++ { temp := make(map[int]int) for j := 0; j < len(words[i]); j++ { temp[j] = mapStr[words[i][j]] } mapWords = append(mapWords, temp) } //[map[0:97 1:112 2:112 3:108 4:101] map[0:97 1:112 2:112]] fmt.Println(mapWords) for i := 0; i < len(mapWords)-1; i++ { for j := 0; j < len(mapWords[i]); j++ { m1, ok1 := mapWords[i][j] if !ok1 { m1 = 27 } m2, ok2 := mapWords[i+1][j] if !ok2 { m2 = 27 } if m1 < m2 { return false } else if m1 == m2 { continue } else { break } } } return true } // 874. Walking Robot Simulation func robotSim(commands []int, obstacles [][]int) int { x := 0 y := 0 ons := 0 //var mapStr ="" //for i := 0; i < len(obstacles); i++ { // mapStr =mapStr+"["+strconv.Itoa(obstacles[i][0])+","+strconv.Itoa(obstacles[i][1])+"]" //} mapStr := make(map[int][]int) for i := 0; i < len(obstacles); i++ { mapStr[obstacles[i][0]] = append(mapStr[obstacles[i][0]], obstacles[i][1]) } //op 0 y+ -2or2 y- 1 x+ -1 x- var op = 0 for i := 0; i < len(commands); i++ { if commands[i] == -2 { if op == 2 || op == -2 { op = 1 } else { op-- } } else if commands[i] == -1 { if op == 2 || op == -2 { op = -1 } else { op++ } } else if commands[i] <= 9 && commands[i] >= 1 { switch op { case 0: for j := 0; j < commands[i]; j++ { item, ok := mapStr[x] ok1 := false for _, v := range item { if v == (y + 1) { ok1 = true break } } if ok && ok1 { break } else { y++ } } case 1: for j := 0; j < commands[i]; j++ { item, ok := mapStr[x+1] ok1 := false for _, v := range item { if v == (y) { ok1 = true break } } if ok && ok1 { break } else { x++ } } case -1: for j := 0; j < commands[i]; j++ { item, ok := mapStr[x-1] ok1 := false for _, v := range item { if v == (y) { ok1 = true break } } if ok && ok1 { break } else { x-- } } case 2, -2: for j := 0; j < commands[i]; j++ { item, ok := mapStr[x] ok1 := false for _, v := range item { if v == (y - 1) { ok1 = true break } } if ok && ok1 { break } else { y-- } } } } if (x*x + y*y) > ons { ons = x*x + y*y } } return ons } // The K weakest rows in a matrix func kWeakestRows(mat [][]int, k int) []int { type array struct { value int index int } slice1 := make([]array, len(mat)) slice2 := make([]int, k) for i := 0; i < len(mat); i++ { for j := 0; j < len(mat[i]); j++ { if mat[i][j] == 0 { slice1[i] = array{ value: j, index: i, } break } if j == len(mat[i])-1 { slice1[i] = array{ value: j + 1, index: i, } } } } //[2 4 1 2 5] for i := 0; i < len(slice1); i++ { for j := i + 1; j < len(slice1); j++ { if slice1[i].value > slice1[j].value { temp := slice1[i] slice1[i] = slice1[j] slice1[j] = temp } else if slice1[i].value == slice1[j].value && slice1[i].index > slice1[j].index { temp := slice1[i] slice1[i] = slice1[j] slice1[j] = temp } } } fmt.Println(slice1) for i := 0; i < k; i++ { slice2[i] = slice1[i].index } return slice2 }
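(The comment above `nthUglyNumber3` counts the multiples up to x by inclusion-exclusion, count(x) = x/a + x/b + x/c - x/ab - x/ac - x/bc + x/abc, then binary-searches for the smallest x whose count reaches n. The same idea as a self-contained Go sketch; an iterative gcd is swapped in for the recursive one, and a 64-bit int is assumed.)

package main

import "fmt"

func gcd(x, y int) int {
	for y != 0 {
		x, y = y, x%y
	}
	return x
}

func lcm(x, y int) int { return x / gcd(x, y) * y }

// countUgly counts the multiples of a, b or c in [1, x] by inclusion-exclusion.
func countUgly(x, a, b, c int) int {
	return x/a + x/b + x/c -
		x/lcm(a, b) - x/lcm(a, c) - x/lcm(b, c) +
		x/lcm(lcm(a, b), c)
}

func main() {
	n, a, b, c := 3, 2, 3, 5
	lo, hi := 1, 2_000_000_000
	for lo < hi { // find the smallest x with countUgly(x) >= n
		mid := lo + (hi-lo)/2
		if countUgly(mid, a, b, c) < n {
			lo = mid + 1
		} else {
			hi = mid
		}
	}
	fmt.Println(lo) // 4: the ugly numbers here start 2, 3, 4
}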
mp := nums[i] + nums1[i] + jw if jw > 0 { jw-- } if temp >= 10 { temp = temp - 10 jw++ } r = append(r, temp) } else {
conditional_block
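(The `conditional_block` middle above fills the carry branch of `addTwoNumbers`, whose three branches all repeat the same add-with-carry step. A Go sketch of that shared core, assuming little-endian digit slices; the helper name `addDigits` is invented, and padding the shorter operand replaces the branching on slice lengths.)

package main

import "fmt"

// addDigits sums two little-endian digit slices with carry propagation,
// the arithmetic each branch of addTwoNumbers performs.
func addDigits(a, b []int) []int {
	var out []int
	carry := 0
	for i := 0; i < len(a) || i < len(b) || carry > 0; i++ {
		sum := carry
		if i < len(a) {
			sum += a[i]
		}
		if i < len(b) {
			sum += b[i]
		}
		out = append(out, sum%10)
		carry = sum / 10
	}
	return out
}

func main() {
	// 342 + 465 = 807; digits are stored least-significant first.
	fmt.Println(addDigits([]int{2, 4, 3}, []int{5, 6, 4})) // [7 0 8]
}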
} /* 998.
identifier_name
leetcodeFunc.go
package main import ( "fmt" "sort" "strconv" ) /** * Definition for singly-linked list. * type ListNode struct { * Val int * Next *ListNode * } */ /** 445. 两数相加 II */ func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode { nums := changeToNums(l1) nums1 := changeToNums(l2) fmt.Println(nums, nums1) r := []int{} lg := len(nums) lg1 := len(nums1) jw := 0 if lg >= lg1 { for i := 0; i < lg; i++ { if i < lg1 { temp := nums[i] + nums1[i] + jw if jw > 0 { jw-- } if temp >= 10 { temp = temp - 10 jw++ } r = append(r, temp) } else { temp := nums[i] + jw if jw > 0 { jw-- } if temp >= 10 { temp = temp - 10 jw++ } r = append(r, temp) } } } else { for i := 0; i < lg1; i++ { if i < lg { temp := nums[i] + nums1[i] + jw if jw > 0 { jw-- } if temp >= 10 { temp = temp - 10 jw++ } r = append(r, temp) } else { temp := nums1[i] + jw if jw > 0 { jw-- } if temp >= 10 { temp = temp - 10 jw++ } r = append(r, temp) } } } if jw > 0 { r = append(r, jw) jw-- } return SetRListNode(r) } type ListNode struct { Val int Next *ListNode } func SetRListNode(nums []int) (n *ListNode) { n = &ListNode{ Val: nums[len(nums)-1], Next: nil, } for i := len(nums) - 2; i >= 0; i-- { insertEndNode(n, nums[i]) } return } func SetListNode(nums []int) (n *ListNode) { n = &ListNode{ Val: nums[0], Next: nil, } for i := 1; i < len(nums); i++ { insertEndNode(n, nums[i]) } return } func insertEndNode(l *ListNode, val int) { if l.Next == nil { l.Next = &ListNode{ Val: val, Next: nil, } return } insertEndNode(l.Next, val) } func changeToNums(n *ListNode) (nums []int) { if n.Next == nil { nums = append(nums, n.Val) return } for { nums = append(nums, findIntFromListNode(n)) if n.Next == nil { nums = append(nums, n.Val) break } } return } func findIntFromListNode(n *ListNode) int { if n.Next == nil { return n.Val } if n.Next.Next == nil { a := n.Next.Val n.Next = nil return a } return findIntFromListNode(n.Next) } /** 剑指 Offer 61. 扑克牌中的顺子 */ func isStraight(nums []int) bool { l := len(nums) count := 0 if l != 5 { return false } sort.Ints(nums) for i := 0; i < l-1; i++ { if nums[i] == 0 { count++ } else { if nums[i+1]-nums[i] > 1 { count = count - (nums[i+1] - nums[i] - 1) } if nums[i+1]-nums[i] == 0 { return false } } } if count < 0 { return false } return true } /** 1343. 大小为 K 且平均值大于等于阈值的子数组数目 */ func numOfSubarrays(arr []int, k int, threshold int) int { count := 0 sum := 0 num := k * threshold for i := 0; i < len(arr); i++ { if i < k { sum += arr[i] } else { if i == k { if sum >= num { count++ } } sum = sum - arr[i-k] + arr[i] if sum >= num { count++ } } } if k == len(arr) { if sum >= num { count++ } } return count } /** 1330. 
翻转子数组得到最大的数组值 使用数轴表示 a-----b c------d 差值为 |c-b|*2 所以需要 b 最小 c 最大 才能获得最大的值 */ func maxValueAfterReverse(nums []int) int { sum := 0 length := len(nums) a := -100000 //区间小值 b := 100000 //区间大值 for i := 0; i < length-1; i++ { sum += IntAbs(nums[i] - nums[i+1]) a = IntMax(a, IntMin(nums[i], nums[i+1])) b = IntMin(b, IntMax(nums[i], nums[i+1])) } ans := sum ans = IntMax(ans, 2*(a-b)+sum) for i := 0; i < length-1; i++ { if i > 0 { minus := IntAbs(nums[0]-nums[i+1]) - IntAbs(nums[i]-nums[i+1]) ans = IntMax(ans, sum+minus) minus = IntAbs(nums[i-1]-nums[length-1]) - IntAbs(nums[i-1]-nums[i]) ans = IntMax(ans, sum+minus) } //for j:=i+1;j<length-1;j++ { // minus:= IntAbs(nums[i]-nums[j])+IntAbs(nums[i+1]-nums[j+1])-(IntAbs(nums[i]-nums[i+1])+IntAbs(nums[j]-nums[j+1])) // ans = IntMax(ans,sum+minus) //} } return ans } func IntAbs(a int) int { if a < 0 { return -a } return a } func IntMin(a, b int) int { if a > b { return b } return a } func IntMax(a, b int) int { if a < b { return b } return a } /** 1258 查找双位数 */ func findNumbers(nums []int) int { count := 0 for _, value := range nums { valueStr := strconv.Itoa(value) if len(valueStr)%2 == 0 { count++ } } return count } /* 998. 最大二叉树 II */ func insertIntoMaxTree(root *TreeNode, val int) *TreeNode { //right 为空返回 新增又树 if root == nil { return &TreeNode{ Val: val, Left: nil, Right: nil, } } //节点大于树,原树入左侧 ,节点做根 if root.Val < val { return &TreeNode{ Val: val, Left: root, Right: nil, } } root.Right = insertIntoMaxTree(root.Right, val) return root } type TreeNode struct { Val int Left *TreeNode Right *TreeNode } /** 654. 最大二叉树 递归解法 */ func constructMaximumBinaryTree(nums []int) *TreeNode { return construct(nums, 0, len(nums)) } func construct(nums []int, l, r int) *TreeNode { if l == r { return nil } max_index := max(nums, l, r) root := &TreeNode{ Val: nums[max_index], Left: construct(nums, l, max_index), Right: construct(nums, max_index+1, r), } return root } func max(nums []int, l, r int) int { max_index := l for i := l; i < r; i++ { if nums[i] > nums[max_index] { max_index = i } } return max_index } /* 1300. 转变数组后最接近目标值的数组和 */ func findBestValue(arr []int, target int) int { //默认排序 sort.Ints(arr) length := len(arr)
arget 的比较 d := presum + value*k - target if d >= 0 { //fmt.Println(d,value,endLen,index) // c小于等于0.5那么取小 大于0.5 去取上值 c := value - (d+k/2)/k return c } presum += value } return arr[length-1] } /** 1052. 爱生气的书店老板 */ func maxSatisfied(customers []int, grumpy []int, X int) int { count := 0 //默认的值 for i := 0; i < len(customers); i++ { if grumpy[i] == 0 { count += customers[i] //设为0 customers[i] = 0 } } max := 0 temp := 0 for i := 0; i < len(customers); i++ { //for j:=0 ;j<X;j++{ // if grumpy[i+j]==1{ // temp+=customers[i+j] // } //} if i < X { max += customers[i] if temp < max { temp = max } } else { temp = temp + customers[i] - customers[i-X] if temp > max { max = temp } } } return count + max } /** 747. 至少是其他数字两倍的最大数 */ func dominantIndex(nums []int) int { if len(nums) <= 1 { return 0 } big := nums[0] secdbig := nums[1] if nums[1] >= nums[0] { big = nums[0] secdbig = nums[1] } count := 0 for i := 1; i < len(nums); i++ { if big < nums[i] { secdbig = big big = nums[i] count = i } if big > nums[i] && nums[i] > secdbig { secdbig = nums[i] } } if secdbig*2 <= big { return count } return -1 } //221. 最大正方形 func maximalSquare(matrix [][]byte) int { side := 0 for i := 0; i < len(matrix); i++ { for j := 0; j < len(matrix[i]); j++ { matrix[i][j] = byte(int(matrix[i][j]) % 48) if i < 1 || j < 1 { if matrix[i][j] == 1 && side < 1 { side = 1 } continue } else { if matrix[i][j] == 1 { temp := min(min(int(matrix[i-1][j]), int(matrix[i][j-1])), int(matrix[i-1][j-1])) + 1 matrix[i][j] = byte(temp) if temp > side { side = temp } } } } } return side * side } //1201. 丑数 //请你帮忙设计一个程序,用来找出第 n 个丑数 //丑数是可以被 a 或 b 或 c 整除的 正整数。 // x/a +x/b+x/c-x/ab-x/ac-x/bc+x/abc func nthUglyNumber3(n int, a int, b int, c int) int { ab := int64(lcm(a, b)) ac := int64(lcm(a, c)) bc := int64(lcm(b, c)) abc := int64(lcm(lcm(a, b), c)) l := int64(min(a, min(b, c))) r := int64(2 * 10e9) //while (l < r) for { if l >= r { break } // 从中间开始查找,每次的偏移量是了l/2 m := l + (r-l)/2 count := m/int64(a) + m/int64(b) + m/int64(c) - m/ab - m/ac - m/bc + m/abc //计算的数量如果小于 n 则 l= m+1 if count < int64(n) { l = m + 1 } else { //如果大于 n ,则继续二分 r = m } } return int(l) } func min(i int, j int) int { if i <= j { return i } return j } /* *公式解法:最小公倍数=两数之积/最大公约数 */ func lcm(x, y int) int { return x * y / gcd(x, y) } /* *辗转相除法:最大公约数 *递归写法,进入运算是x和y都不为0 */ func gcd(x, y int) int { tmp := x % y if tmp > 0 { return gcd(y, tmp) } else { return y } } func canCompleteCircuit(gas []int, cost []int) int { //贪心算法 start := 0 total := 0 last := 0 for i := 0; i < len(gas); i++ { total += gas[i] - cost[i] if last < 0 { last = gas[i] - cost[i] start = i } else { last += gas[i] - cost[i] } } if total >= 0 { return start } return -1 // gas := []int{5,1,2,3,4} // cost :=[]int{4,4,1,5,1} // for i:=0;i< len(gas);i++{ // g :=gas[i] // j :=i // if g<cost[j]{ // continue // }else { // for { // if j+1>=len(cost){ // g = g - cost[j] +gas[0] // if( g<cost[0]){ // break // } // // }else{ // g = g - cost[j] +gas[j+1] // if( g<cost[j+1]){ // break // } // } // j++ // if(j >=len(cost)){ // j=0 // } // if(j==i ){ // return i // } // } // } // } //return -1 } //263. 丑数 //动态规划 1*2 1*3 1*5 加入到切片中 然后 下标+1 继续加入 func nthUglyNumber(n int) int { s := []int{} s = append(s, 1) j := 0 k := 0 h := 0 for i := 0; i < n; i++ { min := min(min(s[j]*2, s[k]*3), s[h]*5) s = append(s, min) if min == s[j]*2 { j++ } if min == s[k]*3 { k++ } if min == s[h]*5 { h++ } if i == (n - 1) { return s[i] } } return s[n-1] } //263. 
丑数 func isUgly(num int) bool { if num == 0 { return false } if num == 2 || num == 3 || num == 5 || num == 1 { return true } else if num%2 == 0 { return isUgly(num / 2) } else if num%3 == 0 { return isUgly(num / 3) } else if num%5 == 0 { return isUgly(num / 5) } return false } //20. 有效的括号 func isValid(s string) bool { var m = make(map[byte]byte) m['}'] = '{' m[']'] = '[' m[')'] = '(' var slice []byte for i := 0; i < len(s); i++ { if s[i] == '{' || s[i] == '(' || s[i] == '[' { slice = append(slice, s[i]) } else { if len(slice) > 0 { if (slice[len(slice)-1]) != m[s[i]] { return false } else { slice = slice[:len(slice)-1] } } else { return false } } } return len(slice) == 0 } //367. 有效的完全平方数 func isPerfectSquare(num int) bool { //更优解 //sumnum := 1 //for //{ // num -= sumnum; // sumnum += 2; // if(num<=0){ // break // } //} //return num==0 n := getnum(num, num) for i := n; i < 2*n; i++ { if i*i == num { return true } } return false } func getnum(n int, num int) int { if n*n > num { n = getnum(n/2, num) } return n } //953. 验证外星语词典 func isAlienSorted(words []string, order string) bool { mapStr := make(map[byte]int) count := len(order) //设置大小 for i := count; i > 0; i-- { mapStr[order[count-i]] = i } mapWords := []map[int]int{} for i := 0; i < len(words); i++ { temp := make(map[int]int) for j := 0; j < len(words[i]); j++ { temp[j] = mapStr[words[i][j]] } mapWords = append(mapWords, temp) } //[map[0:97 1:112 2:112 3:108 4:101] map[0:97 1:112 2:112]] fmt.Println(mapWords) for i := 0; i < len(mapWords)-1; i++ { for j := 0; j < len(mapWords[i]); j++ { m1, ok1 := mapWords[i][j] if !ok1 { m1 = 27 } m2, ok2 := mapWords[i+1][j] if !ok2 { m2 = 27 } if m1 < m2 { return false } else if m1 == m2 { continue } else { break } } } return true } //874模拟行走机器人 func robotSim(commands []int, obstacles [][]int) int { x := 0 y := 0 ons := 0 //var mapStr ="" //for i := 0; i < len(obstacles); i++ { // mapStr =mapStr+"["+strconv.Itoa(obstacles[i][0])+","+strconv.Itoa(obstacles[i][1])+"]" //} mapStr := make(map[int][]int) for i := 0; i < len(obstacles); i++ { mapStr[obstacles[i][0]] = append(mapStr[obstacles[i][0]], obstacles[i][1]) } //op 0 y+ -2or2 y- 1 x+ -1 x- var op = 0 for i := 0; i < len(commands); i++ { if commands[i] == -2 { if op == 2 || op == -2 { op = 1 } else { op-- } } else if commands[i] == -1 { if op == 2 || op == -2 { op = -1 } else { op++ } } else if commands[i] <= 9 && commands[i] >= 1 { switch op { case 0: for j := 0; j < commands[i]; j++ { item, ok := mapStr[x] ok1 := false for _, v := range item { if v == (y + 1) { ok1 = true break } } if ok && ok1 { break } else { y++ } } case 1: for j := 0; j < commands[i]; j++ { item, ok := mapStr[x+1] ok1 := false for _, v := range item { if v == (y) { ok1 = true break } } if ok && ok1 { break } else { x++ } } case -1: for j := 0; j < commands[i]; j++ { item, ok := mapStr[x-1] ok1 := false for _, v := range item { if v == (y) { ok1 = true break } } if ok && ok1 { break } else { x-- } } case 2, -2: for j := 0; j < commands[i]; j++ { item, ok := mapStr[x] ok1 := false for _, v := range item { if v == (y - 1) { ok1 = true break } } if ok && ok1 { break } else { y-- } } } } if (x*x + y*y) > ons { ons = x*x + y*y } } return ons } //方阵中战斗力最弱的 K 行 func kWeakestRows(mat [][]int, k int) []int { type array struct { value int index int } slice1 := make([]array, len(mat)) slice2 := make([]int, k) for i := 0; i < len(mat); i++ { for j := 0; j < len(mat[i]); j++ { if mat[i][j] == 0 { slice1[i] = array{ value: j, index: i, } break } if j == len(mat[i])-1 { slice1[i] = array{ 
value: j + 1, index: i, } } } } //[2 4 1 2 5] for i := 0; i < len(slice1); i++ { for j := i + 1; j < len(slice1); j++ { if slice1[i].value > slice1[j].value { temp := slice1[i] slice1[i] = slice1[j] slice1[j] = temp } else if slice1[i].value == slice1[j].value && slice1[i].index > slice1[j].index { temp := slice1[i] slice1[i] = slice1[j] slice1[j] = temp } } } fmt.Println(slice1) for i := 0; i < k; i++ { slice2[i] = slice1[i].index } return slice2 }
presum := 0 endLen := length for index, value := range arr { k := endLen - index // 条件 未改变的和 + 当前 value*剩余的项 与 t
identifier_body
leetcodeFunc.go
package main import ( "fmt" "sort" "strconv" ) /** * Definition for singly-linked list. * type ListNode struct { * Val int * Next *ListNode * } */ /** 445. 两数相加 II */ func addTwoNumbers(l1 *ListNode, l2 *ListNode) *ListNode { nums := changeToNums(l1) nums1 := changeToNums(l2) fmt.Println(nums, nums1) r := []int{} lg := len(nums) lg1 := len(nums1) jw := 0 if lg >= lg1 { for i := 0; i < lg; i++ { if i < lg1 { temp := nums[i] + nums1[i] + jw if jw > 0 { jw-- } if temp >= 10 { temp = temp - 10 jw++ } r = append(r, temp) } else { temp := nums[i] + jw if jw > 0 { jw-- } if temp >= 10 { temp = temp - 10 jw++ } r = append(r, temp) } } } else { for i := 0; i < lg1; i++ { if i < lg { temp := nums[i] + nums1[i] + jw if jw > 0 { jw-- } if temp >= 10 { temp = temp - 10 jw++ } r = append(r, temp) } else { temp := nums1[i] + jw if jw > 0 { jw-- } if temp >= 10 { temp = temp - 10 jw++ } r = append(r, temp) } } } if jw > 0 { r = append(r, jw) jw-- } return SetRListNode(r) } type ListNode struct { Val int Next *ListNode } func SetRListNode(nums []int) (n *ListNode) { n = &ListNode{ Val: nums[len(nums)-1], Next: nil, } for i := len(nums) - 2; i >= 0; i-- { insertEndNode(n, nums[i]) } return } func SetListNode(nums []int) (n *ListNode) { n = &ListNode{ Val: nums[0], Next: nil, } for i := 1; i < len(nums); i++ { insertEndNode(n, nums[i]) } return } func insertEndNode(l *ListNode, val int) { if l.Next == nil { l.Next = &ListNode{ Val: val, Next: nil, } return } insertEndNode(l.Next, val) } func changeToNums(n *ListNode) (nums []int) { if n.Next == nil { nums = append(nums, n.Val) return } for { nums = append(nums, findIntFromListNode(n)) if n.Next == nil { nums = append(nums, n.Val) break } } return } func findIntFromListNode(n *ListNode) int { if n.Next == nil { return n.Val } if n.Next.Next == nil { a := n.Next.Val n.Next = nil return a } return findIntFromListNode(n.Next) } /** 剑指 Offer 61. 扑克牌中的顺子 */ func isStraight(nums []int) bool { l := len(nums) count := 0 if l != 5 { return false } sort.Ints(nums) for i := 0; i < l-1; i++ { if nums[i] == 0 { count++ } else { if nums[i+1]-nums[i] > 1 { count = count - (nums[i+1] - nums[i] - 1) } if nums[i+1]-nums[i] == 0 { return false } } } if count < 0 { return false } return true } /** 1343. 大小为 K 且平均值大于等于阈值的子数组数目 */ func numOfSubarrays(arr []int, k int, threshold int) int { count := 0 sum := 0 num := k * threshold for i := 0; i < len(arr); i++ { if i < k { sum += arr[i] } else { if i == k { if sum >= num { count++ } } sum = sum - arr[i-k] + arr[i] if sum >= num { count++ } } } if k == len(arr) { if sum >= num { count++ }
1330. 翻转子数组得到最大的数组值 使用数轴表示 a-----b c------d 差值为 |c-b|*2 所以需要 b 最小 c 最大 才能获得最大的值 */ func maxValueAfterReverse(nums []int) int { sum := 0 length := len(nums) a := -100000 //区间小值 b := 100000 //区间大值 for i := 0; i < length-1; i++ { sum += IntAbs(nums[i] - nums[i+1]) a = IntMax(a, IntMin(nums[i], nums[i+1])) b = IntMin(b, IntMax(nums[i], nums[i+1])) } ans := sum ans = IntMax(ans, 2*(a-b)+sum) for i := 0; i < length-1; i++ { if i > 0 { minus := IntAbs(nums[0]-nums[i+1]) - IntAbs(nums[i]-nums[i+1]) ans = IntMax(ans, sum+minus) minus = IntAbs(nums[i-1]-nums[length-1]) - IntAbs(nums[i-1]-nums[i]) ans = IntMax(ans, sum+minus) } //for j:=i+1;j<length-1;j++ { // minus:= IntAbs(nums[i]-nums[j])+IntAbs(nums[i+1]-nums[j+1])-(IntAbs(nums[i]-nums[i+1])+IntAbs(nums[j]-nums[j+1])) // ans = IntMax(ans,sum+minus) //} } return ans } func IntAbs(a int) int { if a < 0 { return -a } return a } func IntMin(a, b int) int { if a > b { return b } return a } func IntMax(a, b int) int { if a < b { return b } return a } /** 1258 查找双位数 */ func findNumbers(nums []int) int { count := 0 for _, value := range nums { valueStr := strconv.Itoa(value) if len(valueStr)%2 == 0 { count++ } } return count } /* 998. 最大二叉树 II */ func insertIntoMaxTree(root *TreeNode, val int) *TreeNode { //right 为空返回 新增又树 if root == nil { return &TreeNode{ Val: val, Left: nil, Right: nil, } } //节点大于树,原树入左侧 ,节点做根 if root.Val < val { return &TreeNode{ Val: val, Left: root, Right: nil, } } root.Right = insertIntoMaxTree(root.Right, val) return root } type TreeNode struct { Val int Left *TreeNode Right *TreeNode } /** 654. 最大二叉树 递归解法 */ func constructMaximumBinaryTree(nums []int) *TreeNode { return construct(nums, 0, len(nums)) } func construct(nums []int, l, r int) *TreeNode { if l == r { return nil } max_index := max(nums, l, r) root := &TreeNode{ Val: nums[max_index], Left: construct(nums, l, max_index), Right: construct(nums, max_index+1, r), } return root } func max(nums []int, l, r int) int { max_index := l for i := l; i < r; i++ { if nums[i] > nums[max_index] { max_index = i } } return max_index } /* 1300. 转变数组后最接近目标值的数组和 */ func findBestValue(arr []int, target int) int { //默认排序 sort.Ints(arr) length := len(arr) presum := 0 endLen := length for index, value := range arr { k := endLen - index // 条件 未改变的和 + 当前 value*剩余的项 与 target 的比较 d := presum + value*k - target if d >= 0 { //fmt.Println(d,value,endLen,index) // c小于等于0.5那么取小 大于0.5 去取上值 c := value - (d+k/2)/k return c } presum += value } return arr[length-1] } /** 1052. 爱生气的书店老板 */ func maxSatisfied(customers []int, grumpy []int, X int) int { count := 0 //默认的值 for i := 0; i < len(customers); i++ { if grumpy[i] == 0 { count += customers[i] //设为0 customers[i] = 0 } } max := 0 temp := 0 for i := 0; i < len(customers); i++ { //for j:=0 ;j<X;j++{ // if grumpy[i+j]==1{ // temp+=customers[i+j] // } //} if i < X { max += customers[i] if temp < max { temp = max } } else { temp = temp + customers[i] - customers[i-X] if temp > max { max = temp } } } return count + max } /** 747. 至少是其他数字两倍的最大数 */ func dominantIndex(nums []int) int { if len(nums) <= 1 { return 0 } big := nums[0] secdbig := nums[1] if nums[1] >= nums[0] { big = nums[0] secdbig = nums[1] } count := 0 for i := 1; i < len(nums); i++ { if big < nums[i] { secdbig = big big = nums[i] count = i } if big > nums[i] && nums[i] > secdbig { secdbig = nums[i] } } if secdbig*2 <= big { return count } return -1 } //221. 
最大正方形 func maximalSquare(matrix [][]byte) int { side := 0 for i := 0; i < len(matrix); i++ { for j := 0; j < len(matrix[i]); j++ { matrix[i][j] = byte(int(matrix[i][j]) % 48) if i < 1 || j < 1 { if matrix[i][j] == 1 && side < 1 { side = 1 } continue } else { if matrix[i][j] == 1 { temp := min(min(int(matrix[i-1][j]), int(matrix[i][j-1])), int(matrix[i-1][j-1])) + 1 matrix[i][j] = byte(temp) if temp > side { side = temp } } } } } return side * side } //1201. 丑数 //请你帮忙设计一个程序,用来找出第 n 个丑数 //丑数是可以被 a 或 b 或 c 整除的 正整数。 // x/a +x/b+x/c-x/ab-x/ac-x/bc+x/abc func nthUglyNumber3(n int, a int, b int, c int) int { ab := int64(lcm(a, b)) ac := int64(lcm(a, c)) bc := int64(lcm(b, c)) abc := int64(lcm(lcm(a, b), c)) l := int64(min(a, min(b, c))) r := int64(2 * 10e9) //while (l < r) for { if l >= r { break } // 从中间开始查找,每次的偏移量是了l/2 m := l + (r-l)/2 count := m/int64(a) + m/int64(b) + m/int64(c) - m/ab - m/ac - m/bc + m/abc //计算的数量如果小于 n 则 l= m+1 if count < int64(n) { l = m + 1 } else { //如果大于 n ,则继续二分 r = m } } return int(l) } func min(i int, j int) int { if i <= j { return i } return j } /* *公式解法:最小公倍数=两数之积/最大公约数 */ func lcm(x, y int) int { return x * y / gcd(x, y) } /* *辗转相除法:最大公约数 *递归写法,进入运算是x和y都不为0 */ func gcd(x, y int) int { tmp := x % y if tmp > 0 { return gcd(y, tmp) } else { return y } } func canCompleteCircuit(gas []int, cost []int) int { //贪心算法 start := 0 total := 0 last := 0 for i := 0; i < len(gas); i++ { total += gas[i] - cost[i] if last < 0 { last = gas[i] - cost[i] start = i } else { last += gas[i] - cost[i] } } if total >= 0 { return start } return -1 // gas := []int{5,1,2,3,4} // cost :=[]int{4,4,1,5,1} // for i:=0;i< len(gas);i++{ // g :=gas[i] // j :=i // if g<cost[j]{ // continue // }else { // for { // if j+1>=len(cost){ // g = g - cost[j] +gas[0] // if( g<cost[0]){ // break // } // // }else{ // g = g - cost[j] +gas[j+1] // if( g<cost[j+1]){ // break // } // } // j++ // if(j >=len(cost)){ // j=0 // } // if(j==i ){ // return i // } // } // } // } //return -1 } //263. 丑数 //动态规划 1*2 1*3 1*5 加入到切片中 然后 下标+1 继续加入 func nthUglyNumber(n int) int { s := []int{} s = append(s, 1) j := 0 k := 0 h := 0 for i := 0; i < n; i++ { min := min(min(s[j]*2, s[k]*3), s[h]*5) s = append(s, min) if min == s[j]*2 { j++ } if min == s[k]*3 { k++ } if min == s[h]*5 { h++ } if i == (n - 1) { return s[i] } } return s[n-1] } //263. 丑数 func isUgly(num int) bool { if num == 0 { return false } if num == 2 || num == 3 || num == 5 || num == 1 { return true } else if num%2 == 0 { return isUgly(num / 2) } else if num%3 == 0 { return isUgly(num / 3) } else if num%5 == 0 { return isUgly(num / 5) } return false } //20. 有效的括号 func isValid(s string) bool { var m = make(map[byte]byte) m['}'] = '{' m[']'] = '[' m[')'] = '(' var slice []byte for i := 0; i < len(s); i++ { if s[i] == '{' || s[i] == '(' || s[i] == '[' { slice = append(slice, s[i]) } else { if len(slice) > 0 { if (slice[len(slice)-1]) != m[s[i]] { return false } else { slice = slice[:len(slice)-1] } } else { return false } } } return len(slice) == 0 } //367. 有效的完全平方数 func isPerfectSquare(num int) bool { //更优解 //sumnum := 1 //for //{ // num -= sumnum; // sumnum += 2; // if(num<=0){ // break // } //} //return num==0 n := getnum(num, num) for i := n; i < 2*n; i++ { if i*i == num { return true } } return false } func getnum(n int, num int) int { if n*n > num { n = getnum(n/2, num) } return n } //953. 
验证外星语词典 func isAlienSorted(words []string, order string) bool { mapStr := make(map[byte]int) count := len(order) //设置大小 for i := count; i > 0; i-- { mapStr[order[count-i]] = i } mapWords := []map[int]int{} for i := 0; i < len(words); i++ { temp := make(map[int]int) for j := 0; j < len(words[i]); j++ { temp[j] = mapStr[words[i][j]] } mapWords = append(mapWords, temp) } //[map[0:97 1:112 2:112 3:108 4:101] map[0:97 1:112 2:112]] fmt.Println(mapWords) for i := 0; i < len(mapWords)-1; i++ { for j := 0; j < len(mapWords[i]); j++ { m1, ok1 := mapWords[i][j] if !ok1 { m1 = 27 } m2, ok2 := mapWords[i+1][j] if !ok2 { m2 = 27 } if m1 < m2 { return false } else if m1 == m2 { continue } else { break } } } return true } //874模拟行走机器人 func robotSim(commands []int, obstacles [][]int) int { x := 0 y := 0 ons := 0 //var mapStr ="" //for i := 0; i < len(obstacles); i++ { // mapStr =mapStr+"["+strconv.Itoa(obstacles[i][0])+","+strconv.Itoa(obstacles[i][1])+"]" //} mapStr := make(map[int][]int) for i := 0; i < len(obstacles); i++ { mapStr[obstacles[i][0]] = append(mapStr[obstacles[i][0]], obstacles[i][1]) } //op 0 y+ -2or2 y- 1 x+ -1 x- var op = 0 for i := 0; i < len(commands); i++ { if commands[i] == -2 { if op == 2 || op == -2 { op = 1 } else { op-- } } else if commands[i] == -1 { if op == 2 || op == -2 { op = -1 } else { op++ } } else if commands[i] <= 9 && commands[i] >= 1 { switch op { case 0: for j := 0; j < commands[i]; j++ { item, ok := mapStr[x] ok1 := false for _, v := range item { if v == (y + 1) { ok1 = true break } } if ok && ok1 { break } else { y++ } } case 1: for j := 0; j < commands[i]; j++ { item, ok := mapStr[x+1] ok1 := false for _, v := range item { if v == (y) { ok1 = true break } } if ok && ok1 { break } else { x++ } } case -1: for j := 0; j < commands[i]; j++ { item, ok := mapStr[x-1] ok1 := false for _, v := range item { if v == (y) { ok1 = true break } } if ok && ok1 { break } else { x-- } } case 2, -2: for j := 0; j < commands[i]; j++ { item, ok := mapStr[x] ok1 := false for _, v := range item { if v == (y - 1) { ok1 = true break } } if ok && ok1 { break } else { y-- } } } } if (x*x + y*y) > ons { ons = x*x + y*y } } return ons } //方阵中战斗力最弱的 K 行 func kWeakestRows(mat [][]int, k int) []int { type array struct { value int index int } slice1 := make([]array, len(mat)) slice2 := make([]int, k) for i := 0; i < len(mat); i++ { for j := 0; j < len(mat[i]); j++ { if mat[i][j] == 0 { slice1[i] = array{ value: j, index: i, } break } if j == len(mat[i])-1 { slice1[i] = array{ value: j + 1, index: i, } } } } //[2 4 1 2 5] for i := 0; i < len(slice1); i++ { for j := i + 1; j < len(slice1); j++ { if slice1[i].value > slice1[j].value { temp := slice1[i] slice1[i] = slice1[j] slice1[j] = temp } else if slice1[i].value == slice1[j].value && slice1[i].index > slice1[j].index { temp := slice1[i] slice1[i] = slice1[j] slice1[j] = temp } } } fmt.Println(slice1) for i := 0; i < k; i++ { slice2[i] = slice1[i].index } return slice2 }
} return count } /**
random_line_split
types.go
/* Copyright 2018 The Kubernetes Authors. Copyright 2020 Authors of Arktos - file modified. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" componentbaseconfigv1alpha1 "k8s.io/component-base/config/v1alpha1" ) // VolumeConfiguration contains *all* enumerated flags meant to configure all volume // plugins. From this config, the controller-manager binary will create many instances of // volume.VolumeConfig, each containing only the configuration needed for that plugin which // are then passed to the appropriate plugin. The ControllerManager binary is the only part // of the code which knows what plugins are supported and which flags correspond to each plugin. type VolumeConfiguration struct { // enableHostPathProvisioning enables HostPath PV provisioning when running without a // cloud provider. This allows testing and development of provisioning features. HostPath // provisioning is not supported in any way, won't work in a multi-node cluster, and // should not be used for anything other than testing or development. EnableHostPathProvisioning *bool // enableDynamicProvisioning enables the provisioning of volumes when running within an environment // that supports dynamic provisioning. Defaults to true. EnableDynamicProvisioning *bool // volumePluginDir is the full path of the directory in which the flex // volume plugin should search for additional third party volume plugins FlexVolumePluginDir string } // GroupResource describes an group resource. type GroupResource struct { // group is the group portion of the GroupResource. Group string // resource is the resource portion of the GroupResource. Resource string } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // KubeControllerManagerConfiguration contains elements describing kube-controller manager. type KubeControllerManagerConfiguration struct { metav1.TypeMeta `json:",inline"` // Generic holds configuration for a generic controller-manager Generic GenericControllerManagerConfiguration // KubeCloudSharedConfiguration holds configuration for shared related features // both in cloud controller manager and kube-controller manager. KubeCloudShared KubeCloudSharedConfiguration // CSRSigningControllerConfiguration holds configuration for // CSRSigningController related features. CSRSigningController CSRSigningControllerConfiguration // DeprecatedControllerConfiguration holds configuration for some deprecated // features. DeprecatedController DeprecatedControllerConfiguration // EndpointControllerConfiguration holds configuration for EndpointController // related features.
EndpointController EndpointControllerConfiguration // GarbageCollectorControllerConfiguration holds configuration for // GarbageCollectorController related features. GarbageCollectorController GarbageCollectorControllerConfiguration // NamespaceControllerConfiguration holds configuration for NamespaceController // related features. NamespaceController NamespaceControllerConfiguration // PodGCControllerConfiguration holds configuration for PodGCController // related features. PodGCController PodGCControllerConfiguration // ReplicaSetControllerConfiguration holds configuration for ReplicaSet related features. ReplicaSetController ReplicaSetControllerConfiguration // SAControllerConfiguration holds configuration for ServiceAccountController // related features. SAController SAControllerConfiguration // TenantControllerConfiguration holds configuration for TenantController // related features. TenantController TenantControllerConfiguration `json:"tenantController,omitempty"` } // GenericControllerManagerConfiguration holds configuration for a generic controller-manager. type GenericControllerManagerConfiguration struct { // port is the port that the controller-manager's http service runs on. Port int32 // address is the IP address to serve on (set to 0.0.0.0 for all interfaces). Address string // minResyncPeriod is the resync period in reflectors; will be random between // minResyncPeriod and 2*minResyncPeriod. MinResyncPeriod metav1.Duration // ClientConnection specifies the kubeconfig file and client connection // settings for the proxy server to use when communicating with the apiserver. ClientConnection componentbaseconfigv1alpha1.ClientConnectionConfiguration // How long to wait between starting controller managers ControllerStartInterval metav1.Duration // leaderElection defines the configuration of leader election client. LeaderElection componentbaseconfigv1alpha1.LeaderElectionConfiguration // Controllers is the list of controllers to enable or disable // '*' means "all enabled by default controllers" // 'foo' means "enable 'foo'" // '-foo' means "disable 'foo'" // first item for a particular name wins Controllers []string // DebuggingConfiguration holds configuration for Debugging related features. Debugging componentbaseconfigv1alpha1.DebuggingConfiguration } // KubeCloudSharedConfiguration contains elements shared by both kube-controller manager // and cloud-controller manager, but not genericconfig. type KubeCloudSharedConfiguration struct { // CloudProviderConfiguration holds configuration for CloudProvider related features. CloudProvider CloudProviderConfiguration // externalCloudVolumePlugin specifies the plugin to use when cloudProvider is "external". // It is currently used by the in repo cloud providers to handle node and volume control in the KCM. ExternalCloudVolumePlugin string // useServiceAccountCredentials indicates whether controllers should be run with // individual service account credentials. UseServiceAccountCredentials bool // run with untagged cloud instances AllowUntaggedCloud bool // routeReconciliationPeriod is the period for reconciling routes created for Nodes by cloud provider.. RouteReconciliationPeriod metav1.Duration // nodeMonitorPeriod is the period for syncing NodeStatus in NodeController. NodeMonitorPeriod metav1.Duration // clusterName is the instance prefix for the cluster. ClusterName string // clusterCIDR is CIDR Range for Pods in cluster. 
ClusterCIDR string // AllocateNodeCIDRs enables CIDRs for Pods to be allocated and, if // ConfigureCloudRoutes is true, to be set on the cloud provider. AllocateNodeCIDRs bool // CIDRAllocatorType determines what kind of pod CIDR allocator will be used. CIDRAllocatorType string // configureCloudRoutes enables CIDRs allocated with allocateNodeCIDRs // to be configured on the cloud provider. ConfigureCloudRoutes *bool // nodeSyncPeriod is the period for syncing nodes from cloudprovider. Longer // periods will result in fewer calls to cloud provider, but may delay addition // of new nodes to cluster. NodeSyncPeriod metav1.Duration } // CloudProviderConfiguration contains basically elements about cloud provider. type CloudProviderConfiguration struct { // Name is the provider for cloud services. Name string // cloudConfigFile is the path to the cloud provider configuration file. CloudConfigFile string } // CSRSigningControllerConfiguration contains elements describing CSRSigningController. type CSRSigningControllerConfiguration struct { // clusterSigningCertFile is the filename containing a PEM-encoded // X509 CA certificate used to issue cluster-scoped certificates ClusterSigningCertFile string // clusterSigningCertFile is the filename containing a PEM-encoded // RSA or ECDSA private key used to issue cluster-scoped certificates ClusterSigningKeyFile string // clusterSigningDuration is the length of duration signed certificates // will be given. ClusterSigningDuration metav1.Duration } // DeprecatedControllerConfiguration contains elements be deprecated. type DeprecatedControllerConfiguration struct { // DEPRECATED: deletingPodsQps is the number of nodes per second on which pods are deleted in // case of node failure. DeletingPodsQPS float32 // DEPRECATED: deletingPodsBurst is the number of nodes on which pods are bursty deleted in // case of node failure. For more details look into RateLimiter. DeletingPodsBurst int32 // registerRetryCount is the number of retries for initial node registration. // Retry interval equals node-sync-period. RegisterRetryCount int32 } // EndpointControllerConfiguration contains elements describing EndpointController. type EndpointControllerConfiguration struct { // concurrentEndpointSyncs is the number of endpoint syncing operations // that will be done concurrently. Larger number = faster endpoint updating, // but more CPU (and network) load. ConcurrentEndpointSyncs int32 } // GarbageCollectorControllerConfiguration contains elements describing GarbageCollectorController. type GarbageCollectorControllerConfiguration struct { // enables the generic garbage collector. MUST be synced with the // corresponding flag of the kube-apiserver. WARNING: the generic garbage // collector is an alpha feature. EnableGarbageCollector *bool // concurrentGCSyncs is the number of garbage collector workers that are // allowed to sync concurrently. ConcurrentGCSyncs int32 // gcIgnoredResources is the list of GroupResources that garbage collection should ignore. GCIgnoredResources []GroupResource } // NamespaceControllerConfiguration contains elements describing NamespaceController. type NamespaceControllerConfiguration struct { // namespaceSyncPeriod is the period for syncing namespace life-cycle // updates. NamespaceSyncPeriod metav1.Duration // concurrentNamespaceSyncs is the number of namespace objects that are // allowed to sync concurrently. ConcurrentNamespaceSyncs int32 } // PodGCControllerConfiguration contains elements describing PodGCController. 
type PodGCControllerConfiguration struct { // terminatedPodGCThreshold is the number of terminated pods that can exist // before the terminated pod garbage collector starts deleting terminated pods. // If <= 0, the terminated pod garbage collector is disabled. TerminatedPodGCThreshold int32 } // ReplicaSetControllerConfiguration contains elements describing ReplicaSetController. type ReplicaSetControllerConfiguration struct { // concurrentRSSyncs is the number of replica sets that are allowed to sync // concurrently. Larger number = more responsive replica management, but more // CPU (and network) load. ConcurrentRSSyncs int32 } // SAControllerConfiguration contains elements describing ServiceAccountController. type SAControllerConfiguration struct { // serviceAccountKeyFile is the filename containing a PEM-encoded private RSA key // used to sign service account tokens. ServiceAccountKeyFile string // concurrentSATokenSyncs is the number of service account token syncing operations // that will be done concurrently. ConcurrentSATokenSyncs int32 // rootCAFile is the root certificate authority will be included in service // account's token secret. This must be a valid PEM-encoded CA bundle. RootCAFile string } // TenantControllerConfiguration contains elements describing TenantController. type TenantControllerConfiguration struct { // TenantSyncPeriod is the period for syncing tenant life-cycle // updates. TenantSyncPeriod metav1.Duration `json:"tenantSyncPeriod,omitempty"` // concurrentTenantSyncs is the number of tenant objects that are // allowed to sync concurrently. ConcurrentTenantSyncs int32 `json:"concurrentTenantSyncs,omitempty"` // DefaultNetworkTemplatePath is the path to json-formatted template file of default network in tenant space DefaultNetworkTemplatePath string `json:"defaultNetworkTemplatePath,omitempty"` }
random_line_split
html_tools.py
import fnmatch import io, os, re import yaml def rewrite_outdir(out_dir, chapter_dirs, static_host): build_dir = os.path.dirname(out_dir) if static_host and not static_host.endswith('/'): static_host += '/' for path in _walk(build_dir): rewrite_file_links(path, out_dir, chapter_dirs, static_host) def rewrite_file_links(path, root, chapter_dirs, static_host): content = _read_file(path) link_elements = [ ('a', 'href'), ] other_elements = [ ('img', 'src'), ('script', 'src'), ('iframe', 'src'), ('link', 'href'), ('video', 'poster'), ('source', 'src'), ] if path.endswith(".yaml"): # YAML files are handled separately because rewriting links with # a regexp could add YAML syntax errors to the file if quotes are not # escaped properly. Escaping is now taken care of by the YAML module. yaml_data_dict = yaml.safe_load(content) recursive_rewrite_links( yaml_data_dict, path, root, link_elements, other_elements, static_host, chapter_dirs, 'data-aplus-chapter ', 'data-aplus-path="/static/{course}" ', yaml_data_dict.get('_rst_srcpath|i18n', yaml_data_dict.get('_rst_srcpath')), ) # _rst_srcpath is an internal value stored in the YAML file. # It is the path of the RST source file that contains the exercise. # The path is needed for fixing relative URLs, usually links pointing # to other chapters and exercises. It may have multiple values for # different languages in multilingual courses or only one string value # in monolingual courses. content = yaml.safe_dump(yaml_data_dict, default_flow_style=False, allow_unicode=True) else: content = rewrite_links( content, path, root, link_elements, other_elements, static_host, chapter_dirs, 'data-aplus-chapter ', 'data-aplus-path="/static/{course}" ', ) _write_file(path, content) def rewrite_links(content, path, root, link_elements, other_elements, static_host, chapter_dirs, chapter_append, yaml_append, rst_src_path=None): q1 = re.compile(r'^(\w+:|//|#)') # Starts with "https:", "//" or "#". q2 = re.compile(r'^(' + '|'.join(chapter_dirs) + r')(/|\\)') # Starts with a module directory name. for tag, attr in link_elements: content = rewrite_elements(content, tag, attr, path, root, q1, static_host, q2, chapter_append, yaml_append, rst_src_path) for tag, attr in other_elements: content = rewrite_elements(content, tag, attr, path, root, q1, static_host, None, None, yaml_append, rst_src_path) return content def rewrite_elements(content, tag, attr, path, root, q1, static_host, q2, append, yaml_append, rst_src_path=None): dir_name = os.path.dirname(path) out = "" p = re.compile( r'<' + tag + r'\s+[^<>]*' r'(?P<attr>' + attr + r')=(?P<slash>\\?)"(?P<val>[^"?#]*)' ) i = 0 for m in p.finditer(content): val = m.group('val') if val and not q1.search(val): # Add content up to attribute. j = m.start('attr') out += content[i:j] i = j full = '' if path.endswith('.yaml'): # content in yaml file # rst_src_path: The RST source file path is needed for fixing # relative URLs in the exercise description. # It should have been saved in the YAML data by the exercise directive. if rst_src_path: full = os.path.realpath(os.path.join( root, os.path.dirname(rst_src_path), val )) else: # We don't know which directory the relative path starts from, # so just assume the build root. It is likely incorrect. 
full = os.path.realpath(os.path.join(root, val)) else: # content in html file # dir_name points to either _build/html or _build/html/<round> full = os.path.realpath(os.path.join(dir_name, val)) if full.startswith(root): # NB: root ends with "_build/html" val_path_from_root = full[len(root)+1:].replace('\\', '/') # Replace Windows path separator backslash to the forward slash. # Links to chapters. if q2 and q2.search(val_path_from_root): if not out.endswith(append): # Directory depth (starting from _build/html) of the source file # that contains the link val. if path.endswith('.yaml'): # yaml files are always directly under _build/yaml, # but A+ can fix the URL when we prepend "../" once. # Most courses place chapters and exercises directly # under the module directory, in which case one # "../" is logical. dir_depth = 1 else: dir_depth = path[len(root)+1:].count(os.sep) val_path_from_root = ('../' * dir_depth) + val_path_from_root j = m.start('val') out += append + content[i:j] + val_path_from_root i = m.end('val') # Other links. elif static_host: j = m.start('val') out += content[i:j] + static_host + val_path_from_root i = m.end('val') elif path.endswith('.yaml') and yaml_append and not out.endswith(yaml_append): # Sphinx sets URLs to local files as relative URLs that work in # the local filesystem (e.g., ../_images/myimage.png). # The A+ frontend converts the URLs correctly when they are in # the chapter content. (The URL must be converted to an absolute # URL that refers to the MOOC grader course static files.) # However, the conversion does not work for URLs in exercise # descriptions because unlike for chapters, the service URL of # an exercise does not refer to the course static files. # Therefore, we add the attribute data-aplus-path="/static/{course}" # that A+ frontend uses to set the correct URL path. # Unfortunately, we must hardcode the MOOC grader static URL # (/static) here. out += yaml_append out += content[i:] return out def _walk(html_dir): files = [] for root, dirnames, filenames in os.walk(html_dir): for filename in fnmatch.filter(filenames, '*.html'): files.append(os.path.join(root, filename)) for filename in fnmatch.filter(filenames, '*.yaml'): files.append(os.path.join(root, filename)) return files def _read_file(file_path): with io.open(file_path, 'r', encoding='utf-8') as f: return f.read() def _write_file(file_path, content):
def recursive_rewrite_links(data_dict, path, root, link_elements, other_elements, static_host, chapter_dirs, chapter_append, yaml_append, rst_src_path, lang_key=False, lang=None): '''Rewrite links in the string values inside the data_dict.''' # YAML file may have a list or a dictionary in the topmost level. # lang_key and lang are used to pick the correct language from rst_src_path. if isinstance(data_dict, dict): for key, val in data_dict.items(): if lang_key: # data_dict is the value for a key that had the ending "|i18n", # so now key is a language code. lang = key if isinstance(val, dict) or isinstance(val, list): recursive_rewrite_links(val, path, root, link_elements, other_elements, static_host, chapter_dirs, chapter_append, yaml_append, rst_src_path, key.endswith('|i18n'), lang) # lang_key: if key is, e.g., "title|i18n", then the val dict # contains keys like "en" and "fi". elif isinstance(val, str): if isinstance(rst_src_path, dict): lang_rst_src_path = rst_src_path.get(lang if lang else 'en') else: lang_rst_src_path = rst_src_path data_dict[key] = rewrite_links(val, path, root, link_elements, other_elements, static_host, chapter_dirs, chapter_append, yaml_append, lang_rst_src_path) elif isinstance(data_dict, list): for i, a in enumerate(data_dict): if isinstance(a, dict) or isinstance(a, list): recursive_rewrite_links(a, path, root, link_elements, other_elements, static_host, chapter_dirs, chapter_append, yaml_append, rst_src_path, lang_key, lang) elif isinstance(a, str): if isinstance(rst_src_path, dict): lang_rst_src_path = rst_src_path.get(lang if lang else 'en') else: lang_rst_src_path = rst_src_path data_dict[i] = rewrite_links(a, path, root, link_elements, other_elements, static_host, chapter_dirs, chapter_append, yaml_append, lang_rst_src_path)
with io.open(file_path, 'w', encoding='utf-8') as f: f.write(content)
identifier_body
html_tools.py
import fnmatch import io, os, re import yaml def rewrite_outdir(out_dir, chapter_dirs, static_host): build_dir = os.path.dirname(out_dir) if static_host and not static_host.endswith('/'): static_host += '/' for path in _walk(build_dir): rewrite_file_links(path, out_dir, chapter_dirs, static_host) def rewrite_file_links(path, root, chapter_dirs, static_host): content = _read_file(path) link_elements = [ ('a', 'href'), ] other_elements = [ ('img', 'src'), ('script', 'src'), ('iframe', 'src'), ('link', 'href'), ('video', 'poster'), ('source', 'src'), ] if path.endswith(".yaml"): # YAML files are handled separately because rewriting links with # a regexp could add YAML syntax errors to the file if quotes are not # escaped properly. Escaping is now taken care of by the YAML module. yaml_data_dict = yaml.safe_load(content) recursive_rewrite_links( yaml_data_dict, path, root, link_elements, other_elements, static_host, chapter_dirs, 'data-aplus-chapter ', 'data-aplus-path="/static/{course}" ', yaml_data_dict.get('_rst_srcpath|i18n', yaml_data_dict.get('_rst_srcpath')), ) # _rst_srcpath is an internal value stored in the YAML file. # It is the path of the RST source file that contains the exercise. # The path is needed for fixing relative URLs, usually links pointing # to other chapters and exercises. It may have multiple values for # different languages in multilingual courses or only one string value # in monolingual courses. content = yaml.safe_dump(yaml_data_dict, default_flow_style=False, allow_unicode=True) else: content = rewrite_links( content, path, root, link_elements, other_elements, static_host, chapter_dirs, 'data-aplus-chapter ', 'data-aplus-path="/static/{course}" ', ) _write_file(path, content) def
html_tools.py
import fnmatch
import io, os, re

import yaml


def rewrite_outdir(out_dir, chapter_dirs, static_host):
    build_dir = os.path.dirname(out_dir)
    if static_host and not static_host.endswith('/'):
        static_host += '/'
    for path in _walk(build_dir):
        rewrite_file_links(path, out_dir, chapter_dirs, static_host)


def rewrite_file_links(path, root, chapter_dirs, static_host):
    content = _read_file(path)
    link_elements = [
        ('a', 'href'),
    ]
    other_elements = [
        ('img', 'src'),
        ('script', 'src'),
        ('iframe', 'src'),
        ('link', 'href'),
        ('video', 'poster'),
        ('source', 'src'),
    ]
    if path.endswith(".yaml"):
        # YAML files are handled separately because rewriting links with
        # a regexp could add YAML syntax errors to the file if quotes are not
        # escaped properly. Escaping is now taken care of by the YAML module.
        yaml_data_dict = yaml.safe_load(content)
        recursive_rewrite_links(
            yaml_data_dict, path, root,
            link_elements, other_elements,
            static_host, chapter_dirs,
            'data-aplus-chapter ',
            'data-aplus-path="/static/{course}" ',
            yaml_data_dict.get('_rst_srcpath|i18n', yaml_data_dict.get('_rst_srcpath')),
        )
        # _rst_srcpath is an internal value stored in the YAML file.
        # It is the path of the RST source file that contains the exercise.
        # The path is needed for fixing relative URLs, usually links pointing
        # to other chapters and exercises. It may have multiple values for
        # different languages in multilingual courses or only one string value
        # in monolingual courses.
        content = yaml.safe_dump(yaml_data_dict, default_flow_style=False, allow_unicode=True)
    else:
        content = rewrite_links(
            content, path, root,
            link_elements, other_elements,
            static_host, chapter_dirs,
            'data-aplus-chapter ',
            'data-aplus-path="/static/{course}" ',
        )
    _write_file(path, content)
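# --- Illustrative usage sketch (not part of the original module) ---
# A minimal sketch of how the entry point above might be driven from a build
# script. The _build layout and the module directory names are assumptions
# made for this example; only rewrite_outdir() itself comes from this file.
def _demo_rewrite_build():
    out_dir = os.path.realpath('_build/html')   # assumed Sphinx output directory
    chapter_dirs = ['module01', 'module02']     # assumed module directory names
    static_host = ''                            # empty: rely on data-aplus-* attributes
    # Walks the _build directory (both the html and yaml subdirectories)
    # and rewrites the links in every *.html and *.yaml file in place.
    rewrite_outdir(out_dir, chapter_dirs, static_host)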
def rewrite_links(content, path, root, link_elements, other_elements,
                  static_host, chapter_dirs, chapter_append, yaml_append,
                  rst_src_path=None):
    q1 = re.compile(r'^(\w+:|//|#)')  # Starts with "https:", "//" or "#".
    q2 = re.compile(r'^(' + '|'.join(chapter_dirs) + r')(/|\\)')  # Starts with a module directory name.
    for tag, attr in link_elements:
        content = rewrite_elements(content, tag, attr, path, root, q1,
                                   static_host, q2, chapter_append, yaml_append, rst_src_path)
    for tag, attr in other_elements:
        content = rewrite_elements(content, tag, attr, path, root, q1,
                                   static_host, None, None, yaml_append, rst_src_path)
    return content


def rewrite_elements(content, tag, attr, path, root, q1, static_host, q2,
                     append, yaml_append, rst_src_path=None):
    dir_name = os.path.dirname(path)
    out = ""
    p = re.compile(
        r'<' + tag + r'\s+[^<>]*'
        r'(?P<attr>' + attr + r')=(?P<slash>\\?)"(?P<val>[^"?#]*)'
    )
    i = 0
    for m in p.finditer(content):
        val = m.group('val')
        if val and not q1.search(val):
            # Add content up to attribute.
            j = m.start('attr')
            out += content[i:j]
            i = j
            full = ''
            if path.endswith('.yaml'):
                # Content in a yaml file.
                # rst_src_path: The RST source file path is needed for fixing
                # relative URLs in the exercise description.
                # It should have been saved in the YAML data by the exercise directive.
                if rst_src_path:
                    full = os.path.realpath(os.path.join(
                        root, os.path.dirname(rst_src_path), val
                    ))
                else:
                    # We don't know which directory the relative path starts from,
                    # so just assume the build root. It is likely incorrect.
                    full = os.path.realpath(os.path.join(root, val))
            else:
                # Content in an html file.
                # dir_name points to either _build/html or _build/html/<round>.
                full = os.path.realpath(os.path.join(dir_name, val))
            if full.startswith(root):
                # NB: root ends with "_build/html".
                val_path_from_root = full[len(root)+1:].replace('\\', '/')
                # Replace the Windows path separator backslash with the forward slash.
                # Links to chapters.
                if q2 and q2.search(val_path_from_root):
                    if not out.endswith(append):
                        # Directory depth (starting from _build/html) of the source file
                        # that contains the link val.
                        if path.endswith('.yaml'):
                            # yaml files are always directly under _build/yaml,
                            # but A+ can fix the URL when we prepend "../" once.
                            # Most courses place chapters and exercises directly
                            # under the module directory, in which case one
                            # "../" is logical.
                            dir_depth = 1
                        else:
                            dir_depth = path[len(root)+1:].count(os.sep)
                        val_path_from_root = ('../' * dir_depth) + val_path_from_root
                        j = m.start('val')
                        out += append + content[i:j] + val_path_from_root
                        i = m.end('val')
                # Other links.
                elif static_host:
                    j = m.start('val')
                    out += content[i:j] + static_host + val_path_from_root
                    i = m.end('val')
                elif path.endswith('.yaml') and yaml_append and not out.endswith(yaml_append):
                    # Sphinx sets URLs to local files as relative URLs that work in
                    # the local filesystem (e.g., ../_images/myimage.png).
                    # The A+ frontend converts the URLs correctly when they are in
                    # the chapter content. (The URL must be converted to an absolute
                    # URL that refers to the MOOC grader course static files.)
                    # However, the conversion does not work for URLs in exercise
                    # descriptions because unlike for chapters, the service URL of
                    # an exercise does not refer to the course static files.
                    # Therefore, we add the attribute data-aplus-path="/static/{course}"
                    # that the A+ frontend uses to set the correct URL path.
                    # Unfortunately, we must hardcode the MOOC grader static URL
                    # (/static) here.
                    out += yaml_append
    out += content[i:]
    return out
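# --- Illustrative sketch (not part of the original module) ---
# What rewrite_links() does to one chapter link versus one external link.
# The paths, the directory names, and the HTML snippet are invented for
# this example; call the function manually after the module is fully loaded.
def _demo_rewrite_links():
    root = os.path.realpath('_build/html')
    path = os.path.join(root, 'round1', 'page.html')   # hypothetical source page
    html = ('<a href="../module01/chapter.html">chapter</a> '
            '<a href="https://example.com/">external</a>')
    rewritten = rewrite_links(
        html, path, root,
        [('a', 'href')], [],        # rewrite only <a href="..."> here
        '', ['module01'],
        'data-aplus-chapter ', 'data-aplus-path="/static/{course}" ',
    )
    # The chapter link gains the data-aplus-chapter attribute, and its value is
    # normalized against the build root and re-prefixed with one "../" per
    # directory level; the absolute URL matches q1 and is left untouched.
    print(rewritten)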
def _walk(html_dir):
    files = []
    for root, dirnames, filenames in os.walk(html_dir):
        for filename in fnmatch.filter(filenames, '*.html'):
            files.append(os.path.join(root, filename))
        for filename in fnmatch.filter(filenames, '*.yaml'):
            files.append(os.path.join(root, filename))
    return files


def _read_file(file_path):
    with io.open(file_path, 'r', encoding='utf-8') as f:
        return f.read()


def _write_file(file_path, content):
    with io.open(file_path, 'w', encoding='utf-8') as f:
        f.write(content)


def recursive_rewrite_links(data_dict, path, root, link_elements, other_elements,
                            static_host, chapter_dirs, chapter_append, yaml_append,
                            rst_src_path, lang_key=False, lang=None):
    '''Rewrite links in the string values inside the data_dict.'''
    # A YAML file may have a list or a dictionary at the topmost level.
    # lang_key and lang are used to pick the correct language from rst_src_path.
    if isinstance(data_dict, dict):
        for key, val in data_dict.items():
            if lang_key:
                # data_dict is the value for a key that had the ending "|i18n",
                # so now key is a language code.
                lang = key
            if isinstance(val, dict) or isinstance(val, list):
                recursive_rewrite_links(val, path, root, link_elements, other_elements,
                    static_host, chapter_dirs, chapter_append, yaml_append,
                    rst_src_path, key.endswith('|i18n'), lang)
                # lang_key: if key is, e.g., "title|i18n", then the val dict
                # contains keys like "en" and "fi".
            elif isinstance(val, str):
                if isinstance(rst_src_path, dict):
                    lang_rst_src_path = rst_src_path.get(lang if lang else 'en')
                else:
                    lang_rst_src_path = rst_src_path
                data_dict[key] = rewrite_links(val, path, root,
                    link_elements, other_elements, static_host, chapter_dirs,
                    chapter_append, yaml_append, lang_rst_src_path)
    elif isinstance(data_dict, list):
        for i, a in enumerate(data_dict):
            if isinstance(a, dict) or isinstance(a, list):
                recursive_rewrite_links(a, path, root, link_elements, other_elements,
                    static_host, chapter_dirs, chapter_append, yaml_append,
                    rst_src_path, lang_key, lang)
            elif isinstance(a, str):
                if isinstance(rst_src_path, dict):
                    lang_rst_src_path = rst_src_path.get(lang if lang else 'en')
                else:
                    lang_rst_src_path = rst_src_path
                data_dict[i] = rewrite_links(a, path, root,
                    link_elements, other_elements, static_host, chapter_dirs,
                    chapter_append, yaml_append, lang_rst_src_path)
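# --- Illustrative sketch (not part of the original module) ---
# How the YAML-side traversal applies the rewrite to nested values. The
# exercise dictionary, its keys, and the languages are invented for this
# example; call the function manually after the module is fully loaded.
def _demo_recursive_rewrite():
    root = os.path.realpath('_build/yaml')
    path = os.path.join(root, 'exercise1.yaml')     # hypothetical exercise file
    data = {
        'title|i18n': {'en': 'Demo', 'fi': 'Demo'},
        'instructions': '<img src="../_images/fig.png">',
        '_rst_srcpath': 'module01/exercise1.rst',
    }
    recursive_rewrite_links(
        data, path, root,
        [('a', 'href')], [('img', 'src')],
        '', ['module01'],
        'data-aplus-chapter ', 'data-aplus-path="/static/{course}" ',
        data['_rst_srcpath'],
    )
    # String values are rewritten in place; dicts and lists are recursed into.
    # For the <img> above, the data-aplus-path attribute is injected in front
    # of src, while the relative URL itself is left for the frontend to resolve.
    print(data['instructions'])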
common.js
$( function() { $( "#slider" ).slider(); } );

$(document).ready(function() {
    $("body").css({ "opacity": "1", "transition": "opacity 1s ease-in-out" });

    // faq scroll
    faqNav.init();
    fixObj.init();
    scrollTo();

    $(window).on("backstretch.show", function (e, instance, index) {
        syncBg();
        $('.overlay').removeClass('show');
    });

    createBg();
});

/* old $('.list-indicator').fixer({ gap: 100 }); */

//if(!detectmob()) {
    $('.menu-bg').fixer({ gap: 0 });
//}

$( "#show_more" ).click(function() {
    $('#another-element').slideToggle('slow');
    $(this).remove();
});

$(function(){
    // Split-view slider: move the divider bar and counter-translate the two
    // layers so the design image is revealed under the cursor.
    $('.code-wrapper').on( "mousemove", function(e) {
        var offsets = $(this).offset();
        var fullWidth = $(this).width();
        var mouseX = e.pageX - offsets.left;
        if (mouseX < 0) {
            mouseX = 0;
        } else if (mouseX > fullWidth) {
            mouseX = fullWidth;
        }
        $(this).parent().find('.divider-bar').css({ left: mouseX, transition: 'none' });
        $(this).find('.design-wrapper').css({ transform: 'translateX(' + mouseX + 'px)', transition: 'none' });
        $(this).find('.design-image').css({ transform: 'translateX(' + (-1 * mouseX) + 'px)', transition: 'none' });
    });

    $('.divider-wrapper').on( "mouseleave", function() {
        $(this).parent().find('.divider-bar').css({ left: '50%', transition: 'all .3s' });
        $(this).find('.design-wrapper').css({ transform: 'translateX(50%)', transition: 'all .3s' });
        $(this).find('.design-image').css({ transform: 'translateX(-50%)', transition: 'all .3s' });
    });

    $('.user-feedback').click(function(e){
        e.preventDefault();
        var to = $('.contact').offset().top;
        $('html, body').animate({scrollTop: to - 90}, 'slow');
        $('.contact form input').eq(0).focus();
    });
}); // end of document ready

(function($) {
    document.addEventListener("touchmove", syncBg, true);
    window.addEventListener("scroll", syncBg, true);
    //window.addEventListener("scroll", listIndicate, true);

    $('[data-bg]').click(function (e) {
        e.preventDefault();
        changeBg( $(this) );
    });
}(jQuery));

function createBg() {
    // The backstretch-based backgrounds are only used in the mobile layout.
    if(!detectmob()) { return false; }
    $.each( $('.menu-bg.img-bg'), function(i) {
        var $bg = $(this);
        var $section = $bg.parent();
        if( $bg.hasClass('empty-bg') ) {
            $bg.css({opacity: 0});
        } else {
            var src = 'img/bg-' + $section.attr('id') + '.png';
            $bg.backstretch({url: src, alignY: 0});
            $section.backstretch({url: src, alignY: 0});
        }
    });
}

function changeBg(j) {
    var $section = j.parents('.section');
    var $bg = $section.find('.menu-bg');

    // Set the active li.
    var $li = j.parent().siblings();
    $li.removeClass('active');
    j.parent().addClass('active');

    // Change effect.
    $('.overlay').addClass('show');
    var img = new Image();
    img.src = 'img/' + j.data('bg');

    if(!detectmob()) {
        // Desktop: swap the CSS background once the image has loaded.
        img.onload = function() {
            if($section.length) {
                $section.css({'background-image': 'url(' + img.src + ')'});
            }
            if($bg.length) {
                $bg.css({'background-image': 'url(' + img.src + ')'});
            }
            syncBg();
            $('.overlay').removeClass('show');
        };
    } else {
        // Mobile: let backstretch handle the swap.
        $section.backstretch({url: img.src, alignY: 0});
        $bg.backstretch({url: img.src, alignY: 0});
    }
}

function syncBg() {
    if(!detectmob()) { return false; }
    $.each( $('.menu-bg.img-bg'), function(i) {
        var $bg = $(this);
        var $section = $bg.parent();
        var s = getScroll();
        var is_top = ($section.offset().top - s.top);
        // For empty backgrounds: fade in once the section is scrolled past.
        if( $bg.hasClass('empty-bg') ) {
            if ( is_top < 0 ) {
                $bg.css({opacity: 1});
            } else {
                $bg.css({opacity: 0});
            }
        }
        // For image backgrounds: pin them while the section is above the viewport top.
        else {
            if ( is_top < 0 ) {
                $section.children('.backstretch').css({position: 'fixed'});
                $section.find('.menu-bg').css({position: 'fixed'});
            } else {
                $section.children('.backstretch').css({position: 'absolute'});
                $section.find('.menu-bg').css({position: 'absolute', top: 0});
            }
        }
    });
}

function listIndicate() {
    var $section = $('.faq');
    if( !$section.length ) return false;
    var $i = $('.faq .list');
    var $pointer = $('.list-indicator');
    var s = getScroll();
    var is_top = ($section.offset().top - s.top);
    var faq_height = ($section.height() - 100);
    var faq_nav_height = $i.height();
    var faq_pointer_height = $pointer.height();
    var bottomstopeffect = -450;
    var correct = -110;
    if ( is_top < 0 && is_top > bottomstopeffect ) {
        $i.css({position: 'fixed', 'top': 120, 'width': $section.width() / 4});
        $pointer.css({position: 'fixed', 'top': 120 + faq_nav_height * ( 1 - (faq_height + is_top) / faq_height )});
    } else if(is_top < bottomstopeffect) {
        $i.css({position: 'absolute', 'top': faq_nav_height + correct});
        var ptop = faq_nav_height + correct + faq_nav_height * ( 1 - (faq_height + is_top) / faq_height );
        if(ptop > 2 * faq_nav_height + correct - faq_pointer_height) {
            ptop = 2 * faq_nav_height + correct - faq_pointer_height;
        }
        $pointer.css({position: 'absolute', top: ptop});
    } else {
        $i.css({position: 'absolute', 'top': 0});
        $pointer.css({position: 'absolute', top: 0});
    }
}

function test(text) {
    $('.test').text(text);
}

function detectmob() {
    // NOTE: this unconditional return forces the mobile code path everywhere;
    // the user-agent checks below are unreachable while it stays in place.
    return true;
    if( navigator.userAgent.match(/Android/i)
        || navigator.userAgent.match(/webOS/i)
        || navigator.userAgent.match(/iPhone/i)
        || navigator.userAgent.match(/iPad/i)
        || navigator.userAgent.match(/iPod/i)
        || navigator.userAgent.match(/BlackBerry/i)
        || navigator.userAgent.match(/Windows Phone/i)
    ){
        return true;
    } else {
        return false;
    }
}

function getScroll() {
    var data, scrOfX = 0, scrOfY = 0;
    if( typeof( window.pageYOffset ) == 'number' ) {
        // Netscape compliant
        scrOfY = window.pageYOffset;
        scrOfX = window.pageXOffset;
    } else if( document.body && ( document.body.scrollLeft || document.body.scrollTop ) ) {
        // DOM compliant
        scrOfY = document.body.scrollTop;
        scrOfX = document.body.scrollLeft;
    } else if( document.documentElement && ( document.documentElement.scrollLeft || document.documentElement.scrollTop ) ) {
        // IE6 Strict
        scrOfY = document.documentElement.scrollTop;
        scrOfX = document.documentElement.scrollLeft;
    }
    data = {'left': scrOfX, 'top': scrOfY};
    return data;
}

function initMap(){
    // Text overlay prototype.
    function TxtOverlay(pos, txt, cls, map) {
        this.pos = pos;
        this.txt_ = txt;
        this.cls_ = cls;
        this.map_ = map;
        this.div_ = null;
        this.setMap(map);
    }

    TxtOverlay.prototype = new google.maps.OverlayView();

    TxtOverlay.prototype.onAdd = function() {
        var div = document.createElement('DIV');
        div.className = this.cls_;
        div.innerHTML = this.txt_;
        this.div_ = div;
        var overlayProjection = this.getProjection();
        var position = overlayProjection.fromLatLngToDivPixel(this.pos);
        div.style.left = position.x + 'px';
        div.style.top = position.y + 'px';
        var panes = this.getPanes();
        panes.floatPane.appendChild(div);
    };

    TxtOverlay.prototype.draw = function() {
        var overlayProjection = this.getProjection();
        var position = overlayProjection.fromLatLngToDivPixel(this.pos);
        var div = this.div_;
        div.style.left = position.x + 'px';
        div.style.top = position.y + 'px';
    };

    // Create the map.
    var point = new google.maps.LatLng(55.774210, 37.520200);
    // Street address shown in the marker tooltip
    // ("4th Magistralnaya St., bld. 5, entrance 3, floor 3").
    var tooltipTemplate = '4-я Магистральная, дом 5, подъезд 3, этаж 3';
    var myMapOptions = {
        zoom: 16,
        center: point,
        mapTypeId: google.maps.MapTypeId.TERRAIN
    };
    var map = new google.maps.Map(document.getElementById("map_canvas"), myMapOptions);

    var image = new google.maps.MarkerImage(
        'img/map-image.png',
        new google.maps.Size(61,82),
        new google.maps.Point(0,0),
        new google.maps.Point(31,82)
    );
    var shadow = new google.maps.MarkerImage(
        'img/map-shadow.png',
        new google.maps.Size(106,82),
        new google.maps.Point(0,0),
        new google.maps.Point(31,82)
    );
    var shape = {
        coord: [37,1,40,2,43,3,45,4,46,5,48,6,49,7,50,8,51,9,52,10,53,11,54,12,54,13,55,14,56,15,56,16,57,17,57,18,57,19,58,20,58,21,58,22,59,23,60,24,60,25,60,26,60,27,60,28,60,29,60,30,60,31,60,32,60,33,60,34,59,35,59,36,59,37,59,38,59,39,58,40,58,41,58,42,57,43,57,44,57,45,56,46,56,47,55,48,54,49,54,50,53,51,52,52,52,53,51,54,50,55,50,56,49,57,48,58,48,59,47,60,46,61,45,62,45,63,44,64,43,65,43,66,43,67,42,68,41,69,40,70,40,71,39,72,38,73,37,74,36,75,36,76,35,77,34,78,33,79,32,80,31,81,30,81,29,80,28,79,27,78,26,77,25,76,25,75,24,74,23,73,22,72,22,71,21,70,20,69,20,68,19,67,18,66,17,65,17,64,17,63,16,62,15,61,14,60,14,59,13,58,12,57,11,56,11,55,10,54,9,53,9,52,8,51,7,50,7,49,6,48,6,47,5,46,4,45,4,44,3,43,3,42,2,41,2,40,2,39,2,38,1,37,1,36,1,35,1,34,1,33,1,32,1,31,1,30,1,29,1,28,1,27,1,26,1,25,1,24,1,23,2,22,2,21,2,20,3,19,3,18,3,17,4,16,4,15,5,14,6,13,6,12,7,11,8,10,9,9,10,8,11,7,12,6,14,5,15,4,17,3,20,2,23,1,37,1],
        type: 'poly'
    };
    var marker = new google.maps.Marker({
        draggable: true,
        raiseOnDrag: false,
        icon: image,
        shadow: shadow,
        shape: shape,
        map: map,
        position: point
    });
    var txt = new TxtOverlay(point, tooltipTemplate, "map_label", map);
}

var forms = {
    errors: {
        empty: 'Поле не заполнено',   // "The field is not filled in"
        email: 'Это не почта'          // "This is not an email address"
    },
    checkemail: function(emailAddress) {
        var pattern = /^(("[\w-\s]+")|([\w-]+(?:\.[\w-]+)*)|("[\w-\s]+")([\w-]+(?:\.[\w-]+)*))(@((?:[\w-]+\.)*\w[\w-]{0,66})\.([a-z]{2,6}(?:\.[a-z]{2})?)$)|(@\[?((25[0-5]\.|2[0-4][0-9]\.|1[0-9]{2}\.|[0-9]{1,2}\.))((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[0-9]{1,2})\.){2}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[0-9]{1,2})\]?$)/i;
        return pattern.test(emailAddress);
    },
    prevalidate: function () {
        $('[name="name"], [name="email"]').css('border', 'rgba(0, 0, 0, 0.2) 1px solid');
    },
    makeerrfield: function(name) {
        $('[name="'+name+'"]').css('border', '#ff6850 1px solid');
        // $('[name="'+name+'"]').css('color', '#ff6850');
    },
    showError: function( name, msg ) {
        forms.makeerrfield(name);
        $("#error_mes").html(msg);
        $('[name="'+name+'"]').after( $("#error_mes") );
        $("#error_mes").fadeIn("slow");
    },
    validate: function() {
        forms.prevalidate();
        if( !$('[name="name"]').val() ) {
            forms.showError('name', forms.errors.empty );
            return false;
        }
        if( !$('[name="email"]').val() ) {
            forms.showError('email', forms.errors.empty );
            return false;
        }
        if( !forms.checkemail( $('[name="email"]').val() ) ) {
            forms.showError('email', forms.errors.email );
            return false;
        }
        return true;
    }
};

function validate() {
    return forms.validate();
}

var fixObj = {
    fix: function(wrapper, obj, w) {
        if(wrapper.length && obj.length){
            this.minTop = wrapper.offset().top;
            if (obj.attr("id") == "js-home-howto-pass-btn") {
                this.maxTop = ($("#js-home-test-wrapper").offset().top + 300) - $(window).height();
            } else {
                this.maxTop = wrapper.offset().top - 50 + (wrapper.height() - obj.height());
            }
            if ($(window).scrollTop() >= this.minTop && $(window).scrollTop() < this.maxTop) {
                obj.removeClass("fixed_bottom").addClass("fixed_top");
            } else if ($(window).scrollTop() >= this.maxTop) {
                obj.removeClass("fixed_top").addClass("fixed_bottom");
            } else {
                obj.removeClass("fixed_top fixed_bottom").css({"top": 0});
            }
            if (w != undefined) {
                obj.css({"width": w});
            }
        }
    },
    init: function(){
        var isiPad = navigator.userAgent.match(/iPad/i) != null;
        if(!isiPad){
            $(window).on("ready load scroll resize", function () {
                fixObj.fix( $("#js-home-faq"), $("#js-home-faq-q_list"), $(".home-col_230").width() );
            });
        }
    }
};

var faqNav = {
    init: function () {
        var faq = $("#js-home-faq");
        if (faq.length) {   // .size() is deprecated in jQuery; .length is equivalent
            this.faq = faq;
            this.sidebar = $("#js-home-faq-q_list");
            this.content = $("#js-home-faq-rows");
            this.indicator = $("#js-home-faq-list-indicator");
            this.indicator.show();
            $(window).on("load resize scroll", function(){
                faqNav.staticVars();
                faqNav.scrollVars();
                faqNav.setIndicator();
            });
        }
    },
    staticVars: function () {
        this.contentHeight = this.content.outerHeight();
        this.windowHeight = $(window).outerHeight();
        this.viewPercent = this.windowHeight / this.contentHeight;
        this.sidebarHeight = this.sidebar.outerHeight();
    },
    scrollVars: function () {
        this.scrollTop = $(window).scrollTop();
        this.contentTop = this.content.offset().top;
        this.topPercent = (this.scrollTop - this.contentTop) / this.contentHeight;
    },
    setIndicator: function () {
        // Map the scroll progress through the FAQ content onto the sidebar height.
        var top = this.sidebarHeight * this.topPercent;
        if (top < 0) {
            top = 0;
        } else if (top > this.sidebarHeight) {
            top = this.sidebarHeight;
        }
        this.indicator.css({ "top": top });
    }
};

// NB: this top-level declaration shadows window.scrollTo.
function scrollTo() {
    var links = $(".js-scrollto");
    links.on("click", function (e) {
        e.preventDefault();
        var id = $(this).attr("href").split("#")[1],
            obj = $("#" + id);
        $('html, body').animate({scrollTop: obj.offset().top - 90}, 200, function () {
            window.location.hash = "#" + id;
        });
        return false;
    });
}
google.maps.Point(0,0), new google.maps.Point(31,82) ); var shadow = new google.maps.MarkerImage( 'img/map-shadow.png', new google.maps.Size(106,82), new google.maps.Point(0,0), new google.maps.Point(31,82) ); var shape = { coord: [37,1,40,2,43,3,45,4,46,5,48,6,49,7,50,8,51,9,52,10,53,11,54,12,54,13,55,14,56,15,56,16,57,17,57,18,57,19,58,20,58,21,58,22,59,23,60,24,60,25,60,26,60,27,60,28,60,29,60,30,60,31,60,32,60,33,60,34,59,35,59,36,59,37,59,38,59,39,58,40,58,41,58,42,57,43,57,44,57,45,56,46,56,47,55,48,54,49,54,50,53,51,52,52,52,53,51,54,50,55,50,56,49,57,48,58,48,59,47,60,46,61,45,62,45,63,44,64,43,65,43,66,43,67,42,68,41,69,40,70,40,71,39,72,38,73,37,74,36,75,36,76,35,77,34,78,33,79,32,80,31,81,30,81,29,80,28,79,27,78,26,77,25,76,25,75,24,74,23,73,22,72,22,71,21,70,20,69,20,68,19,67,18,66,17,65,17,64,17,63,16,62,15,61,14,60,14,59,13,58,12,57,11,56,11,55,10,54,9,53,9,52,8,51,7,50,7,49,6,48,6,47,5,46,4,45,4,44,3,43,3,42,2,41,2,40,2,39,2,38,1,37,1,36,1,35,1,34,1,33,1,32,1,31,1,30,1,29,1,28,1,27,1,26,1,25,1,24,1,23,2,22,2,21,2,20,3,19,3,18,3,17,4,16,4,15,5,14,6,13,6,12,7,11,8,10,9,9,10,8,11,7,12,6,14,5,15,4,17,3,20,2,23,1,37,1], type: 'poly' }; var marker = new google.maps.Marker({ draggable: true, raiseOnDrag: false, icon: image, shadow: shadow, shape: shape, map: map, position: point }); txt = new TxtOverlay(point, tooltipTemplate, "map_label", map) } var forms = { errors: {empty: 'Поле не заполнено', email: 'Это не почта' }, checkemail: function(emailAddress) { var pattern = new RegExp(/^(("[\w-\s]+")|([\w-]+(?:\.[\w-]+)*)|("[\w-\s]+")([\w-]+(?:\.[\w-]+)*))(@((?:[\w-]+\.)*\w[\w-]{0,66})\.([a-z]{2,6}(?:\.[a-z]{2})?)$)|(@\[?((25[0-5]\.|2[0-4][0-9]\.|1[0-9]{2}\.|[0-9]{1,2}\.))((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[0-9]{1,2})\.){2}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[0-9]{1,2})\]?$)/i); return pattern.test(emailAddress); }, prevalidate: function () { $('[name="name"], [name="email"]').css('border', 'rgba(0, 0, 0, 0.2) 1px solid'); }, makeerrfield: function(name) { $('[name="'+name+'"]').css('border', '#ff6850 1px solid'); // $('[name="'+name+'"]').css('color', '#ff6850'); }, showError: function( name, msg ) { forms.makeerrfield(name); $("#error_mes").html(msg); $('[name="'+name+'"]').after( $("#error_mes") ); $("#error_mes").fadeIn("slow"); }, validate: function() { forms.prevalidate(); if( !$('[name="name"]').val() ) { forms.showError('name', forms.errors.empty ); return false; } if( !$('[name="email"]').val() ) { forms.showError('email', forms.errors.empty ); return false; } if( !forms.checkemail( $('[name="email"]').val() ) ) { forms.showError('email', forms.errors.email ); return false; } return true; } } function validate() { return forms.validate(); } /* $(function(){ var field = new Array("name", "email"); $("form").submit(function() { var error=0; $("form").find(":input").each(function() { for(var i=0;i<field.length;i++){ if($(this).attr("name")==field[i]){ if(!$(this).val()){ $(this).css('border', '#ff6850 1px solid'); $(this).css('color', '#ff6850'); error=1; } else{ $(this).css('border', 'rgba(0, 0, 0, 0.2) 1px solid'); } } } }) var email = $("#email").val(); if(!isValidEmailAddress(email)){ error=2; $("#email").css('border', '#ff6850 1px solid'); } if(error==0){ return true; } else{ if(error==1) err_text="Поле не заполнено"; if(error==2) err_text="Это не почта"; $("#error_mes").html(err_text); $("#error_mes").fadeIn("slow"); return false; } }) }); function isValidEmailAddress(emailAddress) { var pattern = new 
RegExp(/^(("[\w-\s]+")|([\w-]+(?:\.[\w-]+)*)|("[\w-\s]+")([\w-]+(?:\.[\w-]+)*))(@((?:[\w-]+\.)*\w[\w-]{0,66})\.([a-z]{2,6}(?:\.[a-z]{2})?)$)|(@\[?((25[0-5]\.|2[0-4][0-9]\.|1[0-9]{2}\.|[0-9]{1,2}\.))((25[0-5]|2[0-4][0-9]|1[0-9]{2}|[0-9]{1,2})\.){2}(25[0-5]|2[0-4][0-9]|1[0-9]{2}|[0-9]{1,2})\]?$)/i); return pattern.test(emailAddress); } */ var fixObj = { fix: function(wrapper, obj, w) { if(wrapper.length && obj.length){ this.minTop = wrapper.offset().top; if (obj.attr("id") == "js-home-howto-pass-btn") { this.maxTop = ($("#js-home-test-wrapper").offset().top + 300) - $(window).height(); } else { this.maxTop = wrapper.offset().top - 50 + (wrapper.height() - obj.height()); } if ($(window).scrollTop() >= this.minTop && $(window).scrollTop() < this.maxTop) { obj.removeClass("fixed_bottom").addClass("fixed_top"); } else if ($(window).scrollTop() >= this.maxTop) { obj.removeClass("fixed_top").addClass("fixed_bottom"); } else { obj.removeClass("fixed_top fixed_bottom").css({"top": 0}); } if (w != undefined) { obj.css({"width": w}); } } }, init: function(){ var isiPad = navigator.userAgent.match(/iPad/i) != null; if(!isiPad){ $(window).on("ready load scroll resize", function () { fixObj.fix( $("#js-home-faq"), $("#js-home-faq-q_list"), $(".home-col_230").width() ); }); } } }; var faqNav = { init: function () { var faq = $("#js-home-faq"); if (faq.size()) { console.log('fav') this.faq = faq; this.sidebar = $("#js-home-faq-q_list"); this.content = $("#js-home-faq-rows"); this.indicator = $("#js-home-faq-list-indicator"); this.indicator.show(); $(window).on("load resize scroll", function(){ faqNav.staticVars(); faqNav.scrollVars(); faqNav.setIndicator(); }); } }, staticVars: function () { this.contentHeight = this.content.outerHeight(); this.windowHeight = $(window).outerHeight(); this.viewPercent = this.windowHeight / this.contentHeight; this.sidebarHeight = this.sidebar.outerHeight(); }, scrollVars: function () { this.scrollTop = $(window).scrollTop(); this.contentTop = this.content.offset().top; this.topPercent = (this.scrollTop - this.contentTop) / this.contentHeight; }, setIndicator: function () { var top = this.sidebarHeight * this.topPercent; if (top < 0) { top = 0; } else if (top > (this.sidebarHeight)) { top = this.sidebarHeight; } this.indicator.css({ "top": top }); } }; function scrollTo() { var links = $(".js-scrollto"); links.on("click", function (e) { e.preventDefault(); var id = $(this).attr("href").split("#")[1], obj = $("#" + id); $('html, body').animate({scrollTop: obj.offset().top - 90}, 200, function () { window.location.hash = "#" + id; }); return false; }); }
listIndicate
identifier_name
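Each record here pairs a source file name with a prefix, a suffix, the held-out middle, and its fim_type label. For an identifier_name record such as the one above, the masked middle is a single identifier; a minimal Python sketch (the abbreviated field strings below are illustrative stand-ins, not the record's full values) shows how the original source is recovered by splicing the fields back in source order:

    # Minimal sketch: a FIM record is rebuilt as prefix + middle + suffix.
    prefix = "... window.addEventListener('scroll', syncBg, true); ... function "  # ends just before the masked name
    middle = "listIndicate"                          # fim_type: identifier_name
    suffix = "() { var $section = $('.faq'); ... }"  # resumes immediately after it

    reconstructed = prefix + middle + suffix
    assert "function listIndicate()" in reconstructed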
dsm2dtm.py
""" dsm2dtm - Generate DTM (Digital Terrain Model) from DSM (Digital Surface Model) Author: Naman Jain naman.jain@btech2015.iitgn.ac.in www.namanji.wixsite.com/naman/ """ import os import numpy as np import rasterio import argparse try: import gdal except: from osgeo import gdal def downsample_raster(in_path, out_path, downsampling_factor): gdal_raster = gdal.Open(in_path) width, height = gdal_raster.RasterXSize, gdal_raster.RasterYSize gdal.Translate( out_path, in_path, width=int((width // downsampling_factor)), height=int((height // downsampling_factor)), outputType=gdal.GDT_Float32, ) def upsample_raster(in_path, out_path, target_height, target_width): gdal.Translate( out_path, in_path, width=target_width, height=target_height, resampleAlg="bilinear", outputType=gdal.GDT_Float32, ) def generate_slope_raster(in_path, out_path): """ Generates a slope raster from the input DEM raster. Input: in_path: {string} path to the DEM raster Output: out_path: {string} path to the generated slope image """ cmd = "gdaldem slope -alg ZevenbergenThorne {} {}".format(in_path, out_path) os.system(cmd) def get_mean(raster_path, ignore_value=-9999.0):
def extract_dtm(dsm_path, ground_dem_path, non_ground_dem_path, radius, terrain_slope): """ Generates a ground DEM and non-ground DEM raster from the input DSM raster. Input: dsm_path: {string} path to the DSM raster radius: {int} Search radius of kernel in cells. terrain_slope: {float} average slope of the input terrain Output: ground_dem_path: {string} path to the generated ground DEM raster non_ground_dem_path: {string} path to the generated non-ground DEM raster """ cmd = "saga_cmd grid_filter 7 -INPUT {} -RADIUS {} -TERRAINSLOPE {} -GROUND {} -NONGROUND {}".format( dsm_path, radius, terrain_slope, ground_dem_path, non_ground_dem_path ) os.system(cmd) def remove_noise(ground_dem_path, out_path, ignore_value=-99999.0): """ Removes noise (high elevation data points like roofs, etc.) from the ground DEM raster. Replaces values in those pixels with No data Value (-99999.0) Input: ground_dem_path: {string} path to the generated ground DEM raster no_data_value: {float} replacing value in the ground raster (to be treated as No Data Value) Output: out_path: {string} path to the filtered ground DEM raster """ ground_np = np.array(gdal.Open(ground_dem_path).ReadAsArray()) std = ground_np[ground_np != ignore_value].std() mean = ground_np[ground_np != ignore_value].mean() threshold_value = mean + 1.5 * std ground_np[ground_np >= threshold_value] = -99999.0 save_array_as_geotif(ground_np, ground_dem_path, out_path) def save_array_as_geotif(array, source_tif_path, out_path): """ Generates a geotiff raster from the input numpy array (height * width * depth) Input: array: {numpy array} numpy array to be saved as geotiff source_tif_path: {string} path to the geotiff from which projection and geotransformation information will be extracted. Output: out_path: {string} path to the generated Geotiff raster """ if len(array.shape) > 2: height, width, depth = array.shape else: height, width = array.shape depth = 1 source_tif = gdal.Open(source_tif_path) driver = gdal.GetDriverByName("GTiff") dataset = driver.Create(out_path, width, height, depth, gdal.GDT_Float32) if depth != 1: for i in range(depth): dataset.GetRasterBand(i + 1).WriteArray(array[:, :, i]) else: dataset.GetRasterBand(1).WriteArray(array) geotrans = source_tif.GetGeoTransform() proj = source_tif.GetProjection() dataset.SetGeoTransform(geotrans) dataset.SetProjection(proj) dataset.FlushCache() dataset = None def sdat_to_gtiff(sdat_raster_path, out_gtiff_path): gdal.Translate( out_gtiff_path, sdat_raster_path, format="GTiff", ) def close_gaps(in_path, out_path, threshold=0.1): """ Interpolates the holes (no data value) in the input raster. Input: in_path: {string} path to the input raster with holes threshold: {float} Tension Threshold Output: out_path: {string} path to the generated raster with closed holes. """ cmd = "saga_cmd grid_tools 7 -INPUT {} -THRESHOLD {} -RESULT {}".format( in_path, threshold, out_path ) os.system(cmd) def smoothen_raster(in_path, out_path, radius=2): """ Applies gaussian filter to the input raster. 
Input: in_path: {string} path to the input raster radius: {int} kernel radius to be used for smoothing Output: out_path: {string} path to the generated smoothened raster """ cmd = "saga_cmd grid_filter 1 -INPUT {} -RESULT {} -KERNEL_TYPE 0 -KERNEL_RADIUS {}".format( in_path, out_path, radius ) os.system(cmd) def subtract_rasters(rasterA_path, rasterB_path, out_path, no_data_value=-99999.0): cmd = 'gdal_calc.py -A {} -B {} --outfile {} --NoDataValue={} --calc="A-B"'.format( rasterA_path, rasterB_path, out_path, no_data_value ) os.system(cmd) def replace_values( rasterA_path, rasterB_path, out_path, no_data_value=-99999.0, threshold=0.98 ): """ Replaces values in input rasterA with no_data_value where cell value >= threshold in rasterB Input: rasterA_path: {string} path to the input rasterA rasterB_path: {string} path to the input rasterB Output: out_path: {string} path to the generated raster """ cmd = 'gdal_calc.py -A {} --NoDataValue={} -B {} --outfile {} --calc="{}*(B>={}) + (A)*(B<{})"'.format( rasterA_path, no_data_value, rasterB_path, out_path, no_data_value, threshold, threshold, ) os.system(cmd) def expand_holes_in_raster( in_path, search_window=7, no_data_value=-99999.0, threshold=50 ): """ Expands holes (cells with no_data_value) in the input raster. Input: in_path: {string} path to the input raster search_window: {int} kernel size to be used as window threshold: {float} threshold on percentage of cells with no_data_value Output: np_raster: {numpy array} Returns the modified input raster's array """ np_raster = np.array(gdal.Open(in_path).ReadAsArray()) height, width = np_raster.shape[0], np_raster.shape[1] for i in range(int((search_window - 1) / 2), width, 1): for j in range(int((search_window - 1) / 2), height, 1): window = np_raster[ int(i - (search_window - 1) / 2) : int(i - (search_window - 1) / 2) + search_window, int(j - (search_window - 1) / 2) : int(j - (search_window - 1) / 2) + search_window, ] if ( np.count_nonzero(window == no_data_value) >= (threshold * search_window ** 2) / 100 ): try: np_raster[i, j] = no_data_value except: pass return np_raster def get_raster_crs(raster_path): """ Returns the CRS (Coordinate Reference System) of the raster Input: raster_path: {string} path to the source tif image """ raster = rasterio.open(raster_path) return raster.crs def get_raster_resolution(raster_path): raster = gdal.Open(raster_path) raster_geotrans = raster.GetGeoTransform() x_res = raster_geotrans[1] y_res = -raster_geotrans[5] return x_res, y_res def get_res_and_downsample(dsm_path, temp_dir): # check DSM resolution. Downsample if DSM is of very high resolution to save processing time. 
x_res, y_res = get_raster_resolution(dsm_path) # resolutions are in meters dsm_name = dsm_path.split("/")[-1].split(".")[0] dsm_crs = get_raster_crs(dsm_path) if dsm_crs != 4326: if x_res < 0.3 or y_res < 0.3: target_res = 0.3 # downsample to this resolution (in meters) downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1] downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif") # Downsampling DSM downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor) dsm_path = downsampled_dsm_path else: if x_res < 2.514e-06 or y_res < 2.514e-06: target_res = 2.514e-06 # downsample to this resolution (in degrees) downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1] downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif") # Downsampling DSM downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor) dsm_path = downsampled_dsm_path return dsm_path def get_updated_params(dsm_path, search_radius, smoothen_radius): # search_radius and smoothen_radius are set w.r.t. a 30cm DSM # returns updated parameters if DSM is of coarser resolution x_res, y_res = get_raster_resolution(dsm_path) # resolutions are in meters dsm_crs = get_raster_crs(dsm_path) if dsm_crs != 4326: if x_res > 0.3 or y_res > 0.3: search_radius = int((min(x_res, y_res) * search_radius) / 0.3) smoothen_radius = int((min(x_res, y_res) * smoothen_radius) / 0.3) else: if x_res > 2.514e-06 or y_res > 2.514e-06: search_radius = int((min(x_res, y_res) * search_radius) / 2.514e-06) smoothen_radius = int((min(x_res, y_res) * smoothen_radius) / 2.514e-06) return search_radius, smoothen_radius def main( dsm_path, out_dir, search_radius=40, smoothen_radius=45, dsm_replace_threshold_val=0.98, ): # master function that calls all other functions os.makedirs(out_dir, exist_ok=True) temp_dir = os.path.join(out_dir, "temp_files") os.makedirs(temp_dir, exist_ok=True) dsm_path = get_res_and_downsample(dsm_path, temp_dir) # get updated params w.r.t. DSM resolution search_radius, smoothen_radius = get_updated_params( dsm_path, search_radius, smoothen_radius ) # Generate DTM # STEP 1: Generate slope raster from dsm to get average slope value dsm_name = dsm_path.split("/")[-1].split(".")[0] dsm_slp_path = os.path.join(temp_dir, dsm_name + "_slp.tif") generate_slope_raster(dsm_path, dsm_slp_path) avg_slp = int(get_mean(dsm_slp_path)) # STEP 2: Split DSM into ground and non-ground surface rasters ground_dem_path = os.path.join(temp_dir, dsm_name + "_ground.sdat") non_ground_dem_path = os.path.join(temp_dir, dsm_name + "_non_ground.sdat") extract_dtm( dsm_path, ground_dem_path, non_ground_dem_path, search_radius, avg_slp, ) # STEP 3: Applying Gaussian Filter on the generated ground raster (parameters: radius = 45, mode = Circle) smoothened_ground_path = os.path.join(temp_dir, dsm_name + "_ground_smth.sdat") smoothen_raster(ground_dem_path, smoothened_ground_path, smoothen_radius) # STEP 4: Generating a difference raster (ground DEM - smoothened ground DEM) diff_raster_path = os.path.join(temp_dir, dsm_name + "_ground_diff.sdat") subtract_rasters(ground_dem_path, smoothened_ground_path, diff_raster_path) # STEP 5: Thresholding on the difference raster to replace values in Ground DEM by no-data values (threshold = 0.98) thresholded_ground_path = os.path.join( temp_dir, dsm_name + "_ground_thresholded.sdat" ) replace_values( ground_dem_path, diff_raster_path, thresholded_ground_path, threshold=dsm_replace_threshold_val, ) # STEP 6: Removing noisy spikes from the generated DTM
ground_dem_filtered_path = os.path.join(temp_dir, dsm_name + "_ground_filtered.tif") remove_noise(thresholded_ground_path, ground_dem_filtered_path) # STEP 7: Expanding holes in the thresholded ground raster bigger_holes_ground_path = os.path.join( temp_dir, dsm_name + "_ground_bigger_holes.sdat" ) temp = expand_holes_in_raster(ground_dem_filtered_path) save_array_as_geotif(temp, ground_dem_filtered_path, bigger_holes_ground_path) # STEP 8: Close gaps in the DTM dtm_path = os.path.join(temp_dir, dsm_name + "_dtm.sdat") close_gaps(bigger_holes_ground_path, dtm_path) # STEP 9: Convert to GeoTiff dtm_array = gdal.Open(dtm_path).ReadAsArray() dtm_tif_path = os.path.join(out_dir, dsm_name + "_dtm.tif") # save_array_as_geotif(dtm_array, dsm_path, dtm_tif_path) sdat_to_gtiff(dtm_path, dtm_tif_path) return dtm_tif_path # ----------------------------------------------------------------------------------------------------- if __name__ == "__main__": parser = argparse.ArgumentParser(description="Generate DTM from DSM") parser.add_argument("--dsm", help="dsm path string") args = parser.parse_args() dsm_path = args.dsm out_dir = "generated_dtm" dtm_path = main(dsm_path, out_dir) print("######### DTM generated at: ", dtm_path)
np_raster = np.array(gdal.Open(raster_path).ReadAsArray()) return np_raster[np_raster != ignore_value].mean()
identifier_body
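In this record the fim_type is identifier_body: the held-out middle is the entire indented body of get_mean. Splicing it back under the def line at the end of the prefix restores the function as it appears in the source file (the inline comment below is editorial):

    import numpy as np
    from osgeo import gdal

    def get_mean(raster_path, ignore_value=-9999.0):
        # the record's middle, re-indented under the prefix's `def` line
        np_raster = np.array(gdal.Open(raster_path).ReadAsArray())
        return np_raster[np_raster != ignore_value].mean()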
dsm2dtm.py
""" dsm2dtm - Generate DTM (Digital Terrain Model) from DSM (Digital Surface Model) Author: Naman Jain naman.jain@btech2015.iitgn.ac.in www.namanji.wixsite.com/naman/ """ import os import numpy as np import rasterio import argparse try: import gdal except: from osgeo import gdal def downsample_raster(in_path, out_path, downsampling_factor): gdal_raster = gdal.Open(in_path) width, height = gdal_raster.RasterXSize, gdal_raster.RasterYSize gdal.Translate( out_path, in_path, width=int((width // downsampling_factor)), height=int((height // downsampling_factor)), outputType=gdal.GDT_Float32, ) def upsample_raster(in_path, out_path, target_height, target_width): gdal.Translate( out_path, in_path, width=target_width, height=target_height, resampleAlg="bilinear", outputType=gdal.GDT_Float32, ) def generate_slope_raster(in_path, out_path): """ Generates a slope raster from the input DEM raster. Input: in_path: {string} path to the DEM raster Output: out_path: {string} path to the generated slope image """ cmd = "gdaldem slope -alg ZevenbergenThorne {} {}".format(in_path, out_path) os.system(cmd) def get_mean(raster_path, ignore_value=-9999.0): np_raster = np.array(gdal.Open(raster_path).ReadAsArray()) return np_raster[np_raster != ignore_value].mean() def extract_dtm(dsm_path, ground_dem_path, non_ground_dem_path, radius, terrain_slope): """ Generates a ground DEM and non-ground DEM raster from the input DSM raster. Input: dsm_path: {string} path to the DSM raster radius: {int} Search radius of kernel in cells. terrain_slope: {float} average slope of the input terrain Output: ground_dem_path: {string} path to the generated ground DEM raster non_ground_dem_path: {string} path to the generated non-ground DEM raster """ cmd = "saga_cmd grid_filter 7 -INPUT {} -RADIUS {} -TERRAINSLOPE {} -GROUND {} -NONGROUND {}".format( dsm_path, radius, terrain_slope, ground_dem_path, non_ground_dem_path ) os.system(cmd) def remove_noise(ground_dem_path, out_path, ignore_value=-99999.0): """ Removes noise (high elevation data points like roofs, etc.) from the ground DEM raster. Replaces values in those pixels with No data Value (-99999.0) Input: ground_dem_path: {string} path to the generated ground DEM raster no_data_value: {float} replacing value in the ground raster (to be treated as No Data Value) Output: out_path: {string} path to the filtered ground DEM raster """ ground_np = np.array(gdal.Open(ground_dem_path).ReadAsArray()) std = ground_np[ground_np != ignore_value].std() mean = ground_np[ground_np != ignore_value].mean() threshold_value = mean + 1.5 * std ground_np[ground_np >= threshold_value] = -99999.0 save_array_as_geotif(ground_np, ground_dem_path, out_path) def save_array_as_geotif(array, source_tif_path, out_path): """ Generates a geotiff raster from the input numpy array (height * width * depth) Input: array: {numpy array} numpy array to be saved as geotiff source_tif_path: {string} path to the geotiff from which projection and geotransformation information will be extracted. 
Output: out_path: {string} path to the generated Geotiff raster """ if len(array.shape) > 2: height, width, depth = array.shape else: height, width = array.shape depth = 1 source_tif = gdal.Open(source_tif_path) driver = gdal.GetDriverByName("GTiff") dataset = driver.Create(out_path, width, height, depth, gdal.GDT_Float32) if depth != 1: for i in range(depth): dataset.GetRasterBand(i + 1).WriteArray(array[:, :, i]) else: dataset.GetRasterBand(1).WriteArray(array) geotrans = source_tif.GetGeoTransform() proj = source_tif.GetProjection() dataset.SetGeoTransform(geotrans) dataset.SetProjection(proj) dataset.FlushCache() dataset = None def sdat_to_gtiff(sdat_raster_path, out_gtiff_path): gdal.Translate( out_gtiff_path, sdat_raster_path, format="GTiff", ) def close_gaps(in_path, out_path, threshold=0.1): """ Interpolates the holes (no data value) in the input raster. Input: in_path: {string} path to the input raster with holes threshold: {float} Tension Threshold Output: out_path: {string} path to the generated raster with closed holes. """ cmd = "saga_cmd grid_tools 7 -INPUT {} -THRESHOLD {} -RESULT {}".format( in_path, threshold, out_path ) os.system(cmd) def smoothen_raster(in_path, out_path, radius=2): """ Applies gaussian filter to the input raster. Input: in_path: {string} path to the input raster radius: {int} kernel radius to be used for smoothing Output: out_path: {string} path to the generated smoothened raster """ cmd = "saga_cmd grid_filter 1 -INPUT {} -RESULT {} -KERNEL_TYPE 0 -KERNEL_RADIUS {}".format( in_path, out_path, radius ) os.system(cmd) def subtract_rasters(rasterA_path, rasterB_path, out_path, no_data_value=-99999.0): cmd = 'gdal_calc.py -A {} -B {} --outfile {} --NoDataValue={} --calc="A-B"'.format( rasterA_path, rasterB_path, out_path, no_data_value ) os.system(cmd) def replace_values( rasterA_path, rasterB_path, out_path, no_data_value=-99999.0, threshold=0.98 ): """ Replaces values in input rasterA with no_data_value where cell value >= threshold in rasterB Input: rasterA_path: {string} path to the input rasterA rasterB_path: {string} path to the input rasterB Output: out_path: {string} path to the generated raster """ cmd = 'gdal_calc.py -A {} --NoDataValue={} -B {} --outfile {} --calc="{}*(B>={}) + (A)*(B<{})"'.format( rasterA_path, no_data_value, rasterB_path, out_path, no_data_value, threshold, threshold, ) os.system(cmd) def expand_holes_in_raster( in_path, search_window=7, no_data_value=-99999.0, threshold=50 ): """ Expands holes (cells with no_data_value) in the input raster. 
Input: in_path: {string} path to the input raster search_window: {int} kernel size to be used as window threshold: {float} threshold on percentage of cells with no_data_value Output: np_raster: {numpy array} Returns the modified input raster's array """ np_raster = np.array(gdal.Open(in_path).ReadAsArray()) height, width = np_raster.shape[0], np_raster.shape[1] for i in range(int((search_window - 1) / 2), width, 1): for j in range(int((search_window - 1) / 2), height, 1): window = np_raster[ int(i - (search_window - 1) / 2) : int(i - (search_window - 1) / 2) + search_window, int(j - (search_window - 1) / 2) : int(j - (search_window - 1) / 2) + search_window, ] if ( np.count_nonzero(window == no_data_value) >= (threshold * search_window ** 2) / 100 ): try: np_raster[i, j] = no_data_value except: pass return np_raster def get_raster_crs(raster_path): """ Returns the CRS (Coordinate Reference System) of the raster Input: raster_path: {string} path to the source tif image """ raster = rasterio.open(raster_path) return raster.crs def get_raster_resolution(raster_path): raster = gdal.Open(raster_path) raster_geotrans = raster.GetGeoTransform() x_res = raster_geotrans[1] y_res = -raster_geotrans[5] return x_res, y_res def
(dsm_path, temp_dir): # check DSM resolution. Downsample if DSM is of very high resolution to save processing time. x_res, y_res = get_raster_resolution(dsm_path) # resolutions are in meters dsm_name = dsm_path.split("/")[-1].split(".")[0] dsm_crs = get_raster_crs(dsm_path) if dsm_crs != 4326: if x_res < 0.3 or y_res < 0.3: target_res = 0.3 # downsample to this resolution (in meters) downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1] downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif") # Downsampling DSM downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor) dsm_path = downsampled_dsm_path else: if x_res < 2.514e-06 or y_res < 2.514e-06: target_res = 2.514e-06 # downsample to this resolution (in degrees) downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1] downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif") # Downsampling DSM downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor) dsm_path = downsampled_dsm_path return dsm_path def get_updated_params(dsm_path, search_radius, smoothen_radius): # search_radius and smoothen_radius are set w.r.t. a 30cm DSM # returns updated parameters if DSM is of coarser resolution x_res, y_res = get_raster_resolution(dsm_path) # resolutions are in meters dsm_crs = get_raster_crs(dsm_path) if dsm_crs != 4326: if x_res > 0.3 or y_res > 0.3: search_radius = int((min(x_res, y_res) * search_radius) / 0.3) smoothen_radius = int((min(x_res, y_res) * smoothen_radius) / 0.3) else: if x_res > 2.514e-06 or y_res > 2.514e-06: search_radius = int((min(x_res, y_res) * search_radius) / 2.514e-06) smoothen_radius = int((min(x_res, y_res) * smoothen_radius) / 2.514e-06) return search_radius, smoothen_radius def main( dsm_path, out_dir, search_radius=40, smoothen_radius=45, dsm_replace_threshold_val=0.98, ): # master function that calls all other functions os.makedirs(out_dir, exist_ok=True) temp_dir = os.path.join(out_dir, "temp_files") os.makedirs(temp_dir, exist_ok=True) dsm_path = get_res_and_downsample(dsm_path, temp_dir) # get updated params w.r.t. DSM resolution search_radius, smoothen_radius = get_updated_params( dsm_path, search_radius, smoothen_radius ) # Generate DTM # STEP 1: Generate slope raster from dsm to get average slope value dsm_name = dsm_path.split("/")[-1].split(".")[0] dsm_slp_path = os.path.join(temp_dir, dsm_name + "_slp.tif") generate_slope_raster(dsm_path, dsm_slp_path) avg_slp = int(get_mean(dsm_slp_path)) # STEP 2: Split DSM into ground and non-ground surface rasters ground_dem_path = os.path.join(temp_dir, dsm_name + "_ground.sdat") non_ground_dem_path = os.path.join(temp_dir, dsm_name + "_non_ground.sdat") extract_dtm( dsm_path, ground_dem_path, non_ground_dem_path, search_radius, avg_slp, ) # STEP 3: Applying Gaussian Filter on the generated ground raster (parameters: radius = 45, mode = Circle) smoothened_ground_path = os.path.join(temp_dir, dsm_name + "_ground_smth.sdat") smoothen_raster(ground_dem_path, smoothened_ground_path, smoothen_radius) # STEP 4: Generating a difference raster (ground DEM - smoothened ground DEM) diff_raster_path = os.path.join(temp_dir, dsm_name + "_ground_diff.sdat") subtract_rasters(ground_dem_path, smoothened_ground_path, diff_raster_path) # STEP 5: Thresholding on the difference raster to replace values in Ground DEM by no-data values (threshold = 0.98) thresholded_ground_path = os.path.join( temp_dir, dsm_name + "_ground_thresholded.sdat" ) replace_values( ground_dem_path, diff_raster_path,
thresholded_ground_path, threshold=dsm_replace_threshold_val, ) # STEP 6: Removing noisy spikes from the generated DTM ground_dem_filtered_path = os.path.join(temp_dir, dsm_name + "_ground_filtered.tif") remove_noise(thresholded_ground_path, ground_dem_filtered_path) # STEP 7: Expanding holes in the thresholded ground raster bigger_holes_ground_path = os.path.join( temp_dir, dsm_name + "_ground_bigger_holes.sdat" ) temp = expand_holes_in_raster(ground_dem_filtered_path) save_array_as_geotif(temp, ground_dem_filtered_path, bigger_holes_ground_path) # STEP 8: Close gaps in the DTM dtm_path = os.path.join(temp_dir, dsm_name + "_dtm.sdat") close_gaps(bigger_holes_ground_path, dtm_path) # STEP 9: Convert to GeoTiff dtm_array = gdal.Open(dtm_path).ReadAsArray() dtm_tif_path = os.path.join(out_dir, dsm_name + "_dtm.tif") # save_array_as_geotif(dtm_array, dsm_path, dtm_tif_path) sdat_to_gtiff(dtm_path, dtm_tif_path) return dtm_tif_path # ----------------------------------------------------------------------------------------------------- if __name__ == "__main__": parser = argparse.ArgumentParser(description="Generate DTM from DSM") parser.add_argument("--dsm", help="dsm path string") args = parser.parse_args() dsm_path = args.dsm out_dir = "generated_dtm" dtm_path = main(dsm_path, out_dir) print("######### DTM generated at: ", dtm_path)
get_res_and_downsample
identifier_name
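The dsm2dtm.py records embed get_updated_params, which rescales the two kernel radii by the ratio of the DSM resolution to the 0.3 m baseline. A short worked example (the 0.6 m resolution is an assumed input, not taken from the record) doubles the defaults that main() passes in:

    # Worked example of the get_updated_params scaling for a projected CRS.
    x_res, y_res = 0.6, 0.6                    # assumed DSM resolution, coarser than 0.3 m
    search_radius, smoothen_radius = 40, 45    # defaults from main()

    search_radius = int((min(x_res, y_res) * search_radius) / 0.3)      # -> 80 cells
    smoothen_radius = int((min(x_res, y_res) * smoothen_radius) / 0.3)  # -> 90 cells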
dsm2dtm.py
""" dsm2dtm - Generate DTM (Digital Terrain Model) from DSM (Digital Surface Model) Author: Naman Jain naman.jain@btech2015.iitgn.ac.in www.namanji.wixsite.com/naman/ """ import os import numpy as np import rasterio import argparse try: import gdal except: from osgeo import gdal def downsample_raster(in_path, out_path, downsampling_factor): gdal_raster = gdal.Open(in_path) width, height = gdal_raster.RasterXSize, gdal_raster.RasterYSize gdal.Translate( out_path, in_path, width=int((width // downsampling_factor)), height=int((height // downsampling_factor)), outputType=gdal.GDT_Float32, ) def upsample_raster(in_path, out_path, target_height, target_width): gdal.Translate( out_path, in_path, width=target_width, height=target_height, resampleAlg="bilinear", outputType=gdal.GDT_Float32, ) def generate_slope_raster(in_path, out_path): """ Generates a slope raster from the input DEM raster. Input: in_path: {string} path to the DEM raster Output: out_path: {string} path to the generated slope image """ cmd = "gdaldem slope -alg ZevenbergenThorne {} {}".format(in_path, out_path) os.system(cmd) def get_mean(raster_path, ignore_value=-9999.0): np_raster = np.array(gdal.Open(raster_path).ReadAsArray()) return np_raster[np_raster != ignore_value].mean() def extract_dtm(dsm_path, ground_dem_path, non_ground_dem_path, radius, terrain_slope): """ Generates a ground DEM and non-ground DEM raster from the input DSM raster. Input: dsm_path: {string} path to the DSM raster radius: {int} Search radius of kernel in cells. terrain_slope: {float} average slope of the input terrain Output: ground_dem_path: {string} path to the generated ground DEM raster non_ground_dem_path: {string} path to the generated non-ground DEM raster """ cmd = "saga_cmd grid_filter 7 -INPUT {} -RADIUS {} -TERRAINSLOPE {} -GROUND {} -NONGROUND {}".format( dsm_path, radius, terrain_slope, ground_dem_path, non_ground_dem_path ) os.system(cmd) def remove_noise(ground_dem_path, out_path, ignore_value=-99999.0): """ Removes noise (high elevation data points like roofs, etc.) from the ground DEM raster. Replaces values in those pixels with No data Value (-99999.0) Input: ground_dem_path: {string} path to the generated ground DEM raster no_data_value: {float} replacing value in the ground raster (to be treated as No Data Value) Output: out_path: {string} path to the filtered ground DEM raster """ ground_np = np.array(gdal.Open(ground_dem_path).ReadAsArray()) std = ground_np[ground_np != ignore_value].std() mean = ground_np[ground_np != ignore_value].mean() threshold_value = mean + 1.5 * std ground_np[ground_np >= threshold_value] = -99999.0 save_array_as_geotif(ground_np, ground_dem_path, out_path) def save_array_as_geotif(array, source_tif_path, out_path): """ Generates a geotiff raster from the input numpy array (height * width * depth) Input: array: {numpy array} numpy array to be saved as geotiff source_tif_path: {string} path to the geotiff from which projection and geotransformation information will be extracted. 
Output: out_path: {string} path to the generated Geotiff raster """ if len(array.shape) > 2: height, width, depth = array.shape else: height, width = array.shape depth = 1 source_tif = gdal.Open(source_tif_path) driver = gdal.GetDriverByName("GTiff") dataset = driver.Create(out_path, width, height, depth, gdal.GDT_Float32) if depth != 1: for i in range(depth): dataset.GetRasterBand(i + 1).WriteArray(array[:, :, i]) else: dataset.GetRasterBand(1).WriteArray(array) geotrans = source_tif.GetGeoTransform() proj = source_tif.GetProjection() dataset.SetGeoTransform(geotrans) dataset.SetProjection(proj) dataset.FlushCache() dataset = None def sdat_to_gtiff(sdat_raster_path, out_gtiff_path): gdal.Translate( out_gtiff_path, sdat_raster_path, format="GTiff", ) def close_gaps(in_path, out_path, threshold=0.1): """ Interpolates the holes (no data value) in the input raster. Input: in_path: {string} path to the input raster with holes threshold: {float} Tension Threshold Output: out_path: {string} path to the generated raster with closed holes. """ cmd = "saga_cmd grid_tools 7 -INPUT {} -THRESHOLD {} -RESULT {}".format( in_path, threshold, out_path ) os.system(cmd) def smoothen_raster(in_path, out_path, radius=2): """ Applies gaussian filter to the input raster. Input: in_path: {string} path to the input raster radius: {int} kernel radius to be used for smoothing Output: out_path: {string} path to the generated smoothened raster """ cmd = "saga_cmd grid_filter 1 -INPUT {} -RESULT {} -KERNEL_TYPE 0 -KERNEL_RADIUS {}".format( in_path, out_path, radius ) os.system(cmd) def subtract_rasters(rasterA_path, rasterB_path, out_path, no_data_value=-99999.0): cmd = 'gdal_calc.py -A {} -B {} --outfile {} --NoDataValue={} --calc="A-B"'.format( rasterA_path, rasterB_path, out_path, no_data_value ) os.system(cmd) def replace_values( rasterA_path, rasterB_path, out_path, no_data_value=-99999.0, threshold=0.98 ): """ Replaces values in input rasterA with no_data_value where cell value >= threshold in rasterB Input: rasterA_path: {string} path to the input rasterA rasterB_path: {string} path to the input rasterB Output: out_path: {string} path to the generated raster """ cmd = 'gdal_calc.py -A {} --NoDataValue={} -B {} --outfile {} --calc="{}*(B>={}) + (A)*(B<{})"'.format( rasterA_path, no_data_value, rasterB_path, out_path, no_data_value, threshold, threshold, ) os.system(cmd) def expand_holes_in_raster( in_path, search_window=7, no_data_value=-99999.0, threshold=50 ): """ Expands holes (cells with no_data_value) in the input raster. Input: in_path: {string} path to the input raster search_window: {int} kernel size to be used as window threshold: {float} threshold on percentage of cells with no_data_value Output: np_raster: {numpy array} Returns the modified input raster's array """ np_raster = np.array(gdal.Open(in_path).ReadAsArray()) height, width = np_raster.shape[0], np_raster.shape[1] for i in range(int((search_window - 1) / 2), width, 1): for j in range(int((search_window - 1) / 2), height, 1): window = np_raster[ int(i - (search_window - 1) / 2) : int(i - (search_window - 1) / 2) + search_window, int(j - (search_window - 1) / 2) : int(j - (search_window - 1) / 2) + search_window, ] if ( np.count_nonzero(window == no_data_value) >= (threshold * search_window ** 2) / 100 ):
return np_raster def get_raster_crs(raster_path): """ Returns the CRS (Coordinate Reference System) of the raster Input: raster_path: {string} path to the source tif image """ raster = rasterio.open(raster_path) return raster.crs def get_raster_resolution(raster_path): raster = gdal.Open(raster_path) raster_geotrans = raster.GetGeoTransform() x_res = raster_geotrans[1] y_res = -raster_geotrans[5] return x_res, y_res def get_res_and_downsample(dsm_path, temp_dir): # check DSM resolution. Downsample if DSM is of very high resolution to save processing time. x_res, y_res = get_raster_resolution(dsm_path) # resolutions are in meters dsm_name = dsm_path.split("/")[-1].split(".")[0] dsm_crs = get_raster_crs(dsm_path) if dsm_crs != 4326: if x_res < 0.3 or y_res < 0.3: target_res = 0.3 # downsample to this resolution (in meters) downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1] downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif") # Downsampling DSM downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor) dsm_path = downsampled_dsm_path else: if x_res < 2.514e-06 or y_res < 2.514e-06: target_res = 2.514e-06 # downsample to this resolution (in degrees) downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1] downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif") # Downsampling DSM downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor) dsm_path = downsampled_dsm_path return dsm_path def get_updated_params(dsm_path, search_radius, smoothen_radius): # search_radius and smoothen_radius are set w.r.t. a 30cm DSM # returns updated parameters if DSM is of coarser resolution x_res, y_res = get_raster_resolution(dsm_path) # resolutions are in meters dsm_crs = get_raster_crs(dsm_path) if dsm_crs != 4326: if x_res > 0.3 or y_res > 0.3: search_radius = int((min(x_res, y_res) * search_radius) / 0.3) smoothen_radius = int((min(x_res, y_res) * smoothen_radius) / 0.3) else: if x_res > 2.514e-06 or y_res > 2.514e-06: search_radius = int((min(x_res, y_res) * search_radius) / 2.514e-06) smoothen_radius = int((min(x_res, y_res) * smoothen_radius) / 2.514e-06) return search_radius, smoothen_radius def main( dsm_path, out_dir, search_radius=40, smoothen_radius=45, dsm_replace_threshold_val=0.98, ): # master function that calls all other functions os.makedirs(out_dir, exist_ok=True) temp_dir = os.path.join(out_dir, "temp_files") os.makedirs(temp_dir, exist_ok=True) dsm_path = get_res_and_downsample(dsm_path, temp_dir) # get updated params w.r.t. DSM resolution search_radius, smoothen_radius = get_updated_params( dsm_path, search_radius, smoothen_radius ) # Generate DTM # STEP 1: Generate slope raster from dsm to get average slope value dsm_name = dsm_path.split("/")[-1].split(".")[0] dsm_slp_path = os.path.join(temp_dir, dsm_name + "_slp.tif") generate_slope_raster(dsm_path, dsm_slp_path) avg_slp = int(get_mean(dsm_slp_path)) # STEP 2: Split DSM into ground and non-ground surface rasters ground_dem_path = os.path.join(temp_dir, dsm_name + "_ground.sdat") non_ground_dem_path = os.path.join(temp_dir, dsm_name + "_non_ground.sdat") extract_dtm( dsm_path, ground_dem_path, non_ground_dem_path, search_radius, avg_slp, ) # STEP 3: Applying Gaussian Filter on the generated ground raster (parameters: radius = 45, mode = Circle) smoothened_ground_path = os.path.join(temp_dir, dsm_name + "_ground_smth.sdat") smoothen_raster(ground_dem_path, smoothened_ground_path, smoothen_radius) # STEP 4: Generating a difference raster (ground DEM
- smoothened ground DEM) diff_raster_path = os.path.join(temp_dir, dsm_name + "_ground_diff.sdat") subtract_rasters(ground_dem_path, smoothened_ground_path, diff_raster_path) # STEP 5: Thresholding on the difference raster to replace values in Ground DEM by no-data values (threshold = 0.98) thresholded_ground_path = os.path.join( temp_dir, dsm_name + "_ground_thresholded.sdat" ) replace_values( ground_dem_path, diff_raster_path, thresholded_ground_path, threshold=dsm_replace_threshold_val, ) # STEP 6: Removing noisy spikes from the generated DTM ground_dem_filtered_path = os.path.join(temp_dir, dsm_name + "_ground_filtered.tif") remove_noise(thresholded_ground_path, ground_dem_filtered_path) # STEP 7: Expanding holes in the thresholded ground raster bigger_holes_ground_path = os.path.join( temp_dir, dsm_name + "_ground_bigger_holes.sdat" ) temp = expand_holes_in_raster(ground_dem_filtered_path) save_array_as_geotif(temp, ground_dem_filtered_path, bigger_holes_ground_path) # STEP 8: Close gaps in the DTM dtm_path = os.path.join(temp_dir, dsm_name + "_dtm.sdat") close_gaps(bigger_holes_ground_path, dtm_path) # STEP 9: Convert to GeoTiff dtm_array = gdal.Open(dtm_path).ReadAsArray() dtm_tif_path = os.path.join(out_dir, dsm_name + "_dtm.tif") # save_array_as_geotif(dtm_array, dsm_path, dtm_tif_path) sdat_to_gtiff(dtm_path, dtm_tif_path) return dtm_tif_path # ----------------------------------------------------------------------------------------------------- if __name__ == "__main__": parser = argparse.ArgumentParser(description="Generate DTM from DSM") parser.add_argument("--dsm", help="dsm path string") args = parser.parse_args() dsm_path = args.dsm out_dir = "generated_dtm" dtm_path = main(dsm_path, out_dir) print("######### DTM generated at: ", dtm_path)
try: np_raster[i, j] = no_data_value except: pass
conditional_block
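Here the fim_type is conditional_block: the middle is the block that runs when the no-data count inside a sliding window crosses the percentage threshold in expand_holes_in_raster. A self-contained toy check of that trigger condition (the 3x3 window and the 50% threshold below are assumed demo values, smaller than the function's 7-cell default):

    import numpy as np

    # Toy version of expand_holes_in_raster's trigger condition.
    no_data_value = -99999.0
    search_window, threshold = 3, 50           # assumed window size and percentage threshold
    window = np.array([[no_data_value, 1.0, no_data_value],
                       [2.0, no_data_value, 3.0],
                       [no_data_value, 4.0, no_data_value]])

    # 5 of the 9 cells are no-data and 5 >= (50 * 3**2) / 100 == 4.5,
    # so the guarded assignment in the record's middle would fire for this cell.
    assert np.count_nonzero(window == no_data_value) >= (threshold * search_window ** 2) / 100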
dsm2dtm.py
""" dsm2dtm - Generate DTM (Digital Terrain Model) from DSM (Digital Surface Model) Author: Naman Jain naman.jain@btech2015.iitgn.ac.in www.namanji.wixsite.com/naman/ """ import os import numpy as np import rasterio import argparse try: import gdal except: from osgeo import gdal def downsample_raster(in_path, out_path, downsampling_factor): gdal_raster = gdal.Open(in_path) width, height = gdal_raster.RasterXSize, gdal_raster.RasterYSize gdal.Translate( out_path, in_path, width=int((width // downsampling_factor)), height=int((height // downsampling_factor)), outputType=gdal.GDT_Float32, ) def upsample_raster(in_path, out_path, target_height, target_width): gdal.Translate( out_path, in_path, width=target_width, height=target_height, resampleAlg="bilinear", outputType=gdal.GDT_Float32, ) def generate_slope_raster(in_path, out_path): """ Generates a slope raster from the input DEM raster. Input: in_path: {string} path to the DEM raster Output: out_path: {string} path to the generated slope image """ cmd = "gdaldem slope -alg ZevenbergenThorne {} {}".format(in_path, out_path) os.system(cmd) def get_mean(raster_path, ignore_value=-9999.0): np_raster = np.array(gdal.Open(raster_path).ReadAsArray()) return np_raster[np_raster != ignore_value].mean() def extract_dtm(dsm_path, ground_dem_path, non_ground_dem_path, radius, terrain_slope): """ Generates a ground DEM and non-ground DEM raster from the input DSM raster. Input: dsm_path: {string} path to the DSM raster radius: {int} Search radius of kernel in cells. terrain_slope: {float} average slope of the input terrain Output: ground_dem_path: {string} path to the generated ground DEM raster non_ground_dem_path: {string} path to the generated non-ground DEM raster """ cmd = "saga_cmd grid_filter 7 -INPUT {} -RADIUS {} -TERRAINSLOPE {} -GROUND {} -NONGROUND {}".format( dsm_path, radius, terrain_slope, ground_dem_path, non_ground_dem_path ) os.system(cmd) def remove_noise(ground_dem_path, out_path, ignore_value=-99999.0): """ Removes noise (high elevation data points like roofs, etc.) from the ground DEM raster. Replaces values in those pixels with No data Value (-99999.0) Input: ground_dem_path: {string} path to the generated ground DEM raster no_data_value: {float} replacing value in the ground raster (to be treated as No Data Value) Output: out_path: {string} path to the filtered ground DEM raster """ ground_np = np.array(gdal.Open(ground_dem_path).ReadAsArray()) std = ground_np[ground_np != ignore_value].std() mean = ground_np[ground_np != ignore_value].mean() threshold_value = mean + 1.5 * std ground_np[ground_np >= threshold_value] = -99999.0 save_array_as_geotif(ground_np, ground_dem_path, out_path) def save_array_as_geotif(array, source_tif_path, out_path): """ Generates a geotiff raster from the input numpy array (height * width * depth) Input: array: {numpy array} numpy array to be saved as geotiff source_tif_path: {string} path to the geotiff from which projection and geotransformation information will be extracted. 
Output: out_path: {string} path to the generated Geotiff raster """ if len(array.shape) > 2: height, width, depth = array.shape else: height, width = array.shape depth = 1 source_tif = gdal.Open(source_tif_path) driver = gdal.GetDriverByName("GTiff") dataset = driver.Create(out_path, width, height, depth, gdal.GDT_Float32) if depth != 1: for i in range(depth): dataset.GetRasterBand(i + 1).WriteArray(array[:, :, i]) else: dataset.GetRasterBand(1).WriteArray(array) geotrans = source_tif.GetGeoTransform() proj = source_tif.GetProjection() dataset.SetGeoTransform(geotrans) dataset.SetProjection(proj) dataset.FlushCache() dataset = None def sdat_to_gtiff(sdat_raster_path, out_gtiff_path): gdal.Translate( out_gtiff_path, sdat_raster_path, format="GTiff", ) def close_gaps(in_path, out_path, threshold=0.1): """ Interpolates the holes (no data value) in the input raster. Input: in_path: {string} path to the input raster with holes threshold: {float} Tension Threshold Output: out_path: {string} path to the generated raster with closed holes. """ cmd = "saga_cmd grid_tools 7 -INPUT {} -THRESHOLD {} -RESULT {}".format( in_path, threshold, out_path ) os.system(cmd) def smoothen_raster(in_path, out_path, radius=2): """ Applies gaussian filter to the input raster. Input: in_path: {string} path to the input raster radius: {int} kernel radius to be used for smoothing Output: out_path: {string} path to the generated smoothened raster """ cmd = "saga_cmd grid_filter 1 -INPUT {} -RESULT {} -KERNEL_TYPE 0 -KERNEL_RADIUS {}".format( in_path, out_path, radius ) os.system(cmd) def subtract_rasters(rasterA_path, rasterB_path, out_path, no_data_value=-99999.0): cmd = 'gdal_calc.py -A {} -B {} --outfile {} --NoDataValue={} --calc="A-B"'.format( rasterA_path, rasterB_path, out_path, no_data_value ) os.system(cmd) def replace_values( rasterA_path, rasterB_path, out_path, no_data_value=-99999.0, threshold=0.98 ): """ Replaces values in input rasterA with no_data_value where cell value >= threshold in rasterB Input: rasterA_path: {string} path to the input rasterA rasterB_path: {string} path to the input rasterB Output: out_path: {string} path to the generated raster """ cmd = 'gdal_calc.py -A {} --NoDataValue={} -B {} --outfile {} --calc="{}*(B>={}) + (A)*(B<{})"'.format( rasterA_path, no_data_value, rasterB_path, out_path, no_data_value, threshold, threshold, ) os.system(cmd) def expand_holes_in_raster( in_path, search_window=7, no_data_value=-99999.0, threshold=50 ): """ Expands holes (cells with no_data_value) in the input raster. 
Input: in_path: {string} path to the input raster search_window: {int} kernel size to be used as window threshold: {float} threshold on percentage of cells with no_data_value Output: np_raster: {numpy array} Returns the modified input raster's array """ np_raster = np.array(gdal.Open(in_path).ReadAsArray()) height, width = np_raster.shape[0], np_raster.shape[1] for i in range(int((search_window - 1) / 2), width, 1): for j in range(int((search_window - 1) / 2), height, 1): window = np_raster[ int(i - (search_window - 1) / 2) : int(i - (search_window - 1) / 2) + search_window, int(j - (search_window - 1) / 2) : int(j - (search_window - 1) / 2) + search_window, ] if ( np.count_nonzero(window == no_data_value) >= (threshold * search_window ** 2) / 100 ): try: np_raster[i, j] = no_data_value except: pass return np_raster def get_raster_crs(raster_path): """ Returns the CRS (Coordinate Reference System) of the raster Input: raster_path: {string} path to the source tif image """ raster = rasterio.open(raster_path) return raster.crs def get_raster_resolution(raster_path): raster = gdal.Open(raster_path) raster_geotrans = raster.GetGeoTransform() x_res = raster_geotrans[1] y_res = -raster_geotrans[5] return x_res, y_res def get_res_and_downsample(dsm_path, temp_dir): # check DSM resolution. Downsample if DSM is of very high resolution to save processing time. x_res, y_res = get_raster_resolution(dsm_path) # resolutions are in meters dsm_name = dsm_path.split("/")[-1].split(".")[0]
if x_res < 0.3 or y_res < 0.3: target_res = 0.3 # downsample to this resolution (in meters) downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1] downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif") # Downsampling DSM downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor) dsm_path = downsampled_dsm_path else: if x_res < 2.514e-06 or y_res < 2.514e-06: target_res = 2.514e-06 # downsample to this resolution (in degrees) downsampling_factor = target_res / gdal.Open(dsm_path).GetGeoTransform()[1] downsampled_dsm_path = os.path.join(temp_dir, dsm_name + "_ds.tif") # Downsampling DSM downsample_raster(dsm_path, downsampled_dsm_path, downsampling_factor) dsm_path = downsampled_dsm_path return dsm_path def get_updated_params(dsm_path, search_radius, smoothen_radius): # search_radius and smoothen_radius are set w.r.t. a 30cm DSM # returns updated parameters if DSM is of coarser resolution x_res, y_res = get_raster_resolution(dsm_path) # resolutions are in meters dsm_crs = get_raster_crs(dsm_path) if dsm_crs != 4326: if x_res > 0.3 or y_res > 0.3: search_radius = int((min(x_res, y_res) * search_radius) / 0.3) smoothen_radius = int((min(x_res, y_res) * smoothen_radius) / 0.3) else: if x_res > 2.514e-06 or y_res > 2.514e-06: search_radius = int((min(x_res, y_res) * search_radius) / 2.514e-06) smoothen_radius = int((min(x_res, y_res) * smoothen_radius) / 2.514e-06) return search_radius, smoothen_radius def main( dsm_path, out_dir, search_radius=40, smoothen_radius=45, dsm_replace_threshold_val=0.98, ): # master function that calls all other functions os.makedirs(out_dir, exist_ok=True) temp_dir = os.path.join(out_dir, "temp_files") os.makedirs(temp_dir, exist_ok=True) dsm_path = get_res_and_downsample(dsm_path, temp_dir) # get updated params w.r.t. DSM resolution search_radius, smoothen_radius = get_updated_params( dsm_path, search_radius, smoothen_radius ) # Generate DTM # STEP 1: Generate slope raster from dsm to get average slope value dsm_name = dsm_path.split("/")[-1].split(".")[0] dsm_slp_path = os.path.join(temp_dir, dsm_name + "_slp.tif") generate_slope_raster(dsm_path, dsm_slp_path) avg_slp = int(get_mean(dsm_slp_path)) # STEP 2: Split DSM into ground and non-ground surface rasters ground_dem_path = os.path.join(temp_dir, dsm_name + "_ground.sdat") non_ground_dem_path = os.path.join(temp_dir, dsm_name + "_non_ground.sdat") extract_dtm( dsm_path, ground_dem_path, non_ground_dem_path, search_radius, avg_slp, ) # STEP 3: Applying Gaussian Filter on the generated ground raster (parameters: radius = 45, mode = Circle) smoothened_ground_path = os.path.join(temp_dir, dsm_name + "_ground_smth.sdat") smoothen_raster(ground_dem_path, smoothened_ground_path, smoothen_radius) # STEP 4: Generating a difference raster (ground DEM - smoothened ground DEM) diff_raster_path = os.path.join(temp_dir, dsm_name + "_ground_diff.sdat") subtract_rasters(ground_dem_path, smoothened_ground_path, diff_raster_path) # STEP 5: Thresholding on the difference raster to replace values in Ground DEM by no-data values (threshold = 0.98) thresholded_ground_path = os.path.join( temp_dir, dsm_name + "_ground_thresholded.sdat" ) replace_values( ground_dem_path, diff_raster_path, thresholded_ground_path, threshold=dsm_replace_threshold_val, ) # STEP 6: Removing noisy spikes from the generated DTM ground_dem_filtered_path = os.path.join(temp_dir, dsm_name + "_ground_filtered.tif") remove_noise(thresholded_ground_path, ground_dem_filtered_path) # STEP 7: Expanding holes in the
thresholded ground raster bigger_holes_ground_path = os.path.join( temp_dir, dsm_name + "_ground_bigger_holes.sdat" ) temp = expand_holes_in_raster(ground_dem_filtered_path) save_array_as_geotif(temp, ground_dem_filtered_path, bigger_holes_ground_path) # STEP 8: Close gaps in the DTM dtm_path = os.path.join(temp_dir, dsm_name + "_dtm.sdat") close_gaps(bigger_holes_ground_path, dtm_path) # STEP 9: Convert to GeoTiff dtm_array = gdal.Open(dtm_path).ReadAsArray() dtm_tif_path = os.path.join(out_dir, dsm_name + "_dtm.tif") # save_array_as_geotif(dtm_array, dsm_path, dtm_tif_path) sdat_to_gtiff(dtm_path, dtm_tif_path) return dtm_tif_path # ----------------------------------------------------------------------------------------------------- if __name__ == "__main__": parser = argparse.ArgumentParser(description="Generate DTM from DSM") parser.add_argument("--dsm", help="dsm path string") args = parser.parse_args() dsm_path = args.dsm out_dir = "generated_dtm" dtm_path = main(dsm_path, out_dir) print("######### DTM generated at: ", dtm_path)
dsm_crs = get_raster_crs(dsm_path) if dsm_crs != 4326:
random_line_split
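For random_line_split the cut falls on whole-line boundaries rather than on an identifier: the middle here is the two lines of get_res_and_downsample that sit between the prefix's dsm_name assignment and the suffix's resolution check. A small sketch with explicit newlines (the three snippets are copied from this record) rejoins them:

    # Sketch: a random_line_split middle is rejoined at line granularity.
    prefix_tail = '    dsm_name = dsm_path.split("/")[-1].split(".")[0]\n'
    middle = '    dsm_crs = get_raster_crs(dsm_path)\n    if dsm_crs != 4326:\n'
    suffix_head = '        if x_res < 0.3 or y_res < 0.3:\n'

    print(prefix_tail + middle + suffix_head)  # the original run of source lines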
lib.rs
//! Usage //! ----- //! //! For simple applications, use one of the utility functions `listen` and `connect`: //! //! `listen` accepts a string that represents a socket address and a Factory, see //! [Architecture](#architecture). //! //! ```no_run //! // A WebSocket echo server //! //! use ws::listen; //! //! listen("127.0.0.1:3012", |out| { //! move |msg| { //! out.send(msg) //! } //! }).unwrap() //! ``` //! //! `connect` accepts a string that represents a WebSocket URL (i.e. one that starts with ws://), //! and it will attempt to connect to a WebSocket server at that location. It also accepts a //! Factory. //! //! ```no_run //! // A WebSocket client that sends one message then closes //! //! use ws::{connect, CloseCode}; //! //! connect("ws://127.0.0.1:3012", |out| { //! out.send("Hello WebSocket").unwrap(); //! //! move |msg| { //! println!("Got message: {}", msg); //! out.close(CloseCode::Normal) //! } //! }).unwrap() //! ``` //! //! Each of these functions encapsulates a mio EventLoop, creating and running a WebSocket in the //! current thread. These are blocking functions, so they will only return after the encapsulated //! WebSocket has been shut down. //! //! Architecture //! ------ //! //! A WebSocket requires two basic components: a Factory and a Handler. A Factory is any struct //! that implements the `Factory` trait. WS-RS already provides an implementation of `Factory` for //! closures, so it is possible to pass a closure as a Factory to either of the utility functions. //! Your Factory will be called each time the underlying TCP connection has been successfully //! established, and it will need to return a Handler that will handle the new WebSocket connection. //! //! Factories can be used to manage state that applies to multiple WebSocket connections, //! whereas Handlers manage the state of individual connections. Most of the time, a closure //! Factory is sufficient, and you will only need to focus on writing your Handler. //! Your Factory will be passed a Sender struct that represents the output of the WebSocket. //! The Sender allows the Handler to send messages, initiate a WebSocket closing handshake //! by sending a close code, and perform other useful actions. If you need to send messages from other parts //! of your application, it is possible to clone and send the Sender across threads, allowing //! other code to send messages on the WebSocket without blocking the event loop. //! //! Just as with the Factory, it is possible to use a closure as a simple Handler. The closure must //! take a Message as its only argument, and it may close over variables that exist in //! the Factory. For example, in the above examples using `listen` and `connect`, the closure //! Factory returns another closure as the Handler for the new connection. This closure closes over //! the variable `out`, which is the Sender, representing the output of the WebSocket, so that it //! can use that sender later to send a Message. Closure Handlers generally need to take ownership of the variables //! that they close over because the Factory may be called multiple times. Think of Handlers as //! though they are threads, and Rust's memory model should make sense. Closure Handlers must return //! a `Result<()>`, in order to handle errors without panicking. //! //! In the above examples, `out.close` and `out.send` both actually return a `Result<()>` indicating //! whether they were able to schedule the requested command (either `close` or `send`) with the //! EventLoop. //! //!
*It is important that your Handler does not panic carelessly because a handler that panics will //! disconnect every other connection that is using that WebSocket. Don't panic unless you want all //! connections to immediately fail.* //! //! Guide //! ----- //! //! You may have noticed in the usage examples that the client example calls `unwrap` when sending the first //! message, which will panic in the factory if the Message can't be sent for some reason. Also, //! sending messages before a handler is returned means that the message will be queued before //! the WebSocket handshake is complete. The handshake could fail for some reason, and then the //! queued message would be wasted effort. Sending messages in the Factory is not bad for simple, //! short-lived, or toy projects, but let's explore writing a handler that is better for //! long-running applications. //! //! In order to solve the problem of sending a message immediately when a WebSocket connection is //! established, you will need to write a Handler that implements the `on_open` method. For //! example: //! //! ```no_run //! use ws::{connect, Handler, Sender, Handshake, Result, Message, CloseCode}; //! //! // Our Handler struct. //! // Here we explicitly indicate that the Client needs a Sender, //! // whereas a closure captures the Sender for us automatically. //! struct Client { //! out: Sender, //! } //! //! // We implement the Handler trait for Client so that we can get more //! // fine-grained control of the connection. //! impl Handler for Client { //! //! // `on_open` will be called only after the WebSocket handshake is successful, //! // so at this point we know that the connection is ready to send/receive messages. //! // We ignore the `Handshake` for now, but you could also use this method to set up //! // Handler state or reject the connection based on the details of the Request //! // or Response, such as by checking cookies or Auth headers. //! fn on_open(&mut self, _: Handshake) -> Result<()> { //! // Now we don't need to call unwrap since `on_open` returns a `Result<()>`. //! // If this call fails, it will only result in this connection disconnecting. //! self.out.send("Hello WebSocket") //! } //! //! // `on_message` is roughly equivalent to the Handler closure. It takes a `Message` //! // and returns a `Result<()>`. //! fn on_message(&mut self, msg: Message) -> Result<()> { //! // Close the connection when we get a response from the server //! println!("Got message: {}", msg); //! self.out.close(CloseCode::Normal) //! } //! } //! //! // Now, instead of a closure, the Factory returns a new instance of our Handler. //! connect("ws://127.0.0.1:3012", |out| { Client { out: out } }).unwrap() //! ``` //! //! That is a big increase in verbosity in order to accomplish the same effect as the //! original example, but this way is more flexible and gives you access to more of the underlying //! details of the WebSocket connection. //! //! Another method you will probably want to implement is `on_close`. This method is called anytime //! the other side of the WebSocket connection attempts to close the connection. Implementing //! `on_close` gives you a mechanism for informing the user regarding why the WebSocket connection //! may have been closed, and it also gives you an opportunity to clean up any resources or state //! that may be dependent on the connection that is now about to disconnect. //! //! An example server might use this as follows: //! //! ```no_run //!
use ws::{listen, Handler, Sender, Result, Message, CloseCode}; //! //! struct Server { //! out: Sender, //! } //! //! impl Handler for Server { //! //! fn on_message(&mut self, msg: Message) -> Result<()> { //! // Echo the message back //! self.out.send(msg) //! } //! //! fn on_close(&mut self, code: CloseCode, reason: &str) { //! // The WebSocket protocol allows for a utf8 reason for the closing state after the //! // close code. WS-RS will attempt to interpret this data as a utf8 description of the //! // reason for closing the connection. In many cases, `reason` will be an empty string. //! // So, you may not normally want to display `reason` to the user, //! // but let's assume that we know that `reason` is human-readable. //! match code { //! CloseCode::Normal => println!("The client is done with the connection."), //! CloseCode::Away => println!("The client is leaving the site."), //! _ => println!("The client encountered an error: {}", reason), //! } //! } //! } //! //! listen("127.0.0.1:3012", |out| { Server { out: out } }).unwrap() //! ``` //! //! Errors don't just occur on the other side of the connection; sometimes your code will encounter //! an exceptional state too. You can access errors by implementing `on_error`. By implementing //! `on_error` you can inform the user of an error and tear down any resources that you may have //! set up for the connection, but which are not owned by the Handler. Also, note that certain kinds //! of errors have certain ramifications within the WebSocket protocol. WS-RS will take care of //! sending the appropriate close code. //! //! A server that tracks state outside of the handler might be as follows: //! //! ```no_run //! //! use std::rc::Rc; //! use std::cell::RefCell; //! //! use ws::{listen, Handler, Sender, Result, Message, Handshake, CloseCode, Error}; //! //! struct Server { //! out: Sender, //! count: Rc<RefCell<usize>>, //! } //! //! impl Handler for Server { //! //! fn on_open(&mut self, _: Handshake) -> Result<()> { //! // We have a new connection, so we increment the connection counter //! Ok(*self.count.borrow_mut() += 1) //! } //! //! fn on_message(&mut self, msg: Message) -> Result<()> { //! // Tell the user the current count //! println!("The number of live connections is {}", *self.count.borrow()); //! //! // Echo the message back //! self.out.send(msg) //! } //! //! fn on_close(&mut self, code: CloseCode, reason: &str) { //! match code { //! CloseCode::Normal => println!("The client is done with the connection."), //! CloseCode::Away => println!("The client is leaving the site."), //! _ => println!("The client encountered an error: {}", reason), //! } //! //! // The connection is going down, so we need to decrement the count //! *self.count.borrow_mut() -= 1 //! } //! //! fn on_error(&mut self, err: Error) { //! println!("The server encountered an error: {:?}", err); //! //! // The connection is going down, so we need to decrement the count //! *self.count.borrow_mut() -= 1 //! } //! //! } //! // RefCell enforces Rust borrowing rules at runtime. //! // Calling borrow_mut will panic if the count is already being borrowed, //! // but we already know that only one handler at a time will ever try to change the count. //! // Rc is a reference-counted box for sharing the count between handlers //! // since each handler needs to own its contents. //! let count = Rc::new(RefCell::new(0)); //! listen("127.0.0.1:3012", |out| { Server { out: out, count: count.clone() } }).unwrap() //! ``` //! //!
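//! A note on the design choice above: because every Handler runs on the single
//! event-loop thread, `Rc<RefCell<usize>>` is enough to share the counter between
//! handlers. If you also needed to read or update the count from other threads (an
//! assumption beyond this example), a sketch of a thread-safe variant would use an
//! atomic counter instead:
//!
//! ```no_run
//! use std::sync::Arc;
//! use std::sync::atomic::{AtomicUsize, Ordering};
//!
//! let count = Arc::new(AtomicUsize::new(0));
//! let count_for_handlers = count.clone();
//! // Increment on open, decrement on close; safe to read from any thread.
//! count_for_handlers.fetch_add(1, Ordering::SeqCst);
//! println!("live connections: {}", count.load(Ordering::SeqCst));
//! ```
//!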
There are other Handler methods that allow even more fine-grained access, but most applications //! will usually only need these four methods. //! extern crate httparse; extern crate mio; extern crate sha1; extern crate rand; extern crate url; #[macro_use] extern crate log; mod result; mod connection; mod frame; mod message; mod handshake; mod protocol; mod communication; mod io; pub use connection::factory::Factory; pub use connection::factory::Settings as WebSocketSettings; pub use connection::handler::Handler; pub use connection::handler::Settings as ConnectionSettings; pub use result::{Result, Error}; pub use result::Kind as ErrorKind; pub use message::Message; pub use communication::Sender; pub use protocol::CloseCode; pub use handshake::{Handshake, Request, Response}; use std::fmt; use std::net::ToSocketAddrs; use mio::EventLoopConfig; use std::borrow::Borrow; /// A utility function for setting up a WebSocket server. /// /// # Safety /// /// This function blocks until the EventLoop finishes running. Avoid calling this method within /// another WebSocket handler. /// /// # Examples /// /// ```no_run /// use ws::listen; /// /// listen("127.0.0.1:3012", |out| { /// move |msg| { /// out.send(msg) /// } /// }).unwrap() /// ``` /// pub fn listen<A, F, H>(addr: A, factory: F) -> Result<()> where A: ToSocketAddrs + fmt::Debug, F: FnMut(Sender) -> H, H: Handler, { let ws = try!(WebSocket::new(factory)); try!(ws.listen(addr)); Ok(()) } /// A utility function for setting up a WebSocket client. /// /// # Safety /// /// This function blocks until the EventLoop finishes running. Avoid calling this method within /// another WebSocket handler. If you need to establish a connection from inside of a handler, /// use the `connect` method on the Sender. /// /// # Examples /// /// ```no_run /// use ws::{connect, CloseCode}; /// /// connect("ws://127.0.0.1:3012", |out| { /// out.send("Hello WebSocket").unwrap(); /// /// move |msg| { /// println!("Got message: {}", msg); /// out.close(CloseCode::Normal) /// } /// }).unwrap() /// ``` /// pub fn connect<U, F, H>(url: U, factory: F) -> Result<()> where U: Borrow<str>, F: FnMut(Sender) -> H, H: Handler { let mut ws = try!(WebSocket::new(factory)); let parsed = try!( url::Url::parse(url.borrow()) .map_err(|err| Error::new( ErrorKind::Internal, format!("Unable to parse {} as url due to {:?}", url.borrow(), err)))); try!(ws.connect(parsed)); try!(ws.run()); Ok(()) } /// The WebSocket struct. A WebSocket can support multiple incoming and outgoing connections. pub struct WebSocket<F> where F: Factory { event_loop: io::Loop<F>, handler: io::Handler<F>, } impl<F> WebSocket<F> where F: Factory { /// Create a new WebSocket using the given Factory to create handlers. pub fn new(mut factory: F) -> Result<WebSocket<F>> { let max = factory.settings().max_connections; let mut config = EventLoopConfig::new(); config.notify_capacity(max + 1000); WebSocket::with_config(factory, config) } /// Create a new WebSocket with a Factory and use the event loop config to provide settings for /// the event loop. pub fn with_config(factory: F, config: EventLoopConfig) -> Result<WebSocket<F>> { Ok(WebSocket { event_loop: try!(io::Loop::configured(config)), handler: io::Handler::new(factory), }) } /// Consume the WebSocket and listen for new connections on the specified address. /// /// # Safety /// /// This method will block until the event loop finishes running. 
pub fn listen<A>(mut self, addr_spec: A) -> Result<WebSocket<F>> where A: ToSocketAddrs + fmt::Debug { let mut result = Err(Error::new(ErrorKind::Internal, format!("Unable to listen on {:?}", addr_spec))); for addr in try!(addr_spec.to_socket_addrs()) { result = self.handler.listen(&mut self.event_loop, &addr).map(|_| ()); if result.is_ok()
} result.map(|_| self) } /// Queue an outgoing connection on this WebSocket. This method may be called multiple times, /// but the actual connections will not be established until after `run` is called. pub fn connect(&mut self, url: url::Url) -> Result<&mut WebSocket<F>> { let sender = Sender::new(io::ALL, self.event_loop.channel()); try!(sender.connect(url)); Ok(self) } /// Run the WebSocket. This will run the encapsulated event loop, blocking until the WebSocket /// is shut down. pub fn run(mut self) -> Result<WebSocket<F>> { try!(self.event_loop.run(&mut self.handler)); Ok(self) } }
{ return self.run() }
conditional_block
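For reference, a minimal sketch (assuming a `factory` closure like the ones in the doc examples above, and using only the `WebSocket` API shown in this file) of queuing several outgoing connections before starting the event loop:

```rust
// Each `connect` call only queues a connection; nothing is established
// until `run` starts the event loop, which then blocks until shutdown.
let factory = |out: ws::Sender| move |msg| out.send(msg);
let mut ws = ws::WebSocket::new(factory).unwrap();
ws.connect(url::Url::parse("ws://127.0.0.1:3012").unwrap()).unwrap();
ws.connect(url::Url::parse("ws://127.0.0.1:3013").unwrap()).unwrap();
ws.run().unwrap();
```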
index.ts
import { assert } from '@0x/assert'; import { schemas } from '@0x/json-schemas'; import { AbiEncoder, abiUtils, BigNumber, decodeBytesAsRevertError, decodeThrownErrorAsRevertError, providerUtils, RevertError, StringRevertError, } from '@0x/utils'; import { Web3Wrapper } from '@0x/web3-wrapper'; import { AbiDefinition, AbiType, BlockParam, CallData, ConstructorAbi, ContractAbi, DataItem, MethodAbi, SupportedProvider, TransactionReceiptWithDecodedLogs, TxData, TxDataPayable, } from 'ethereum-types'; import Account from 'ethereumjs-account'; import * as util from 'ethereumjs-util'; import { default as VM } from 'ethereumjs-vm'; import PStateManager from 'ethereumjs-vm/dist/state/promisified'; export { linkLibrariesInBytecode, methodAbiToFunctionSignature } from './utils'; import { AwaitTransactionSuccessOpts } from './types'; import { formatABIDataItem } from './utils'; export { SubscriptionManager } from './subscription_manager'; export { ContractEvent, SendTransactionOpts, AwaitTransactionSuccessOpts, ContractFunctionObj, ContractTxFunctionObj, SubscriptionErrors, } from './types'; export interface AbiEncoderByFunctionSignature { [key: string]: AbiEncoder.Method; } const ARBITRARY_PRIVATE_KEY = 'e331b6d69882b4cb4ea581d88e0b604039a3de5967688d3dcffdd2270c0fd109'; // tslint:disable: max-classes-per-file /** * @dev A promise-compatible type that exposes a `txHash` field. * Not used by BaseContract, but generated contracts will return it in * `awaitTransactionSuccessAsync()`. * Maybe there's a better place for this. */ export class PromiseWithTransactionHash<T> implements Promise<T> { public readonly txHashPromise: Promise<string>; private readonly _promise: Promise<T>; constructor(txHashPromise: Promise<string>, promise: Promise<T>) { this.txHashPromise = txHashPromise; this._promise = promise; } // tslint:disable:promise-function-async // tslint:disable:async-suffix public then<TResult>( onFulfilled?: (v: T) => TResult | Promise<TResult>, onRejected?: (reason: any) => Promise<never>, ): Promise<TResult> { return this._promise.then<TResult>(onFulfilled, onRejected); } public catch<TResult>(onRejected?: (reason: any) => Promise<TResult>): Promise<TResult | T> { return this._promise.catch(onRejected); } public finally(onFinally?: (() => void) | null): Promise<T> { return this._promise.finally(onFinally); } // tslint:enable:promise-function-async // tslint:enable:async-suffix get [Symbol.toStringTag](): 'Promise' { return this._promise[Symbol.toStringTag]; } } export class BaseContract { protected _abiEncoderByFunctionSignature: AbiEncoderByFunctionSignature; protected _web3Wrapper: Web3Wrapper; public abi: ContractAbi; public address: string; public contractName: string; public constructorArgs: any[] = []; public _deployedBytecodeIfExists?: Buffer; private _evmIfExists?: VM; private _evmAccountIfExists?: Buffer; protected static _formatABIDataItemList( abis: DataItem[], values: any[], formatter: (type: string, value: any) => any, ): any { return values.map((value: any, i: number) => formatABIDataItem(abis[i], value, formatter)); } protected static _lowercaseAddress(type: string, value: string): string { return type === 'address' ? value.toLowerCase() : value; } protected static _bigNumberToString(_type: string, value: any): any { return BigNumber.isBigNumber(value) ? 
value.toString() : value; } protected static _lookupConstructorAbi(abi: ContractAbi): ConstructorAbi { const constructorAbiIfExists = abi.find( (abiDefinition: AbiDefinition) => abiDefinition.type === AbiType.Constructor, // tslint:disable-next-line:no-unnecessary-type-assertion ) as ConstructorAbi | undefined; if (constructorAbiIfExists !== undefined) { return constructorAbiIfExists; } else { // If the constructor is not explicitly defined, it won't be included in the ABI. It is // still callable however, so we construct what the ABI would look like were it to exist. const defaultConstructorAbi: ConstructorAbi = { type: AbiType.Constructor, stateMutability: 'nonpayable', payable: false, inputs: [], }; return defaultConstructorAbi; } } protected static _throwIfCallResultIsRevertError(rawCallResult: string): void { // Try to decode the call result as a revert error. let revert: RevertError; try { revert = decodeBytesAsRevertError(rawCallResult); } catch (err) { // Can't decode it as a revert error, so assume it didn't revert. return; } throw revert; } protected static _throwIfThrownErrorIsRevertError(error: Error): void { // Try to decode a thrown error. let revertError: RevertError; try { revertError = decodeThrownErrorAsRevertError(error); } catch (err) { // Can't decode it. return; } // Re-cast StringRevertErrors as plain Errors for backwards-compatibility. if (revertError instanceof StringRevertError) { throw new Error(revertError.values.message as string); } throw revertError; } protected static _throwIfUnexpectedEmptyCallResult(rawCallResult: string, methodAbi: AbiEncoder.Method): void { // With live nodes, we will receive an empty call result if: // 1. The function has no return value. // 2. The contract reverts without data. // 3. The contract reverts with an invalid opcode (`assert(false)` or `invalid()`). if (!rawCallResult || rawCallResult === '0x') { const returnValueDataItem = methodAbi.getReturnValueDataItem(); if (returnValueDataItem.components === undefined || returnValueDataItem.components.length === 0) { // Expected no result (which makes it hard to tell if the call reverted). return; } throw new Error(`Function "${methodAbi.getSignature()}" reverted with no data`); } } // Throws if the given arguments cannot be safely/correctly encoded based on // the given inputAbi. An argument may not be considered safely encodeable // if it overflows the corresponding Solidity type, there is a bug in the // encoder, or the encoder performs unsafe type coercion. public static strictArgumentEncodingCheck(inputAbi: DataItem[], args: any[]): string { const abiEncoder = AbiEncoder.create(inputAbi); const params = abiUtils.parseEthersParams(inputAbi); const rawEncoded = abiEncoder.encode(args); const rawDecoded = abiEncoder.decodeAsArray(rawEncoded); for (let i = 0; i < rawDecoded.length; i++) { const original = args[i]; const decoded = rawDecoded[i]; if (!abiUtils.isAbiDataEqual(params.names[i], params.types[i], original, decoded)) { throw new Error( `Cannot safely encode argument: ${params.names[i]} (${original}) of type ${ params.types[i] }. 
(Possible type overflow or other encoding error)`, ); } } return rawEncoded; } protected static async _applyDefaultsToContractTxDataAsync<T extends Partial<TxData | TxDataPayable>>( txData: T, estimateGasAsync?: (txData: T) => Promise<number>, ): Promise<TxData> { const txDataWithDefaults = BaseContract._removeUndefinedProperties<T>(txData); if (txDataWithDefaults.gas === undefined && estimateGasAsync !== undefined) { txDataWithDefaults.gas = await estimateGasAsync(txDataWithDefaults); } if (txDataWithDefaults.from !== undefined) { txDataWithDefaults.from = txDataWithDefaults.from.toLowerCase(); } return txDataWithDefaults as TxData; } protected static _assertCallParams(callData: Partial<CallData>, defaultBlock?: BlockParam): void { assert.doesConformToSchema('callData', callData, schemas.callDataSchema, [ schemas.addressSchema, schemas.numberSchema, schemas.jsNumber, ]); if (defaultBlock !== undefined) { assert.isBlockParam('defaultBlock', defaultBlock); } } private static _removeUndefinedProperties<T>(props: any): T { const clonedProps = { ...props }; Object.keys(clonedProps).forEach(key => clonedProps[key] === undefined && delete clonedProps[key]); return clonedProps; } protected _promiseWithTransactionHash( txHashPromise: Promise<string>, opts: AwaitTransactionSuccessOpts, ): PromiseWithTransactionHash<TransactionReceiptWithDecodedLogs> { return new PromiseWithTransactionHash<TransactionReceiptWithDecodedLogs>( txHashPromise, (async (): Promise<TransactionReceiptWithDecodedLogs> => { // When the transaction hash resolves, wait for it to be mined. return this._web3Wrapper.awaitTransactionSuccessAsync( await txHashPromise, opts.pollingIntervalMs, opts.timeoutMs, ); })(), ); } protected async _applyDefaultsToTxDataAsync<T extends Partial<TxData | TxDataPayable>>( txData: T, estimateGasAsync?: (txData: T) => Promise<number>, ): Promise<TxData> { // Gas amount sourced with the following priorities: // 1. Optional param passed in to public method call // 2. Global config passed in at library instantiation // 3. 
Gas estimate calculation + safety margin // tslint:disable-next-line:no-object-literal-type-assertion const txDataWithDefaults = { to: this.address, ...this._web3Wrapper.getContractDefaults(), ...BaseContract._removeUndefinedProperties(txData), } as T; if (txDataWithDefaults.gas === undefined && estimateGasAsync !== undefined) { txDataWithDefaults.gas = await estimateGasAsync(txDataWithDefaults); } if (txDataWithDefaults.from !== undefined) { txDataWithDefaults.from = txDataWithDefaults.from.toLowerCase(); } return txDataWithDefaults as TxData; } protected async _evmExecAsync(encodedData: string): Promise<string> { const encodedDataBytes = Buffer.from(encodedData.substr(2), 'hex'); const addressBuf = Buffer.from(this.address.substr(2), 'hex'); // should only run once, the first time it is called if (this._evmIfExists === undefined) { const vm = new VM({}); const psm = new PStateManager(vm.stateManager); // create an account with 1 ETH const accountPk = Buffer.from(ARBITRARY_PRIVATE_KEY, 'hex'); const accountAddress = util.privateToAddress(accountPk); const account = new Account({ balance: 1e18 }); await psm.putAccount(accountAddress, account); // 'deploy' the contract if (this._deployedBytecodeIfExists === undefined) { const contractCode = await this._web3Wrapper.getContractCodeAsync(this.address); this._deployedBytecodeIfExists = Buffer.from(contractCode.substr(2), 'hex'); } await psm.putContractCode(addressBuf, this._deployedBytecodeIfExists); // save for later this._evmIfExists = vm; this._evmAccountIfExists = accountAddress; } let rawCallResult; try { const result = await this._evmIfExists.runCall({ to: addressBuf, caller: this._evmAccountIfExists, origin: this._evmAccountIfExists, data: encodedDataBytes, }); rawCallResult = `0x${result.execResult.returnValue.toString('hex')}`; } catch (err) { BaseContract._throwIfThrownErrorIsRevertError(err); throw err; } BaseContract._throwIfCallResultIsRevertError(rawCallResult); return rawCallResult; } protected async _performCallAsync(callData: Partial<CallData>, defaultBlock?: BlockParam): Promise<string> { const callDataWithDefaults = await this._applyDefaultsToTxDataAsync(callData); let rawCallResult: string; try { rawCallResult = await this._web3Wrapper.callAsync(callDataWithDefaults, defaultBlock); } catch (err) { BaseContract._throwIfThrownErrorIsRevertError(err); throw err; } BaseContract._throwIfCallResultIsRevertError(rawCallResult); return rawCallResult; } protected _lookupAbiEncoder(functionSignature: string): AbiEncoder.Method { const abiEncoder = this._abiEncoderByFunctionSignature[functionSignature]; if (abiEncoder === undefined) { throw new Error(`Failed to lookup method with function signature '${functionSignature}'`); } return abiEncoder; } protected _lookupAbi(functionSignature: string): MethodAbi { const methodAbi = this.abi.find((abiDefinition: AbiDefinition) => { if (abiDefinition.type !== AbiType.Function) { return false; } // tslint:disable-next-line:no-unnecessary-type-assertion const abiFunctionSignature = new AbiEncoder.Method(abiDefinition as MethodAbi).getSignature(); if (abiFunctionSignature === functionSignature) { return true; } return false; }) as MethodAbi; return methodAbi; } protected _strictEncodeArguments(functionSignature: string, functionArguments: any): string { const abiEncoder = this._lookupAbiEncoder(functionSignature); const inputAbi = abiEncoder.getDataItem().components; if (inputAbi === undefined)
const abiEncodedArguments = abiEncoder.encode(functionArguments); return abiEncodedArguments; } /// @dev Constructs a contract wrapper. /// @param contractName Name of contract. /// @param abi of the contract. /// @param address of the deployed contract. /// @param supportedProvider for communicating with an ethereum node. /// @param logDecodeDependencies the name and ABI of contracts whose event logs are /// decoded by this wrapper. /// @param deployedBytecode the deployedBytecode of the contract, used for executing /// pure Solidity functions in memory. This is different from the bytecode. constructor( contractName: string, abi: ContractAbi, address: string, supportedProvider: SupportedProvider, callAndTxnDefaults?: Partial<CallData>, logDecodeDependencies?: { [contractName: string]: ContractAbi }, deployedBytecode?: string, ) { assert.isString('contractName', contractName); assert.isETHAddressHex('address', address); if (deployedBytecode !== undefined && deployedBytecode !== '') { // `deployedBytecode` might contain references to // unlinked libraries and, hence, would not be a hex string. We'll just // leave `_deployedBytecodeIfExists` empty if this is the case. // TODO(dorothy-zbornak): We should link the `deployedBytecode` // beforehand in the generated wrappers. try { assert.isHexString('deployedBytecode', deployedBytecode); this._deployedBytecodeIfExists = Buffer.from(deployedBytecode.substr(2), 'hex'); } catch (err) { // Do nothing. } } const provider = providerUtils.standardizeOrThrow(supportedProvider); if (callAndTxnDefaults !== undefined) { assert.doesConformToSchema('callAndTxnDefaults', callAndTxnDefaults, schemas.callDataSchema, [ schemas.addressSchema, schemas.numberSchema, schemas.jsNumber, ]); } this.contractName = contractName; this._web3Wrapper = new Web3Wrapper(provider, callAndTxnDefaults); this.abi = abi; this.address = address; const methodAbis = this.abi.filter( (abiDefinition: AbiDefinition) => abiDefinition.type === AbiType.Function, ) as MethodAbi[]; this._abiEncoderByFunctionSignature = {}; methodAbis.forEach(methodAbi => { const abiEncoder = new AbiEncoder.Method(methodAbi); const functionSignature = abiEncoder.getSignature(); this._abiEncoderByFunctionSignature[functionSignature] = abiEncoder; this._web3Wrapper.abiDecoder.addABI(abi, contractName); }); if (logDecodeDependencies) { Object.entries(logDecodeDependencies).forEach(([dependencyName, dependencyAbi]) => this._web3Wrapper.abiDecoder.addABI(dependencyAbi, dependencyName), ); } } }
{ throw new Error(`Undefined Method Input ABI`); }
conditional_block
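For illustration, a minimal TypeScript sketch of how `PromiseWithTransactionHash` (exported above) can be consumed; the `@0x/base-contract` import path and the `receiptPromise` argument are assumptions for this example:

```typescript
import { PromiseWithTransactionHash } from '@0x/base-contract';
import { TransactionReceiptWithDecodedLogs } from 'ethereum-types';

async function waitForTx(
    txHashPromise: Promise<string>,
    receiptPromise: Promise<TransactionReceiptWithDecodedLogs>,
): Promise<TransactionReceiptWithDecodedLogs> {
    const pending = new PromiseWithTransactionHash(txHashPromise, receiptPromise);
    // The transaction hash resolves as soon as the transaction is submitted...
    console.log(`submitted: ${await pending.txHashPromise}`);
    // ...while awaiting the wrapper itself resolves to the mined receipt.
    return pending;
}
```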