file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
config.go
// Package dfc provides distributed file-based cache with Amazon and Google Cloud backends. /* * Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. * */ package dfc import ( "encoding/json" "flag" "fmt" "io/ioutil"
"github.com/golang/glog" ) const ( KiB = 1024 MiB = 1024 * KiB GiB = 1024 * MiB ) // checksums: xattr, http header, and config const ( xattrXXHashVal = "user.obj.dfchash" xattrObjVersion = "user.obj.version" ChecksumNone = "none" ChecksumXXHash = "xxhash" ChecksumMD5 = "md5" VersionAll = "all" VersionCloud = "cloud" VersionLocal = "local" VersionNone = "none" ) const ( AckWhenInMem = "memory" AckWhenOnDisk = "disk" // the default ) const ( lbname = "localbuckets" // base name of the lbconfig file; not to confuse with config.Localbuckets mpath sub-directory mpname = "mpaths" // base name to persist ctx.mountpaths ) //============================== // // config types // //============================== type dfconfig struct { Confdir string `json:"confdir"` CloudProvider string `json:"cloudprovider"` CloudBuckets string `json:"cloud_buckets"` LocalBuckets string `json:"local_buckets"` // structs Log logconfig `json:"log"` Periodic periodic `json:"periodic"` Timeout timeoutconfig `json:"timeout"` Proxy proxyconfig `json:"proxyconfig"` LRU lruconfig `json:"lru_config"` Rebalance rebalanceconf `json:"rebalance_conf"` Cksum cksumconfig `json:"cksum_config"` Ver versionconfig `json:"version_config"` FSpaths map[string]string `json:"fspaths"` TestFSP testfspathconf `json:"test_fspaths"` Net netconfig `json:"netconfig"` FSKeeper fskeeperconf `json:"fskeeper"` Experimental experimental `json:"experimental"` H2c bool `json:"h2c"` } type logconfig struct { Dir string `json:"logdir"` // log directory Level string `json:"loglevel"` // log level aka verbosity MaxSize uint64 `json:"logmaxsize"` // size that triggers log rotation MaxTotal uint64 `json:"logmaxtotal"` // max total size of all the logs in the log directory } type periodic struct { StatsTimeStr string `json:"stats_time"` KeepAliveTimeStr string `json:"keep_alive_time"` // omitempty StatsTime time.Duration `json:"-"` KeepAliveTime time.Duration `json:"-"` } // timeoutconfig contains timeouts used for intra-cluster 
communication type timeoutconfig struct { DefaultStr string `json:"default"` Default time.Duration `json:"-"` // omitempty DefaultLongStr string `json:"default_long"` DefaultLong time.Duration `json:"-"` // MaxKeepaliveStr string `json:"max_keepalive"` MaxKeepalive time.Duration `json:"-"` // ProxyPingStr string `json:"proxy_ping"` ProxyPing time.Duration `json:"-"` // VoteRequestStr string `json:"vote_request"` VoteRequest time.Duration `json:"-"` // } type proxyconfig struct { Primary proxycnf `json:"primary"` Original proxycnf `json:"original"` } type proxycnf struct { ID string `json:"id"` // used to register caching servers/other proxies URL string `json:"url"` // used to register caching servers/other proxies Passthru bool `json:"passthru"` // false: get then redirect, true (default): redirect right away } type lruconfig struct { LowWM uint32 `json:"lowwm"` // capacity usage low watermark HighWM uint32 `json:"highwm"` // capacity usage high watermark AtimeCacheMax uint64 `json:"atime_cache_max"` // atime cache - max num entries DontEvictTimeStr string `json:"dont_evict_time"` // eviction is not permitted during [atime, atime + dont] CapacityUpdTimeStr string `json:"capacity_upd_time"` // min time to update capacity DontEvictTime time.Duration `json:"-"` // omitempty CapacityUpdTime time.Duration `json:"-"` // ditto LRUEnabled bool `json:"lru_enabled"` // LRU will only run when LRUEnabled is true } type rebalanceconf struct { StartupDelayTimeStr string `json:"startup_delay_time"` StartupDelayTime time.Duration `json:"-"` // omitempty RebalancingEnabled bool `json:"rebalancing_enabled"` } type testfspathconf struct { Root string `json:"root"` Count int `json:"count"` Instance int `json:"instance"` } type netconfig struct { IPv4 string `json:"ipv4"` L4 l4cnf `json:"l4"` HTTP httpcnf `json:"http"` } type l4cnf struct { Proto string `json:"proto"` // tcp, udp Port string `json:"port"` // listening port } type httpcnf struct { MaxNumTargets int 
`json:"max_num_targets"` // estimated max num targets (to count idle conns) UseHTTPS bool `json:"use_https"` // use HTTPS instead of HTTP Certificate string `json:"server_certificate"` // HTTPS: openssl certificate Key string `json:"server_key"` // HTTPS: openssl key } type cksumconfig struct { Checksum string `json:"checksum"` // DFC checksum: xxhash:none ValidateColdGet bool `json:"validate_cold_get"` // MD5 (ETag) validation upon cold GET } type versionconfig struct { ValidateWarmGet bool `json:"validate_warm_get"` // True: validate object version upon warm GET Versioning string `json:"versioning"` // types of objects versioning is enabled for: all, cloud, local, none } type fskeeperconf struct { FSCheckTimeStr string `json:"fs_check_time"` FSCheckTime time.Duration `json:"-"` // omitempty OfflineFSCheckTimeStr string `json:"offline_fs_check_time"` OfflineFSCheckTime time.Duration `json:"-"` // omitempty Enabled bool `json:"fskeeper_enabled"` } type experimental struct { AckPut string `json:"ack_put"` MaxMemMB int `json:"max_mem_mb"` // max memory size for the "memory" option - FIXME: niy } //============================== // // config functions // //============================== func initconfigparam() error { getConfig(clivars.conffile) err := flag.Lookup("log_dir").Value.Set(ctx.config.Log.Dir) if err != nil { glog.Errorf("Failed to flag-set glog dir %q, err: %v", ctx.config.Log.Dir, err) } if err = CreateDir(ctx.config.Log.Dir); err != nil { glog.Errorf("Failed to create log dir %q, err: %v", ctx.config.Log.Dir, err) return err } if err = validateconf(); err != nil { return err } // glog rotate glog.MaxSize = ctx.config.Log.MaxSize if glog.MaxSize > GiB { glog.Errorf("Log.MaxSize %d exceeded 1GB, setting the default 1MB", glog.MaxSize) glog.MaxSize = MiB } // CLI override if clivars.statstime != 0 { ctx.config.Periodic.StatsTime = clivars.statstime } if clivars.proxyurl != "" { ctx.config.Proxy.Primary.ID = "" ctx.config.Proxy.Primary.URL = clivars.proxyurl 
} if clivars.loglevel != "" { if err = setloglevel(clivars.loglevel); err != nil { glog.Errorf("Failed to set log level = %s, err: %v", clivars.loglevel, err) } } else { if err = setloglevel(ctx.config.Log.Level); err != nil { glog.Errorf("Failed to set log level = %s, err: %v", ctx.config.Log.Level, err) } } if build != "" { glog.Infof("Build: %s", build) // git rev-parse --short HEAD } glog.Infof("Logdir: %q Proto: %s Port: %s Verbosity: %s", ctx.config.Log.Dir, ctx.config.Net.L4.Proto, ctx.config.Net.L4.Port, ctx.config.Log.Level) glog.Infof("Config: %q Role: %s StatsTime: %v", clivars.conffile, clivars.role, ctx.config.Periodic.StatsTime) return err } func getConfig(fpath string) { raw, err := ioutil.ReadFile(fpath) if err != nil { glog.Errorf("Failed to read config %q, err: %v", fpath, err) os.Exit(1) } err = json.Unmarshal(raw, &ctx.config) if err != nil { glog.Errorf("Failed to json-unmarshal config %q, err: %v", fpath, err) os.Exit(1) } } func validateVersion(version string) error { versions := []string{VersionAll, VersionCloud, VersionLocal, VersionNone} versionValid := false for _, v := range versions { if v == version { versionValid = true break } } if !versionValid { return fmt.Errorf("Invalid version: %s - expecting one of %s", version, strings.Join(versions, ", ")) } return nil } // StartupDelayTimeStr string `json:"startup_delay_time"` // StartupDelayTime time.Duration `json:"-"` // omitempty func validateconf() (err error) { // durations if ctx.config.Periodic.StatsTime, err = time.ParseDuration(ctx.config.Periodic.StatsTimeStr); err != nil { return fmt.Errorf("Bad stats-time format %s, err: %v", ctx.config.Periodic.StatsTimeStr, err) } if ctx.config.Timeout.Default, err = time.ParseDuration(ctx.config.Timeout.DefaultStr); err != nil { return fmt.Errorf("Bad Timeout default format %s, err: %v", ctx.config.Timeout.DefaultStr, err) } if ctx.config.Timeout.DefaultLong, err = time.ParseDuration(ctx.config.Timeout.DefaultLongStr); err != nil { return 
fmt.Errorf("Bad Timeout default_long format %s, err %v", ctx.config.Timeout.DefaultLongStr, err) } if ctx.config.Periodic.KeepAliveTime, err = time.ParseDuration(ctx.config.Periodic.KeepAliveTimeStr); err != nil { return fmt.Errorf("Bad keep_alive_time format %s, err: %v", ctx.config.Periodic.KeepAliveTimeStr, err) } if ctx.config.LRU.DontEvictTime, err = time.ParseDuration(ctx.config.LRU.DontEvictTimeStr); err != nil { return fmt.Errorf("Bad dont_evict_time format %s, err: %v", ctx.config.LRU.DontEvictTimeStr, err) } if ctx.config.LRU.CapacityUpdTime, err = time.ParseDuration(ctx.config.LRU.CapacityUpdTimeStr); err != nil { return fmt.Errorf("Bad capacity_upd_time format %s, err: %v", ctx.config.LRU.CapacityUpdTimeStr, err) } if ctx.config.Rebalance.StartupDelayTime, err = time.ParseDuration(ctx.config.Rebalance.StartupDelayTimeStr); err != nil { return fmt.Errorf("Bad startup_delay_time format %s, err: %v", ctx.config.Rebalance.StartupDelayTimeStr, err) } hwm, lwm := ctx.config.LRU.HighWM, ctx.config.LRU.LowWM if hwm <= 0 || lwm <= 0 || hwm < lwm || lwm > 100 || hwm > 100 { return fmt.Errorf("Invalid LRU configuration %+v", ctx.config.LRU) } if ctx.config.TestFSP.Count == 0 { for fp1 := range ctx.config.FSpaths { for fp2 := range ctx.config.FSpaths { if fp1 != fp2 && (strings.HasPrefix(fp1, fp2) || strings.HasPrefix(fp2, fp1)) { return fmt.Errorf("Invalid fspaths: %q is a prefix or includes as a prefix %q", fp1, fp2) } } } } if ctx.config.Cksum.Checksum != ChecksumXXHash && ctx.config.Cksum.Checksum != ChecksumNone { return fmt.Errorf("Invalid checksum: %s - expecting %s or %s", ctx.config.Cksum.Checksum, ChecksumXXHash, ChecksumNone) } if err := validateVersion(ctx.config.Ver.Versioning); err != nil { return err } if ctx.config.FSKeeper.FSCheckTime, err = time.ParseDuration(ctx.config.FSKeeper.FSCheckTimeStr); err != nil { return fmt.Errorf("Bad FSKeeper fs_check_time format %s, err %v", ctx.config.FSKeeper.FSCheckTimeStr, err) } if 
ctx.config.FSKeeper.OfflineFSCheckTime, err = time.ParseDuration(ctx.config.FSKeeper.OfflineFSCheckTimeStr); err != nil { return fmt.Errorf("Bad FSKeeper offline_fs_check_time format %s, err %v", ctx.config.FSKeeper.OfflineFSCheckTimeStr, err) } if ctx.config.Timeout.MaxKeepalive, err = time.ParseDuration(ctx.config.Timeout.MaxKeepaliveStr); err != nil { return fmt.Errorf("Bad Timeout max_keepalive format %s, err %v", ctx.config.Timeout.MaxKeepaliveStr, err) } if ctx.config.Timeout.ProxyPing, err = time.ParseDuration(ctx.config.Timeout.ProxyPingStr); err != nil { return fmt.Errorf("Bad Timeout proxy_ping format %s, err %v", ctx.config.Timeout.ProxyPingStr, err) } if ctx.config.Timeout.VoteRequest, err = time.ParseDuration(ctx.config.Timeout.VoteRequestStr); err != nil { return fmt.Errorf("Bad Timeout vote_request format %s, err %v", ctx.config.Timeout.VoteRequestStr, err) } return nil } func setloglevel(loglevel string) (err error) { v := flag.Lookup("v").Value if v == nil { return fmt.Errorf("nil -v Value") } err = v.Set(loglevel) if err == nil { ctx.config.Log.Level = loglevel } return } func writeConfigFile() error { return localSave(clivars.conffile, ctx.config) }
"os" "strings" "time"
random_line_split
day18.rs
// day 18 use std::collections::HashMap; struct MazeNode { obstacle: bool, key_index: i64, door_index: i64, } struct Key { x: usize, y: usize, symbol: char, } struct Door { _x: usize, _y: usize, symbol: char, key_index: usize, } struct Maze { grid: Vec<Vec<MazeNode>>, keys: Vec<Key>, doors: Vec<Door>, cached: HashMap<usize, HashMap<usize, CachedPath>>, width: usize, height: usize, } struct CachedPath { dist: i64, keys: Vec<usize>, } #[derive(Clone)] struct DNode { x: usize, y: usize, dist: usize, parent_x:usize, parent_y:usize } #[derive(Clone)] struct DNodeB { at:Vec<usize>, keys:Vec<usize>, dist:usize } fn intersect_count (vec_a:&Vec<usize>, vec_b:&Vec<usize>)->usize { let mut count = 0; for i in 0..vec_a.len() { for j in 0..vec_b.len() { if vec_b[j] == vec_a[i] { count+=1; break; } } } return count; } fn keynodeindex(maze:&mut Maze, keys: &Vec<usize>, at: &Vec<usize>)->String { let mut ret = String::from(""); let mut keys2 = Vec::new(); for i in 0..keys.len() { keys2.push((*maze).keys[(*keys)[i]].symbol); } keys2.sort(); for i in 0..(*at).len() { ret.push((*maze).keys[(*at)[i]].symbol); } ret.push('|'); for i in 0..keys2.len() { ret.push(keys2[i]); } return ret; } fn dijkstra_b(maze:&mut Maze, origins:&Vec<usize>)->usize { let mut frontier:HashMap<String, DNodeB> = HashMap::new(); let mut frontier_next:HashMap<String, DNodeB> = HashMap::new(); let mut explored:HashMap<String, DNodeB> = HashMap::new(); let mut candidates:HashMap<String, usize> = HashMap::new(); let mut start = DNodeB{at:Vec::new(), keys:Vec::new(), dist:0}; for i in 0..(*origins).len() { start.at.push((*origins)[i]); start.keys.push((*origins)[i]); } frontier_next.insert(keynodeindex(maze, &(start.keys), &(start.at)), start); while frontier_next.len() > 0 { frontier.clear(); for key in frontier_next.keys() { let node = frontier_next.get(key).unwrap(); let node2 = (*node).clone(); frontier.insert(key.to_string(), node2); } frontier_next.clear(); for key in frontier.keys() { //println!("Key {}", 
key); let node = frontier.get(key).unwrap(); if (*node).keys.len() == (*maze).keys.len() { if let Some(candidate) = candidates.get_mut(key) { if (*candidate) > (*node).dist { *candidate = (*node).dist; } } else { candidates.insert(key.to_string(), (*node).dist); } } // add to explored or update if let Some(explored_node) = explored.get_mut(key) { if (*explored_node).dist > (*node).dist { (*explored_node).keys.clear(); (*explored_node).at.clear(); for i in 0..(*node).keys.len() { (*explored_node).keys.push((*node).keys[i]); } for i in 0..(*node).at.len() { (*explored_node).at.push((*node).at[i]); } (*explored_node).dist = (*node).dist; } } else { let new_node = (*node).clone(); explored.insert(key.to_string(), new_node); } // add all next steps from all positions for p in 0..(*node).at.len() { for k in 0..(*maze).keys.len() { let mut present = false; for j in 0..(*node).keys.len() { if (*node).keys[j] == k { present = true; break; } } if present { continue; } let curr_key = (*node).at[p]; // if not accessible from current position if (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist < 0 { continue; } // if not accessible with current keys let required_keys = (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().keys.clone(); if intersect_count(&((*node).keys), &required_keys) < required_keys.len() { continue; } let mut new_keys = (*node).keys.clone(); new_keys.push(k); let mut new_at = (*node).at.clone(); new_at[p] = k; let new_keys_index = keynodeindex(maze, &new_keys, &new_at); let new_dist = (*node).dist + ((*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist as usize); // if previously explored and not shorter if explored.contains_key(&new_keys_index) && explored.get(&new_keys_index).unwrap().dist < new_dist { continue; } // if previously added to the frontier if frontier_next.contains_key(&new_keys_index) && frontier_next.get(&new_keys_index).unwrap().dist < new_dist { continue; } // add to frontier frontier_next.insert(new_keys_index, 
DNodeB{at:new_at, keys:new_keys, dist:new_dist}); } } } } let mut min_dist = 0; for candidate_key in candidates.keys() { let candidate = candidates.get(candidate_key).unwrap(); if min_dist == 0 || min_dist > *candidate { min_dist = *candidate; } } return min_dist; } fn exploredindex(maze: &mut Maze, x: usize, y:usize)->usize { return ((*maze).width * y) + x; } fn dijkstra_a(maze: &mut Maze, start_x:usize, start_y:usize, end_x:usize, end_y:usize, doors:&mut Vec<usize>, keys:&mut Vec<usize>, ret_doors_keys:bool)->i64 { let mut explored:HashMap<usize, DNode> = HashMap::new(); let mut frontier:HashMap<usize,DNode> = HashMap::new(); let mut frontier_next:HashMap<usize,DNode> = HashMap::new(); frontier_next.insert(exploredindex(maze, start_x, start_y), DNode{x:start_x, y:start_y, dist:0, parent_x:start_x, parent_y:start_y}); let dest_key = exploredindex(maze, end_x, end_y); while frontier_next.len() > 0 { frontier.clear(); for key in frontier_next.keys() { let node = frontier_next.get(key).unwrap(); let new_node = (*node).clone(); frontier.insert(*key, new_node); } frontier_next.clear(); for key in frontier.keys() { let node = frontier.get(key).unwrap(); let exploredindex1 = exploredindex(maze, (*node).x, (*node).y); if explored.contains_key(&exploredindex1) { let last_dist = explored.get(&exploredindex1).unwrap().dist; if (*node).dist < last_dist { let node2 = explored.get_mut(&exploredindex1).unwrap(); (*node2).dist = (*node).dist; (*node2).parent_x = (*node).parent_x; (*node2).parent_y = (*node).parent_y; } } else { let new_node = (*node).clone(); explored.insert(exploredindex1, new_node); } let mut xd:i64 = 0; let mut yd:i64 = 0; for i in 0..4 { if i == 0 { xd = -1; yd = 0; } else if i == 1 { xd = 1; yd = 0; } else if i == 2 { xd = 0; yd = 1; } else if i == 3 { xd = 0; yd = -1; } let x1 = (*node).x as i64 + xd; let y1 = (*node).y as i64 + yd; if x1 < 0 || x1 >= (*maze).width as i64 || y1 < 0 || y1 >= (*maze).height as i64 { continue; } else { if (*maze).grid[y1 as 
usize][x1 as usize].obstacle { continue; } let index = exploredindex(maze, x1 as usize, y1 as usize); let new_dist = (*node).dist + 1; if explored.contains_key(&index) && explored.get(&index).unwrap().dist <= new_dist { continue; } if frontier_next.contains_key(&index) && frontier_next.get(&index).unwrap().dist <= new_dist { continue; } frontier_next.insert(index, DNode{x:x1 as usize, y:y1 as usize, dist:new_dist, parent_x:(*node).x, parent_y:(*node).y}); } } } if explored.contains_key(&dest_key) { let end_node = explored.get(&dest_key).unwrap(); if ret_doors_keys { let mut curr_x = end_node.parent_x; let mut curr_y = end_node.parent_y; while !(curr_x == start_x && curr_y == start_y) { if (*maze).grid[curr_y][curr_x].key_index >= 0 { (*keys).push((*maze).grid[curr_y][curr_x].key_index as usize); } if (*maze).grid[curr_y][curr_x].door_index >= 0 { (*doors).push((*maze).grid[curr_y][curr_x].door_index as usize); } let index = exploredindex(maze, curr_x, curr_y); let trace = explored.get(&index).unwrap(); curr_x = trace.parent_x; curr_y = trace.parent_y; } } return end_node.dist as i64; } } return -1; } fn read_maze(input: Vec<String>, maze:&mut Maze)->usize { (*maze).width = input[0].len(); (*maze).height = input.len(); // read origin, obstacles, doors and keys for y in 0..(*maze).height { (*maze).grid.push(Vec::new()); for x in 0..(*maze).width { let byte = input[y].as_bytes()[x]; match byte { 35=>(*maze).grid[y].push(MazeNode{obstacle:true, door_index: -1, key_index: -1}), 46=>(*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: -1}), 65..=90=> {(*maze).doors.push(Door{_x:x,_y:y,symbol:(byte as char),key_index:0}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: ((*maze).doors.len() - 1) as i64, key_index: -1}); }, 97..=122=> {(*maze).keys.push(Key{x:x,y:y,symbol:((byte-32) as char)}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64}); }, _=>{(*maze).keys.push(Key{x:x, y:y, 
symbol:(byte as char)}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64});}, } } } // quick lookup for door/key correspondance for i in 0..(*maze).doors.len() { for j in 0..(*maze).keys.len() { if (*maze).keys[j].symbol == (*maze).doors[i].symbol { (*maze).doors[i].key_index = j; break; } } } // cache distances between each key for i in 0..(*maze).keys.len() { (*maze).cached.insert(i, HashMap::new()); } for i in 0..(*maze).keys.len() { for j in 0..(*maze).keys.len() { if j == i { continue; } let mut doors = Vec::new(); let mut keys = Vec::new(); let dist = dijkstra_a(maze, (*maze).keys[i].x, (*maze).keys[i].y, (*maze).keys[j].x, (*maze).keys[j].y, &mut doors, &mut keys, true); let mut doorkeys:Vec<usize> = Vec::new(); for k in 0..doors.len() { doorkeys.push((*maze).doors[doors[k]].key_index); } (*maze).cached.get_mut(&i).unwrap().insert(j, CachedPath{dist: dist, keys: doorkeys}); } } let mut first_keys:Vec<usize> = Vec::new(); for i in 0..(*maze).keys.len() { if (*maze).keys[i].symbol == '@' { first_keys.push(i); } } return dijkstra_b(maze, &first_keys); } pub fn run(file_path:&str) { let mut maze = Maze{grid:Vec::new(), keys:Vec::new(), doors:Vec::new(), cached:HashMap::new(), height: 0, width: 0}; let mut maze2 = Maze{grid:Vec::new(), keys:Vec::new(), doors:Vec::new(), cached:HashMap::new(), height: 0, width: 0}; let vec = super::utility::util_fread(file_path); let mut vec2:Vec<String> = Vec::new(); let mut ox = 0; let mut oy = 0; if vec.len() == 0 { println!("Input not read properly"); return; } // test if maze is set up for part B for line in 0..vec.len() { let bytes = vec[line].as_bytes(); for pos in 0..bytes.len() { if bytes[pos] == '@' as u8 { ox = pos; oy = line; } } } let mut has_part_b = true; if ox + 1 >= vec[0].len() || (ox as i64 - 1) < 0 || oy + 1 >= vec.len() || (oy as i64 - 1) < 0 { has_part_b = false; } else { for y in oy-1..=oy+1 { let bytes = vec[y].as_bytes(); if y == oy-1 && (bytes[ox-1] 
!= '.' as u8 || bytes[ox] != '.' as u8 || bytes[ox+1] != '.' as u8) { has_part_b = false; break; } else if y == oy && (bytes[ox-1] != '.' as u8 || bytes[ox] != '@' as u8 || bytes[ox+1] != '.' as u8) { has_part_b = false; break; } else if y == oy+1 && (bytes[ox-1] != '.' as u8 || bytes[ox] != '.' as u8 || bytes[ox+1] != '.' as u8) { has_part_b = false; break; } } } if has_part_b
let result_a = read_maze(vec, &mut maze); println!("Result A: {}", result_a); if has_part_b { let result_b = read_maze(vec2, &mut maze2); println!("Result B: {}", result_b); } }
{ for y in 0..vec.len() { let mut line = String::from(""); let bytes = vec[y].as_bytes(); for x in 0..vec[y].len() { if (x == ox - 1 && y == oy - 1) || (x == ox + 1 && y == oy - 1) || (x == ox - 1 && y == oy + 1) || (x == ox + 1 && y == oy + 1) { line.push('@'); } else if (x == ox && y == oy - 1) || (x == ox && y == oy + 1) || (x == ox - 1 && y == oy) || (x == ox + 1 && y == oy) || (x == ox && y == oy) { line.push('#'); } else { line.push(bytes[x] as char); } } vec2.push(line); } }
conditional_block
day18.rs
// day 18 use std::collections::HashMap; struct MazeNode { obstacle: bool, key_index: i64, door_index: i64, } struct Key { x: usize, y: usize, symbol: char, } struct Door { _x: usize, _y: usize, symbol: char, key_index: usize, } struct Maze { grid: Vec<Vec<MazeNode>>, keys: Vec<Key>, doors: Vec<Door>, cached: HashMap<usize, HashMap<usize, CachedPath>>, width: usize, height: usize, } struct CachedPath { dist: i64, keys: Vec<usize>, } #[derive(Clone)] struct DNode { x: usize, y: usize, dist: usize, parent_x:usize, parent_y:usize } #[derive(Clone)] struct DNodeB { at:Vec<usize>, keys:Vec<usize>, dist:usize } fn intersect_count (vec_a:&Vec<usize>, vec_b:&Vec<usize>)->usize { let mut count = 0; for i in 0..vec_a.len() { for j in 0..vec_b.len() { if vec_b[j] == vec_a[i] { count+=1; break; } } } return count; } fn keynodeindex(maze:&mut Maze, keys: &Vec<usize>, at: &Vec<usize>)->String { let mut ret = String::from(""); let mut keys2 = Vec::new(); for i in 0..keys.len() { keys2.push((*maze).keys[(*keys)[i]].symbol); } keys2.sort(); for i in 0..(*at).len() { ret.push((*maze).keys[(*at)[i]].symbol); } ret.push('|'); for i in 0..keys2.len() { ret.push(keys2[i]); } return ret; } fn dijkstra_b(maze:&mut Maze, origins:&Vec<usize>)->usize { let mut frontier:HashMap<String, DNodeB> = HashMap::new(); let mut frontier_next:HashMap<String, DNodeB> = HashMap::new(); let mut explored:HashMap<String, DNodeB> = HashMap::new(); let mut candidates:HashMap<String, usize> = HashMap::new(); let mut start = DNodeB{at:Vec::new(), keys:Vec::new(), dist:0}; for i in 0..(*origins).len() { start.at.push((*origins)[i]); start.keys.push((*origins)[i]); } frontier_next.insert(keynodeindex(maze, &(start.keys), &(start.at)), start); while frontier_next.len() > 0 { frontier.clear(); for key in frontier_next.keys() { let node = frontier_next.get(key).unwrap(); let node2 = (*node).clone(); frontier.insert(key.to_string(), node2); } frontier_next.clear(); for key in frontier.keys() { //println!("Key {}", 
key); let node = frontier.get(key).unwrap(); if (*node).keys.len() == (*maze).keys.len() { if let Some(candidate) = candidates.get_mut(key) { if (*candidate) > (*node).dist { *candidate = (*node).dist; } } else { candidates.insert(key.to_string(), (*node).dist); } } // add to explored or update if let Some(explored_node) = explored.get_mut(key) { if (*explored_node).dist > (*node).dist { (*explored_node).keys.clear(); (*explored_node).at.clear(); for i in 0..(*node).keys.len() { (*explored_node).keys.push((*node).keys[i]); } for i in 0..(*node).at.len() { (*explored_node).at.push((*node).at[i]); } (*explored_node).dist = (*node).dist; } } else { let new_node = (*node).clone(); explored.insert(key.to_string(), new_node); } // add all next steps from all positions for p in 0..(*node).at.len() { for k in 0..(*maze).keys.len() { let mut present = false; for j in 0..(*node).keys.len() { if (*node).keys[j] == k { present = true; break; } } if present { continue; } let curr_key = (*node).at[p]; // if not accessible from current position if (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist < 0 { continue; } // if not accessible with current keys let required_keys = (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().keys.clone(); if intersect_count(&((*node).keys), &required_keys) < required_keys.len() { continue; } let mut new_keys = (*node).keys.clone(); new_keys.push(k); let mut new_at = (*node).at.clone(); new_at[p] = k; let new_keys_index = keynodeindex(maze, &new_keys, &new_at); let new_dist = (*node).dist + ((*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist as usize); // if previously explored and not shorter if explored.contains_key(&new_keys_index) && explored.get(&new_keys_index).unwrap().dist < new_dist { continue; } // if previously added to the frontier if frontier_next.contains_key(&new_keys_index) && frontier_next.get(&new_keys_index).unwrap().dist < new_dist { continue; } // add to frontier frontier_next.insert(new_keys_index, 
DNodeB{at:new_at, keys:new_keys, dist:new_dist}); } } } } let mut min_dist = 0; for candidate_key in candidates.keys() { let candidate = candidates.get(candidate_key).unwrap(); if min_dist == 0 || min_dist > *candidate { min_dist = *candidate; } } return min_dist; } fn exploredindex(maze: &mut Maze, x: usize, y:usize)->usize { return ((*maze).width * y) + x; } fn dijkstra_a(maze: &mut Maze, start_x:usize, start_y:usize, end_x:usize, end_y:usize, doors:&mut Vec<usize>, keys:&mut Vec<usize>, ret_doors_keys:bool)->i64
fn read_maze(input: Vec<String>, maze:&mut Maze)->usize { (*maze).width = input[0].len(); (*maze).height = input.len(); // read origin, obstacles, doors and keys for y in 0..(*maze).height { (*maze).grid.push(Vec::new()); for x in 0..(*maze).width { let byte = input[y].as_bytes()[x]; match byte { 35=>(*maze).grid[y].push(MazeNode{obstacle:true, door_index: -1, key_index: -1}), 46=>(*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: -1}), 65..=90=> {(*maze).doors.push(Door{_x:x,_y:y,symbol:(byte as char),key_index:0}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: ((*maze).doors.len() - 1) as i64, key_index: -1}); }, 97..=122=> {(*maze).keys.push(Key{x:x,y:y,symbol:((byte-32) as char)}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64}); }, _=>{(*maze).keys.push(Key{x:x, y:y, symbol:(byte as char)}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64});}, } } } // quick lookup for door/key correspondance for i in 0..(*maze).doors.len() { for j in 0..(*maze).keys.len() { if (*maze).keys[j].symbol == (*maze).doors[i].symbol { (*maze).doors[i].key_index = j; break; } } } // cache distances between each key for i in 0..(*maze).keys.len() { (*maze).cached.insert(i, HashMap::new()); } for i in 0..(*maze).keys.len() { for j in 0..(*maze).keys.len() { if j == i { continue; } let mut doors = Vec::new(); let mut keys = Vec::new(); let dist = dijkstra_a(maze, (*maze).keys[i].x, (*maze).keys[i].y, (*maze).keys[j].x, (*maze).keys[j].y, &mut doors, &mut keys, true); let mut doorkeys:Vec<usize> = Vec::new(); for k in 0..doors.len() { doorkeys.push((*maze).doors[doors[k]].key_index); } (*maze).cached.get_mut(&i).unwrap().insert(j, CachedPath{dist: dist, keys: doorkeys}); } } let mut first_keys:Vec<usize> = Vec::new(); for i in 0..(*maze).keys.len() { if (*maze).keys[i].symbol == '@' { first_keys.push(i); } } return dijkstra_b(maze, 
&first_keys); } pub fn run(file_path:&str) { let mut maze = Maze{grid:Vec::new(), keys:Vec::new(), doors:Vec::new(), cached:HashMap::new(), height: 0, width: 0}; let mut maze2 = Maze{grid:Vec::new(), keys:Vec::new(), doors:Vec::new(), cached:HashMap::new(), height: 0, width: 0}; let vec = super::utility::util_fread(file_path); let mut vec2:Vec<String> = Vec::new(); let mut ox = 0; let mut oy = 0; if vec.len() == 0 { println!("Input not read properly"); return; } // test if maze is set up for part B for line in 0..vec.len() { let bytes = vec[line].as_bytes(); for pos in 0..bytes.len() { if bytes[pos] == '@' as u8 { ox = pos; oy = line; } } } let mut has_part_b = true; if ox + 1 >= vec[0].len() || (ox as i64 - 1) < 0 || oy + 1 >= vec.len() || (oy as i64 - 1) < 0 { has_part_b = false; } else { for y in oy-1..=oy+1 { let bytes = vec[y].as_bytes(); if y == oy-1 && (bytes[ox-1] != '.' as u8 || bytes[ox] != '.' as u8 || bytes[ox+1] != '.' as u8) { has_part_b = false; break; } else if y == oy && (bytes[ox-1] != '.' as u8 || bytes[ox] != '@' as u8 || bytes[ox+1] != '.' as u8) { has_part_b = false; break; } else if y == oy+1 && (bytes[ox-1] != '.' as u8 || bytes[ox] != '.' as u8 || bytes[ox+1] != '.' as u8) { has_part_b = false; break; } } } if has_part_b { for y in 0..vec.len() { let mut line = String::from(""); let bytes = vec[y].as_bytes(); for x in 0..vec[y].len() { if (x == ox - 1 && y == oy - 1) || (x == ox + 1 && y == oy - 1) || (x == ox - 1 && y == oy + 1) || (x == ox + 1 && y == oy + 1) { line.push('@'); } else if (x == ox && y == oy - 1) || (x == ox && y == oy + 1) || (x == ox - 1 && y == oy) || (x == ox + 1 && y == oy) || (x == ox && y == oy) { line.push('#'); } else { line.push(bytes[x] as char); } } vec2.push(line); } } let result_a = read_maze(vec, &mut maze); println!("Result A: {}", result_a); if has_part_b { let result_b = read_maze(vec2, &mut maze2); println!("Result B: {}", result_b); } }
{ let mut explored:HashMap<usize, DNode> = HashMap::new(); let mut frontier:HashMap<usize,DNode> = HashMap::new(); let mut frontier_next:HashMap<usize,DNode> = HashMap::new(); frontier_next.insert(exploredindex(maze, start_x, start_y), DNode{x:start_x, y:start_y, dist:0, parent_x:start_x, parent_y:start_y}); let dest_key = exploredindex(maze, end_x, end_y); while frontier_next.len() > 0 { frontier.clear(); for key in frontier_next.keys() { let node = frontier_next.get(key).unwrap(); let new_node = (*node).clone(); frontier.insert(*key, new_node); } frontier_next.clear(); for key in frontier.keys() { let node = frontier.get(key).unwrap(); let exploredindex1 = exploredindex(maze, (*node).x, (*node).y); if explored.contains_key(&exploredindex1) { let last_dist = explored.get(&exploredindex1).unwrap().dist; if (*node).dist < last_dist { let node2 = explored.get_mut(&exploredindex1).unwrap(); (*node2).dist = (*node).dist; (*node2).parent_x = (*node).parent_x; (*node2).parent_y = (*node).parent_y; } } else { let new_node = (*node).clone(); explored.insert(exploredindex1, new_node); } let mut xd:i64 = 0; let mut yd:i64 = 0; for i in 0..4 { if i == 0 { xd = -1; yd = 0; } else if i == 1 { xd = 1; yd = 0; } else if i == 2 { xd = 0; yd = 1; } else if i == 3 { xd = 0; yd = -1; } let x1 = (*node).x as i64 + xd; let y1 = (*node).y as i64 + yd; if x1 < 0 || x1 >= (*maze).width as i64 || y1 < 0 || y1 >= (*maze).height as i64 { continue; } else { if (*maze).grid[y1 as usize][x1 as usize].obstacle { continue; } let index = exploredindex(maze, x1 as usize, y1 as usize); let new_dist = (*node).dist + 1; if explored.contains_key(&index) && explored.get(&index).unwrap().dist <= new_dist { continue; } if frontier_next.contains_key(&index) && frontier_next.get(&index).unwrap().dist <= new_dist { continue; } frontier_next.insert(index, DNode{x:x1 as usize, y:y1 as usize, dist:new_dist, parent_x:(*node).x, parent_y:(*node).y}); } } } if explored.contains_key(&dest_key) { let end_node = 
explored.get(&dest_key).unwrap(); if ret_doors_keys { let mut curr_x = end_node.parent_x; let mut curr_y = end_node.parent_y; while !(curr_x == start_x && curr_y == start_y) { if (*maze).grid[curr_y][curr_x].key_index >= 0 { (*keys).push((*maze).grid[curr_y][curr_x].key_index as usize); } if (*maze).grid[curr_y][curr_x].door_index >= 0 { (*doors).push((*maze).grid[curr_y][curr_x].door_index as usize); } let index = exploredindex(maze, curr_x, curr_y); let trace = explored.get(&index).unwrap(); curr_x = trace.parent_x; curr_y = trace.parent_y; } } return end_node.dist as i64; } } return -1; }
identifier_body
day18.rs
// day 18 use std::collections::HashMap; struct MazeNode { obstacle: bool, key_index: i64, door_index: i64, } struct Key { x: usize, y: usize, symbol: char, } struct Door { _x: usize, _y: usize, symbol: char, key_index: usize, } struct Maze { grid: Vec<Vec<MazeNode>>, keys: Vec<Key>, doors: Vec<Door>, cached: HashMap<usize, HashMap<usize, CachedPath>>, width: usize, height: usize, } struct CachedPath { dist: i64, keys: Vec<usize>, } #[derive(Clone)] struct DNode { x: usize, y: usize, dist: usize, parent_x:usize, parent_y:usize } #[derive(Clone)] struct DNodeB { at:Vec<usize>, keys:Vec<usize>, dist:usize } fn intersect_count (vec_a:&Vec<usize>, vec_b:&Vec<usize>)->usize { let mut count = 0; for i in 0..vec_a.len() { for j in 0..vec_b.len() { if vec_b[j] == vec_a[i] { count+=1; break; } } } return count; } fn keynodeindex(maze:&mut Maze, keys: &Vec<usize>, at: &Vec<usize>)->String { let mut ret = String::from(""); let mut keys2 = Vec::new(); for i in 0..keys.len() { keys2.push((*maze).keys[(*keys)[i]].symbol); } keys2.sort(); for i in 0..(*at).len() { ret.push((*maze).keys[(*at)[i]].symbol); } ret.push('|'); for i in 0..keys2.len() { ret.push(keys2[i]); } return ret; } fn dijkstra_b(maze:&mut Maze, origins:&Vec<usize>)->usize { let mut frontier:HashMap<String, DNodeB> = HashMap::new(); let mut frontier_next:HashMap<String, DNodeB> = HashMap::new(); let mut explored:HashMap<String, DNodeB> = HashMap::new(); let mut candidates:HashMap<String, usize> = HashMap::new(); let mut start = DNodeB{at:Vec::new(), keys:Vec::new(), dist:0}; for i in 0..(*origins).len() { start.at.push((*origins)[i]); start.keys.push((*origins)[i]); } frontier_next.insert(keynodeindex(maze, &(start.keys), &(start.at)), start); while frontier_next.len() > 0 { frontier.clear(); for key in frontier_next.keys() { let node = frontier_next.get(key).unwrap(); let node2 = (*node).clone(); frontier.insert(key.to_string(), node2); } frontier_next.clear(); for key in frontier.keys() { //println!("Key {}", 
key); let node = frontier.get(key).unwrap(); if (*node).keys.len() == (*maze).keys.len() { if let Some(candidate) = candidates.get_mut(key) { if (*candidate) > (*node).dist { *candidate = (*node).dist; } } else { candidates.insert(key.to_string(), (*node).dist); } } // add to explored or update if let Some(explored_node) = explored.get_mut(key) { if (*explored_node).dist > (*node).dist { (*explored_node).keys.clear(); (*explored_node).at.clear(); for i in 0..(*node).keys.len() { (*explored_node).keys.push((*node).keys[i]); } for i in 0..(*node).at.len() { (*explored_node).at.push((*node).at[i]); } (*explored_node).dist = (*node).dist; } } else { let new_node = (*node).clone(); explored.insert(key.to_string(), new_node); } // add all next steps from all positions for p in 0..(*node).at.len() { for k in 0..(*maze).keys.len() { let mut present = false; for j in 0..(*node).keys.len() { if (*node).keys[j] == k { present = true; break; } } if present { continue; } let curr_key = (*node).at[p]; // if not accessible from current position if (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist < 0 { continue; } // if not accessible with current keys let required_keys = (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().keys.clone(); if intersect_count(&((*node).keys), &required_keys) < required_keys.len() { continue; } let mut new_keys = (*node).keys.clone(); new_keys.push(k); let mut new_at = (*node).at.clone(); new_at[p] = k; let new_keys_index = keynodeindex(maze, &new_keys, &new_at); let new_dist = (*node).dist + ((*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist as usize); // if previously explored and not shorter if explored.contains_key(&new_keys_index) && explored.get(&new_keys_index).unwrap().dist < new_dist { continue; } // if previously added to the frontier if frontier_next.contains_key(&new_keys_index) && frontier_next.get(&new_keys_index).unwrap().dist < new_dist { continue; } // add to frontier frontier_next.insert(new_keys_index, 
DNodeB{at:new_at, keys:new_keys, dist:new_dist}); } } } } let mut min_dist = 0; for candidate_key in candidates.keys() { let candidate = candidates.get(candidate_key).unwrap(); if min_dist == 0 || min_dist > *candidate { min_dist = *candidate; } } return min_dist; } fn exploredindex(maze: &mut Maze, x: usize, y:usize)->usize { return ((*maze).width * y) + x; } fn dijkstra_a(maze: &mut Maze, start_x:usize, start_y:usize, end_x:usize, end_y:usize, doors:&mut Vec<usize>, keys:&mut Vec<usize>, ret_doors_keys:bool)->i64 { let mut explored:HashMap<usize, DNode> = HashMap::new(); let mut frontier:HashMap<usize,DNode> = HashMap::new(); let mut frontier_next:HashMap<usize,DNode> = HashMap::new(); frontier_next.insert(exploredindex(maze, start_x, start_y), DNode{x:start_x, y:start_y, dist:0, parent_x:start_x, parent_y:start_y}); let dest_key = exploredindex(maze, end_x, end_y); while frontier_next.len() > 0 { frontier.clear(); for key in frontier_next.keys() { let node = frontier_next.get(key).unwrap(); let new_node = (*node).clone(); frontier.insert(*key, new_node); } frontier_next.clear(); for key in frontier.keys() { let node = frontier.get(key).unwrap(); let exploredindex1 = exploredindex(maze, (*node).x, (*node).y); if explored.contains_key(&exploredindex1) { let last_dist = explored.get(&exploredindex1).unwrap().dist; if (*node).dist < last_dist { let node2 = explored.get_mut(&exploredindex1).unwrap(); (*node2).dist = (*node).dist; (*node2).parent_x = (*node).parent_x; (*node2).parent_y = (*node).parent_y; } } else { let new_node = (*node).clone(); explored.insert(exploredindex1, new_node); } let mut xd:i64 = 0; let mut yd:i64 = 0; for i in 0..4 { if i == 0 { xd = -1; yd = 0;
} else if i == 3 { xd = 0; yd = -1; } let x1 = (*node).x as i64 + xd; let y1 = (*node).y as i64 + yd; if x1 < 0 || x1 >= (*maze).width as i64 || y1 < 0 || y1 >= (*maze).height as i64 { continue; } else { if (*maze).grid[y1 as usize][x1 as usize].obstacle { continue; } let index = exploredindex(maze, x1 as usize, y1 as usize); let new_dist = (*node).dist + 1; if explored.contains_key(&index) && explored.get(&index).unwrap().dist <= new_dist { continue; } if frontier_next.contains_key(&index) && frontier_next.get(&index).unwrap().dist <= new_dist { continue; } frontier_next.insert(index, DNode{x:x1 as usize, y:y1 as usize, dist:new_dist, parent_x:(*node).x, parent_y:(*node).y}); } } } if explored.contains_key(&dest_key) { let end_node = explored.get(&dest_key).unwrap(); if ret_doors_keys { let mut curr_x = end_node.parent_x; let mut curr_y = end_node.parent_y; while !(curr_x == start_x && curr_y == start_y) { if (*maze).grid[curr_y][curr_x].key_index >= 0 { (*keys).push((*maze).grid[curr_y][curr_x].key_index as usize); } if (*maze).grid[curr_y][curr_x].door_index >= 0 { (*doors).push((*maze).grid[curr_y][curr_x].door_index as usize); } let index = exploredindex(maze, curr_x, curr_y); let trace = explored.get(&index).unwrap(); curr_x = trace.parent_x; curr_y = trace.parent_y; } } return end_node.dist as i64; } } return -1; } fn read_maze(input: Vec<String>, maze:&mut Maze)->usize { (*maze).width = input[0].len(); (*maze).height = input.len(); // read origin, obstacles, doors and keys for y in 0..(*maze).height { (*maze).grid.push(Vec::new()); for x in 0..(*maze).width { let byte = input[y].as_bytes()[x]; match byte { 35=>(*maze).grid[y].push(MazeNode{obstacle:true, door_index: -1, key_index: -1}), 46=>(*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: -1}), 65..=90=> {(*maze).doors.push(Door{_x:x,_y:y,symbol:(byte as char),key_index:0}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: ((*maze).doors.len() - 1) as i64, key_index: -1}); 
}, 97..=122=> {(*maze).keys.push(Key{x:x,y:y,symbol:((byte-32) as char)}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64}); }, _=>{(*maze).keys.push(Key{x:x, y:y, symbol:(byte as char)}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64});}, } } } // quick lookup for door/key correspondance for i in 0..(*maze).doors.len() { for j in 0..(*maze).keys.len() { if (*maze).keys[j].symbol == (*maze).doors[i].symbol { (*maze).doors[i].key_index = j; break; } } } // cache distances between each key for i in 0..(*maze).keys.len() { (*maze).cached.insert(i, HashMap::new()); } for i in 0..(*maze).keys.len() { for j in 0..(*maze).keys.len() { if j == i { continue; } let mut doors = Vec::new(); let mut keys = Vec::new(); let dist = dijkstra_a(maze, (*maze).keys[i].x, (*maze).keys[i].y, (*maze).keys[j].x, (*maze).keys[j].y, &mut doors, &mut keys, true); let mut doorkeys:Vec<usize> = Vec::new(); for k in 0..doors.len() { doorkeys.push((*maze).doors[doors[k]].key_index); } (*maze).cached.get_mut(&i).unwrap().insert(j, CachedPath{dist: dist, keys: doorkeys}); } } let mut first_keys:Vec<usize> = Vec::new(); for i in 0..(*maze).keys.len() { if (*maze).keys[i].symbol == '@' { first_keys.push(i); } } return dijkstra_b(maze, &first_keys); } pub fn run(file_path:&str) { let mut maze = Maze{grid:Vec::new(), keys:Vec::new(), doors:Vec::new(), cached:HashMap::new(), height: 0, width: 0}; let mut maze2 = Maze{grid:Vec::new(), keys:Vec::new(), doors:Vec::new(), cached:HashMap::new(), height: 0, width: 0}; let vec = super::utility::util_fread(file_path); let mut vec2:Vec<String> = Vec::new(); let mut ox = 0; let mut oy = 0; if vec.len() == 0 { println!("Input not read properly"); return; } // test if maze is set up for part B for line in 0..vec.len() { let bytes = vec[line].as_bytes(); for pos in 0..bytes.len() { if bytes[pos] == '@' as u8 { ox = pos; oy = line; } } } let mut 
has_part_b = true; if ox + 1 >= vec[0].len() || (ox as i64 - 1) < 0 || oy + 1 >= vec.len() || (oy as i64 - 1) < 0 { has_part_b = false; } else { for y in oy-1..=oy+1 { let bytes = vec[y].as_bytes(); if y == oy-1 && (bytes[ox-1] != '.' as u8 || bytes[ox] != '.' as u8 || bytes[ox+1] != '.' as u8) { has_part_b = false; break; } else if y == oy && (bytes[ox-1] != '.' as u8 || bytes[ox] != '@' as u8 || bytes[ox+1] != '.' as u8) { has_part_b = false; break; } else if y == oy+1 && (bytes[ox-1] != '.' as u8 || bytes[ox] != '.' as u8 || bytes[ox+1] != '.' as u8) { has_part_b = false; break; } } } if has_part_b { for y in 0..vec.len() { let mut line = String::from(""); let bytes = vec[y].as_bytes(); for x in 0..vec[y].len() { if (x == ox - 1 && y == oy - 1) || (x == ox + 1 && y == oy - 1) || (x == ox - 1 && y == oy + 1) || (x == ox + 1 && y == oy + 1) { line.push('@'); } else if (x == ox && y == oy - 1) || (x == ox && y == oy + 1) || (x == ox - 1 && y == oy) || (x == ox + 1 && y == oy) || (x == ox && y == oy) { line.push('#'); } else { line.push(bytes[x] as char); } } vec2.push(line); } } let result_a = read_maze(vec, &mut maze); println!("Result A: {}", result_a); if has_part_b { let result_b = read_maze(vec2, &mut maze2); println!("Result B: {}", result_b); } }
} else if i == 1 { xd = 1; yd = 0; } else if i == 2 { xd = 0; yd = 1;
random_line_split
day18.rs
// day 18 use std::collections::HashMap; struct MazeNode { obstacle: bool, key_index: i64, door_index: i64, } struct Key { x: usize, y: usize, symbol: char, } struct Door { _x: usize, _y: usize, symbol: char, key_index: usize, } struct Maze { grid: Vec<Vec<MazeNode>>, keys: Vec<Key>, doors: Vec<Door>, cached: HashMap<usize, HashMap<usize, CachedPath>>, width: usize, height: usize, } struct CachedPath { dist: i64, keys: Vec<usize>, } #[derive(Clone)] struct
{ x: usize, y: usize, dist: usize, parent_x:usize, parent_y:usize } #[derive(Clone)] struct DNodeB { at:Vec<usize>, keys:Vec<usize>, dist:usize } fn intersect_count (vec_a:&Vec<usize>, vec_b:&Vec<usize>)->usize { let mut count = 0; for i in 0..vec_a.len() { for j in 0..vec_b.len() { if vec_b[j] == vec_a[i] { count+=1; break; } } } return count; } fn keynodeindex(maze:&mut Maze, keys: &Vec<usize>, at: &Vec<usize>)->String { let mut ret = String::from(""); let mut keys2 = Vec::new(); for i in 0..keys.len() { keys2.push((*maze).keys[(*keys)[i]].symbol); } keys2.sort(); for i in 0..(*at).len() { ret.push((*maze).keys[(*at)[i]].symbol); } ret.push('|'); for i in 0..keys2.len() { ret.push(keys2[i]); } return ret; } fn dijkstra_b(maze:&mut Maze, origins:&Vec<usize>)->usize { let mut frontier:HashMap<String, DNodeB> = HashMap::new(); let mut frontier_next:HashMap<String, DNodeB> = HashMap::new(); let mut explored:HashMap<String, DNodeB> = HashMap::new(); let mut candidates:HashMap<String, usize> = HashMap::new(); let mut start = DNodeB{at:Vec::new(), keys:Vec::new(), dist:0}; for i in 0..(*origins).len() { start.at.push((*origins)[i]); start.keys.push((*origins)[i]); } frontier_next.insert(keynodeindex(maze, &(start.keys), &(start.at)), start); while frontier_next.len() > 0 { frontier.clear(); for key in frontier_next.keys() { let node = frontier_next.get(key).unwrap(); let node2 = (*node).clone(); frontier.insert(key.to_string(), node2); } frontier_next.clear(); for key in frontier.keys() { //println!("Key {}", key); let node = frontier.get(key).unwrap(); if (*node).keys.len() == (*maze).keys.len() { if let Some(candidate) = candidates.get_mut(key) { if (*candidate) > (*node).dist { *candidate = (*node).dist; } } else { candidates.insert(key.to_string(), (*node).dist); } } // add to explored or update if let Some(explored_node) = explored.get_mut(key) { if (*explored_node).dist > (*node).dist { (*explored_node).keys.clear(); (*explored_node).at.clear(); for i in 
0..(*node).keys.len() { (*explored_node).keys.push((*node).keys[i]); } for i in 0..(*node).at.len() { (*explored_node).at.push((*node).at[i]); } (*explored_node).dist = (*node).dist; } } else { let new_node = (*node).clone(); explored.insert(key.to_string(), new_node); } // add all next steps from all positions for p in 0..(*node).at.len() { for k in 0..(*maze).keys.len() { let mut present = false; for j in 0..(*node).keys.len() { if (*node).keys[j] == k { present = true; break; } } if present { continue; } let curr_key = (*node).at[p]; // if not accessible from current position if (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist < 0 { continue; } // if not accessible with current keys let required_keys = (*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().keys.clone(); if intersect_count(&((*node).keys), &required_keys) < required_keys.len() { continue; } let mut new_keys = (*node).keys.clone(); new_keys.push(k); let mut new_at = (*node).at.clone(); new_at[p] = k; let new_keys_index = keynodeindex(maze, &new_keys, &new_at); let new_dist = (*node).dist + ((*maze).cached.get(&curr_key).unwrap().get(&k).unwrap().dist as usize); // if previously explored and not shorter if explored.contains_key(&new_keys_index) && explored.get(&new_keys_index).unwrap().dist < new_dist { continue; } // if previously added to the frontier if frontier_next.contains_key(&new_keys_index) && frontier_next.get(&new_keys_index).unwrap().dist < new_dist { continue; } // add to frontier frontier_next.insert(new_keys_index, DNodeB{at:new_at, keys:new_keys, dist:new_dist}); } } } } let mut min_dist = 0; for candidate_key in candidates.keys() { let candidate = candidates.get(candidate_key).unwrap(); if min_dist == 0 || min_dist > *candidate { min_dist = *candidate; } } return min_dist; } fn exploredindex(maze: &mut Maze, x: usize, y:usize)->usize { return ((*maze).width * y) + x; } fn dijkstra_a(maze: &mut Maze, start_x:usize, start_y:usize, end_x:usize, end_y:usize, doors:&mut 
Vec<usize>, keys:&mut Vec<usize>, ret_doors_keys:bool)->i64 { let mut explored:HashMap<usize, DNode> = HashMap::new(); let mut frontier:HashMap<usize,DNode> = HashMap::new(); let mut frontier_next:HashMap<usize,DNode> = HashMap::new(); frontier_next.insert(exploredindex(maze, start_x, start_y), DNode{x:start_x, y:start_y, dist:0, parent_x:start_x, parent_y:start_y}); let dest_key = exploredindex(maze, end_x, end_y); while frontier_next.len() > 0 { frontier.clear(); for key in frontier_next.keys() { let node = frontier_next.get(key).unwrap(); let new_node = (*node).clone(); frontier.insert(*key, new_node); } frontier_next.clear(); for key in frontier.keys() { let node = frontier.get(key).unwrap(); let exploredindex1 = exploredindex(maze, (*node).x, (*node).y); if explored.contains_key(&exploredindex1) { let last_dist = explored.get(&exploredindex1).unwrap().dist; if (*node).dist < last_dist { let node2 = explored.get_mut(&exploredindex1).unwrap(); (*node2).dist = (*node).dist; (*node2).parent_x = (*node).parent_x; (*node2).parent_y = (*node).parent_y; } } else { let new_node = (*node).clone(); explored.insert(exploredindex1, new_node); } let mut xd:i64 = 0; let mut yd:i64 = 0; for i in 0..4 { if i == 0 { xd = -1; yd = 0; } else if i == 1 { xd = 1; yd = 0; } else if i == 2 { xd = 0; yd = 1; } else if i == 3 { xd = 0; yd = -1; } let x1 = (*node).x as i64 + xd; let y1 = (*node).y as i64 + yd; if x1 < 0 || x1 >= (*maze).width as i64 || y1 < 0 || y1 >= (*maze).height as i64 { continue; } else { if (*maze).grid[y1 as usize][x1 as usize].obstacle { continue; } let index = exploredindex(maze, x1 as usize, y1 as usize); let new_dist = (*node).dist + 1; if explored.contains_key(&index) && explored.get(&index).unwrap().dist <= new_dist { continue; } if frontier_next.contains_key(&index) && frontier_next.get(&index).unwrap().dist <= new_dist { continue; } frontier_next.insert(index, DNode{x:x1 as usize, y:y1 as usize, dist:new_dist, parent_x:(*node).x, parent_y:(*node).y}); } } 
} if explored.contains_key(&dest_key) { let end_node = explored.get(&dest_key).unwrap(); if ret_doors_keys { let mut curr_x = end_node.parent_x; let mut curr_y = end_node.parent_y; while !(curr_x == start_x && curr_y == start_y) { if (*maze).grid[curr_y][curr_x].key_index >= 0 { (*keys).push((*maze).grid[curr_y][curr_x].key_index as usize); } if (*maze).grid[curr_y][curr_x].door_index >= 0 { (*doors).push((*maze).grid[curr_y][curr_x].door_index as usize); } let index = exploredindex(maze, curr_x, curr_y); let trace = explored.get(&index).unwrap(); curr_x = trace.parent_x; curr_y = trace.parent_y; } } return end_node.dist as i64; } } return -1; } fn read_maze(input: Vec<String>, maze:&mut Maze)->usize { (*maze).width = input[0].len(); (*maze).height = input.len(); // read origin, obstacles, doors and keys for y in 0..(*maze).height { (*maze).grid.push(Vec::new()); for x in 0..(*maze).width { let byte = input[y].as_bytes()[x]; match byte { 35=>(*maze).grid[y].push(MazeNode{obstacle:true, door_index: -1, key_index: -1}), 46=>(*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: -1}), 65..=90=> {(*maze).doors.push(Door{_x:x,_y:y,symbol:(byte as char),key_index:0}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: ((*maze).doors.len() - 1) as i64, key_index: -1}); }, 97..=122=> {(*maze).keys.push(Key{x:x,y:y,symbol:((byte-32) as char)}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64}); }, _=>{(*maze).keys.push(Key{x:x, y:y, symbol:(byte as char)}); (*maze).grid[y].push(MazeNode{obstacle:false, door_index: -1, key_index: ((*maze).keys.len() - 1) as i64});}, } } } // quick lookup for door/key correspondance for i in 0..(*maze).doors.len() { for j in 0..(*maze).keys.len() { if (*maze).keys[j].symbol == (*maze).doors[i].symbol { (*maze).doors[i].key_index = j; break; } } } // cache distances between each key for i in 0..(*maze).keys.len() { (*maze).cached.insert(i, HashMap::new()); } for 
i in 0..(*maze).keys.len() { for j in 0..(*maze).keys.len() { if j == i { continue; } let mut doors = Vec::new(); let mut keys = Vec::new(); let dist = dijkstra_a(maze, (*maze).keys[i].x, (*maze).keys[i].y, (*maze).keys[j].x, (*maze).keys[j].y, &mut doors, &mut keys, true); let mut doorkeys:Vec<usize> = Vec::new(); for k in 0..doors.len() { doorkeys.push((*maze).doors[doors[k]].key_index); } (*maze).cached.get_mut(&i).unwrap().insert(j, CachedPath{dist: dist, keys: doorkeys}); } } let mut first_keys:Vec<usize> = Vec::new(); for i in 0..(*maze).keys.len() { if (*maze).keys[i].symbol == '@' { first_keys.push(i); } } return dijkstra_b(maze, &first_keys); } pub fn run(file_path:&str) { let mut maze = Maze{grid:Vec::new(), keys:Vec::new(), doors:Vec::new(), cached:HashMap::new(), height: 0, width: 0}; let mut maze2 = Maze{grid:Vec::new(), keys:Vec::new(), doors:Vec::new(), cached:HashMap::new(), height: 0, width: 0}; let vec = super::utility::util_fread(file_path); let mut vec2:Vec<String> = Vec::new(); let mut ox = 0; let mut oy = 0; if vec.len() == 0 { println!("Input not read properly"); return; } // test if maze is set up for part B for line in 0..vec.len() { let bytes = vec[line].as_bytes(); for pos in 0..bytes.len() { if bytes[pos] == '@' as u8 { ox = pos; oy = line; } } } let mut has_part_b = true; if ox + 1 >= vec[0].len() || (ox as i64 - 1) < 0 || oy + 1 >= vec.len() || (oy as i64 - 1) < 0 { has_part_b = false; } else { for y in oy-1..=oy+1 { let bytes = vec[y].as_bytes(); if y == oy-1 && (bytes[ox-1] != '.' as u8 || bytes[ox] != '.' as u8 || bytes[ox+1] != '.' as u8) { has_part_b = false; break; } else if y == oy && (bytes[ox-1] != '.' as u8 || bytes[ox] != '@' as u8 || bytes[ox+1] != '.' as u8) { has_part_b = false; break; } else if y == oy+1 && (bytes[ox-1] != '.' as u8 || bytes[ox] != '.' as u8 || bytes[ox+1] != '.' 
as u8) { has_part_b = false; break; } } } if has_part_b { for y in 0..vec.len() { let mut line = String::from(""); let bytes = vec[y].as_bytes(); for x in 0..vec[y].len() { if (x == ox - 1 && y == oy - 1) || (x == ox + 1 && y == oy - 1) || (x == ox - 1 && y == oy + 1) || (x == ox + 1 && y == oy + 1) { line.push('@'); } else if (x == ox && y == oy - 1) || (x == ox && y == oy + 1) || (x == ox - 1 && y == oy) || (x == ox + 1 && y == oy) || (x == ox && y == oy) { line.push('#'); } else { line.push(bytes[x] as char); } } vec2.push(line); } } let result_a = read_maze(vec, &mut maze); println!("Result A: {}", result_a); if has_part_b { let result_b = read_maze(vec2, &mut maze2); println!("Result B: {}", result_b); } }
DNode
identifier_name
mtaresolver.go
package resolver import ( "encoding/json" "fmt" "os" "path/filepath" "strings" "github.com/joho/godotenv" "github.com/pkg/errors" "github.com/SAP/cloud-mta/internal/logs" "github.com/SAP/cloud-mta/mta" ) const ( emptyModuleNameMsg = "provide a name for the module" moduleNotFoundMsg = `could not find the "%s" module` marshalFailsMag = `could not marshal the "%s" environment variable` missingPrefixMsg = `could not resolve the value for the "~{%s}" variable; missing required prefix` defaultEnvFileName = ".env" ) var envGetter = os.Environ // ResolveResult is the result of the Resolve function. This is serialized to json when requested. type ResolveResult struct { Properties map[string]string `json:"properties"` Messages []string `json:"messages"` } // Resolve - resolve module's parameters func Resolve(workspaceDir, moduleName, path string, extensions []string, envFile string) (result ResolveResult, messages []string, err error) { if len(moduleName) == 0 { return result, nil, errors.New(emptyModuleNameMsg) } mtaRaw, messages, err := mta.GetMtaFromFile(path, extensions, false) if err != nil { return result, messages, err } if len(workspaceDir) == 0 { workspaceDir = filepath.Dir(path) } // If environment file name is not provided - set the default file name to .env envFilePath := defaultEnvFileName if len(envFile) > 0 { envFilePath = envFile } m := NewMTAResolver(mtaRaw, workspaceDir) for _, module := range m.GetModules() { if module.Name == moduleName { m.ResolveProperties(module, envFilePath) propVarMap, err := getPropertiesAsEnvVar(module) if err != nil { return result, messages, err } result.Properties = propVarMap result.Messages = m.messages return result, messages, nil } } return result, messages, errors.Errorf(moduleNotFoundMsg, moduleName) } func getPropertiesAsEnvVar(module *mta.Module) (map[string]string, error) { envVar := map[string]interface{}{} for key, val := range module.Properties { envVar[key] = val } for _, requires := range module.Requires { propMap 
:= envVar if len(requires.Group) > 0 { propMap = map[string]interface{}{} } for key, val := range requires.Properties { propMap[key] = val } if len(requires.Group) > 0 { //append the array element to group group, ok := envVar[requires.Group] if ok { groupArray := group.([]map[string]interface{}) envVar[requires.Group] = append(groupArray, propMap) } else { envVar[requires.Group] = []map[string]interface{}{propMap} } } } //serialize return serializePropertiesAsEnvVars(envVar) } func serializePropertiesAsEnvVars(envVar map[string]interface{}) (map[string]string, error) { retEnvVar := map[string]string{} for key, val := range envVar { switch v := val.(type) { case string: retEnvVar[key] = v default: bytesVal, err := json.Marshal(val) if err != nil { return nil, errors.Errorf(marshalFailsMag, key) } retEnvVar[key] = string(bytesVal) } } return retEnvVar, nil } // MTAResolver is used to resolve MTA properties' variables type MTAResolver struct { mta.MTA WorkingDir string context *ResolveContext messages []string } const resourceType = 1 const moduleType = 2 const variablePrefix = "~" const placeholderPrefix = "$" type mtaSource struct { Name string Parameters map[string]interface{} `yaml:"parameters,omitempty"` Properties map[string]interface{} `yaml:"properties,omitempty"` Type int Module *mta.Module Resource *mta.Resource } // NewMTAResolver is a factory function for MTAResolver func NewMTAResolver(m *mta.MTA, workspaceDir string) *MTAResolver { resolver := &MTAResolver{*m, workspaceDir, &ResolveContext{ global: map[string]string{}, modules: map[string]map[string]string{}, resources: map[string]map[string]string{}, }, []string{}} for _, module := range m.Modules { resolver.context.modules[module.Name] = map[string]string{} } for _, resource := range m.Resources { resolver.context.resources[resource.Name] = map[string]string{} } return resolver } func resolvePath(path string, parts ...string) string { absolutePath := path if !filepath.IsAbs(path) { absolutePath = 
filepath.Join(append(parts, absolutePath)...) } return absolutePath } // ResolveProperties is the main function to trigger the resolution func (m *MTAResolver) ResolveProperties(module *mta.Module, envFilePath string) { if m.Parameters == nil { m.Parameters = map[string]interface{}{} } //add env variables for _, val := range envGetter() { pos := strings.Index(val, "=") if pos > 0 { key := strings.Trim(val[:pos], " ") value := strings.Trim(val[pos+1:], " ") m.addValueToContext(key, value) } } //add .env file in module's path to the module context if len(module.Path) > 0 { envFile := resolvePath(envFilePath, m.WorkingDir, module.Path) envMap, err := godotenv.Read(envFile) if err == nil { for key, value := range envMap { m.addValueToContext(key, value) } } } m.addServiceNames(module) //top level properties for key, value := range module.Properties { //no expected variables propValue := m.resolve(module, nil, value) module.Properties[key] = m.resolvePlaceholders(module, nil, nil, propValue) } //required properties: for _, req := range module.Requires { requiredSource := m.findProvider(req.Name) for propName, PropValue := range req.Properties { resolvedValue := m.resolve(module, &req, PropValue) //replace value with resolved value req.Properties[propName] = m.resolvePlaceholders(module, requiredSource, &req, resolvedValue) } } } func (m *MTAResolver) addValueToContext(key, value string) { //if the key has format of "module/key", or "resource/key" writes the value to the module's context slashPos := strings.Index(key, "/") if slashPos > 0 { modName := key[:slashPos] key = key[slashPos+1:] modulesContext, ok := m.context.modules[modName] if !ok { modulesContext, ok = m.context.resources[modName] } if ok { modulesContext[key] = value } } else { m.context.global[key] = value } } func (m *MTAResolver) resolve(sourceModule *mta.Module, requires *mta.Requires, valueObj interface{}) interface{} { switch valueObj := valueObj.(type) { case map[interface{}]interface{}: v := 
convertToJSONSafe(valueObj) return m.resolve(sourceModule, requires, v) case map[string]interface{}: for k, v := range valueObj { valueObj[k] = m.resolve(sourceModule, requires, v) } return valueObj case []interface{}: for i, v := range valueObj { valueObj[i] = m.resolve(sourceModule, requires, v) } return valueObj case string: return m.resolveString(sourceModule, requires, valueObj) default: //if the value is not a string but a leaf, just return it return valueObj } } func (m *MTAResolver) resolveString(sourceModule *mta.Module, requires *mta.Requires, value string) interface{} { pos := 0 pos, variableName, wholeValue := parseNextVariable(pos, value, variablePrefix) if pos < 0 { //no variables return value } varValue := m.getVariableValue(sourceModule, requires, variableName) if wholeValue { return varValue } for pos >= 0 { varValueStr, _ := convertToString(varValue) value = value[:pos] + varValueStr + value[pos+len(variableName)+3:] pos, variableName, _ = parseNextVariable(pos+len(varValueStr), value, variablePrefix) if pos >= 0 { varValue = m.getVariableValue(sourceModule, requires, variableName) } } return value } func convertToString(valueObj interface{}) (string, bool) { switch v := valueObj.(type) { case string: return v, false } valueBytes, err := json.Marshal(convertToJSONSafe(valueObj)) if err != nil { logs.Logger.Error(err) return "", false } return string(valueBytes), true } // return start position, name of variable and if it is a whole value func parseNextVariable(pos int, value string, prefix string) (int, string, bool) { endSign := "}" posStart := strings.Index(value[pos:], prefix+"{") if posStart < 0 { return -1, "", false } posStart += pos if string(value[posStart+2]) == "{" { endSign = "}}" } posEnd := strings.Index(value[posStart+2:], endSign) if posEnd < 0 { //bad value return -1, "", false } posEnd += posStart + 1 + len(endSign) wholeValue := posStart == 0 && posEnd == len(value)-1 return posStart, value[posStart+2 : posEnd], wholeValue } func 
(m *MTAResolver) getVariableValue(sourceModule *mta.Module, requires *mta.Requires, variableName string) interface{} { var providerName string if requires == nil { slashPos := strings.Index(variableName, "/") if slashPos > 0 { providerName = variableName[:slashPos] variableName = variableName[slashPos+1:] } else { m.addMessage(fmt.Sprintf(missingPrefixMsg, variableName)) return "~{" + variableName + "}" } } else { providerName = requires.Name } source := m.findProvider(providerName) if source != nil { for propName, propValue := range source.Properties { if propName == variableName { //Do not pass module and requires, because it is a wrong scope //it is either global->module->requires //or global->resource propValue = m.resolvePlaceholders(nil, source, nil, propValue) return convertToJSONSafe(propValue) } } } if source != nil && source.Type == resourceType && source.Resource.Type == "configuration" { provID, ok := getStringFromMap(source.Resource.Parameters, "provider-id") if ok { m.addMessage(fmt.Sprint("Missing configuration ", provID, "/", variableName)) } } return "~{" + variableName + "}" } func (m *MTAResolver) resolvePlaceholders(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, valueObj interface{}) interface{} { switch valueObj := valueObj.(type) { case map[interface{}]interface{}: v := convertToJSONSafe(valueObj) return m.resolvePlaceholders(sourceModule, source, requires, v) case map[string]interface{}: for k, v := range valueObj { valueObj[k] = m.resolvePlaceholders(sourceModule, source, requires, v) } return valueObj case []interface{}: for k, v := range valueObj { valueObj[k] = m.resolvePlaceholders(sourceModule, source, requires, v) } return valueObj case string: return m.resolvePlaceholdersString(sourceModule, source, requires, valueObj) default: //if the value is not a string but a leaf, just return it return valueObj } } func (m *MTAResolver) resolvePlaceholdersString(sourceModule *mta.Module, source *mtaSource, requires 
*mta.Requires, value string) interface{} { pos := 0 pos, placeholderName, wholeValue := parseNextVariable(pos, value, placeholderPrefix) if pos < 0 { return value } placeholderValue := m.getParameter(sourceModule, source, requires, placeholderName) if wholeValue { return placeholderValue } for pos >= 0 { phValueStr, _ := convertToString(placeholderValue) value = value[:pos] + phValueStr + value[pos+len(placeholderName)+3:] pos, placeholderName, _ = parseNextVariable(pos+len(phValueStr), value, placeholderPrefix) if pos >= 0 { placeholderValue = m.getParameter(sourceModule, source, requires, placeholderName) } } return value } func (m *MTAResolver) getParameterFromSource(source *mtaSource, paramName string) string
func (m *MTAResolver) getParameter(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, paramName string) string { //first on source parameters scope paramValStr := m.getParameterFromSource(source, paramName) //first on source parameters scope if paramValStr != "" { return paramValStr } //then try on requires level if requires != nil { paramVal, ok := getStringFromMap(requires.Parameters, paramName) if ok { return paramVal } } if sourceModule != nil { paramVal, ok := getStringFromMap(sourceModule.Parameters, paramName) if ok { return paramVal } //defaults to context's module params: paramValStr, ok = m.context.modules[sourceModule.Name][paramName] if ok { return paramValStr } } //then on MTA root scope paramVal, ok := getStringFromMap(m.Parameters, paramName) if ok { return paramVal } //then global scope paramValStr, ok = m.context.global[paramName] if ok { return paramValStr } if source == nil { m.addMessage(fmt.Sprint("Missing ", paramName)) } else { m.addMessage(fmt.Sprint("Missing ", source.Name+"/"+paramName)) } return "${" + paramName + "}" } func (m *MTAResolver) findProvider(name string) *mtaSource { for _, module := range m.Modules { for _, provides := range module.Provides { if provides.Name == name { source := mtaSource{Name: module.Name, Properties: provides.Properties, Parameters: nil, Type: moduleType, Module: module} return &source } } } //in case of resource, its name is the matching to the requires name for _, resource := range m.Resources { if resource.Name == name { source := mtaSource{Name: resource.Name, Properties: resource.Properties, Parameters: resource.Parameters, Type: resourceType, Resource: resource} return &source } } return nil } func (m *MTAResolver) addMessage(message string) { // This check is necessary so the same message won't be written twice. 
// This happens when a placeholder references a parameter that is not defined, // because we try to resolve the parameter while resolving the placeholder and then // we try to resolve the parameter again as a parameter. if !containsString(m.messages, message) { m.messages = append(m.messages, message) } } func containsString(slice []string, value string) bool { for _, curr := range slice { if curr == value { return true } } return false } func convertToJSONSafe(val interface{}) interface{} { switch v := val.(type) { case map[interface{}]interface{}: res := map[string]interface{}{} for k, v := range v { res[fmt.Sprint(k)] = convertToJSONSafe(v) } return res case []interface{}: for k, v2 := range v { v[k] = convertToJSONSafe(v2) } return v } return val } func getStringFromMap(params map[string]interface{}, key string) (string, bool) { // Only return the parameter value if it's a string, to prevent a panic. // Note: this is used mainly for parameter values during resolve. // The deployer DOES support non-string parameters, both as the whole value // (it keeps the same type) and inside a string. // It stringifies the value in side a string but it's not the usual json stringify. // For example, if we have this string: // prop_from_resource: "this is the prop: ~{some_prop}" // And some_prop is defined like this: // stuct_field: abc // We will get a resolved value like this from the Deployer: // "this is the prop: {stuct_field=abc}" // We do not support this use case currently. value, ok := params[key] if ok && value != nil { str, isString := value.(string) if isString { return str, true } } return "", false }
{ if source != nil { // See if the value was configured externally first (in VCAP_SERVICES, env var etc) // The source can be a module or a resource module, found := m.context.modules[source.Name] if found { paramValStr, ok := module[paramName] if ok { return paramValStr } } resource, found := m.context.resources[source.Name] if found { paramValStr, ok := resource[paramName] if ok { return paramValStr } } // If it was not defined externally, try to get it from the source parameters paramVal, found := getStringFromMap(source.Parameters, paramName) if found { return paramVal } } return "" }
identifier_body
mtaresolver.go
package resolver import ( "encoding/json" "fmt" "os" "path/filepath" "strings" "github.com/joho/godotenv" "github.com/pkg/errors" "github.com/SAP/cloud-mta/internal/logs" "github.com/SAP/cloud-mta/mta" ) const ( emptyModuleNameMsg = "provide a name for the module" moduleNotFoundMsg = `could not find the "%s" module` marshalFailsMag = `could not marshal the "%s" environment variable` missingPrefixMsg = `could not resolve the value for the "~{%s}" variable; missing required prefix` defaultEnvFileName = ".env" ) var envGetter = os.Environ // ResolveResult is the result of the Resolve function. This is serialized to json when requested. type ResolveResult struct { Properties map[string]string `json:"properties"` Messages []string `json:"messages"` } // Resolve - resolve module's parameters func Resolve(workspaceDir, moduleName, path string, extensions []string, envFile string) (result ResolveResult, messages []string, err error) { if len(moduleName) == 0 { return result, nil, errors.New(emptyModuleNameMsg) } mtaRaw, messages, err := mta.GetMtaFromFile(path, extensions, false) if err != nil { return result, messages, err } if len(workspaceDir) == 0 { workspaceDir = filepath.Dir(path) } // If environment file name is not provided - set the default file name to .env envFilePath := defaultEnvFileName if len(envFile) > 0 { envFilePath = envFile } m := NewMTAResolver(mtaRaw, workspaceDir) for _, module := range m.GetModules() { if module.Name == moduleName { m.ResolveProperties(module, envFilePath) propVarMap, err := getPropertiesAsEnvVar(module) if err != nil { return result, messages, err } result.Properties = propVarMap result.Messages = m.messages return result, messages, nil } } return result, messages, errors.Errorf(moduleNotFoundMsg, moduleName) } func getPropertiesAsEnvVar(module *mta.Module) (map[string]string, error) { envVar := map[string]interface{}{} for key, val := range module.Properties { envVar[key] = val } for _, requires := range module.Requires { propMap 
:= envVar if len(requires.Group) > 0 { propMap = map[string]interface{}{} } for key, val := range requires.Properties { propMap[key] = val } if len(requires.Group) > 0 { //append the array element to group group, ok := envVar[requires.Group] if ok { groupArray := group.([]map[string]interface{}) envVar[requires.Group] = append(groupArray, propMap) } else { envVar[requires.Group] = []map[string]interface{}{propMap} } } } //serialize return serializePropertiesAsEnvVars(envVar) } func serializePropertiesAsEnvVars(envVar map[string]interface{}) (map[string]string, error) { retEnvVar := map[string]string{} for key, val := range envVar { switch v := val.(type) { case string: retEnvVar[key] = v default: bytesVal, err := json.Marshal(val) if err != nil { return nil, errors.Errorf(marshalFailsMag, key) } retEnvVar[key] = string(bytesVal) } } return retEnvVar, nil } // MTAResolver is used to resolve MTA properties' variables type MTAResolver struct { mta.MTA WorkingDir string context *ResolveContext messages []string } const resourceType = 1 const moduleType = 2 const variablePrefix = "~" const placeholderPrefix = "$" type mtaSource struct { Name string Parameters map[string]interface{} `yaml:"parameters,omitempty"` Properties map[string]interface{} `yaml:"properties,omitempty"` Type int Module *mta.Module Resource *mta.Resource } // NewMTAResolver is a factory function for MTAResolver func NewMTAResolver(m *mta.MTA, workspaceDir string) *MTAResolver { resolver := &MTAResolver{*m, workspaceDir, &ResolveContext{ global: map[string]string{}, modules: map[string]map[string]string{}, resources: map[string]map[string]string{}, }, []string{}} for _, module := range m.Modules { resolver.context.modules[module.Name] = map[string]string{} } for _, resource := range m.Resources { resolver.context.resources[resource.Name] = map[string]string{} } return resolver } func resolvePath(path string, parts ...string) string { absolutePath := path if !filepath.IsAbs(path) { absolutePath = 
filepath.Join(append(parts, absolutePath)...) } return absolutePath } // ResolveProperties is the main function to trigger the resolution func (m *MTAResolver) ResolveProperties(module *mta.Module, envFilePath string) { if m.Parameters == nil { m.Parameters = map[string]interface{}{} } //add env variables for _, val := range envGetter() { pos := strings.Index(val, "=") if pos > 0 { key := strings.Trim(val[:pos], " ") value := strings.Trim(val[pos+1:], " ") m.addValueToContext(key, value) } } //add .env file in module's path to the module context if len(module.Path) > 0 { envFile := resolvePath(envFilePath, m.WorkingDir, module.Path) envMap, err := godotenv.Read(envFile) if err == nil { for key, value := range envMap { m.addValueToContext(key, value) } } } m.addServiceNames(module) //top level properties
propValue := m.resolve(module, nil, value) module.Properties[key] = m.resolvePlaceholders(module, nil, nil, propValue) } //required properties: for _, req := range module.Requires { requiredSource := m.findProvider(req.Name) for propName, PropValue := range req.Properties { resolvedValue := m.resolve(module, &req, PropValue) //replace value with resolved value req.Properties[propName] = m.resolvePlaceholders(module, requiredSource, &req, resolvedValue) } } } func (m *MTAResolver) addValueToContext(key, value string) { //if the key has format of "module/key", or "resource/key" writes the value to the module's context slashPos := strings.Index(key, "/") if slashPos > 0 { modName := key[:slashPos] key = key[slashPos+1:] modulesContext, ok := m.context.modules[modName] if !ok { modulesContext, ok = m.context.resources[modName] } if ok { modulesContext[key] = value } } else { m.context.global[key] = value } } func (m *MTAResolver) resolve(sourceModule *mta.Module, requires *mta.Requires, valueObj interface{}) interface{} { switch valueObj := valueObj.(type) { case map[interface{}]interface{}: v := convertToJSONSafe(valueObj) return m.resolve(sourceModule, requires, v) case map[string]interface{}: for k, v := range valueObj { valueObj[k] = m.resolve(sourceModule, requires, v) } return valueObj case []interface{}: for i, v := range valueObj { valueObj[i] = m.resolve(sourceModule, requires, v) } return valueObj case string: return m.resolveString(sourceModule, requires, valueObj) default: //if the value is not a string but a leaf, just return it return valueObj } } func (m *MTAResolver) resolveString(sourceModule *mta.Module, requires *mta.Requires, value string) interface{} { pos := 0 pos, variableName, wholeValue := parseNextVariable(pos, value, variablePrefix) if pos < 0 { //no variables return value } varValue := m.getVariableValue(sourceModule, requires, variableName) if wholeValue { return varValue } for pos >= 0 { varValueStr, _ := convertToString(varValue) value = 
value[:pos] + varValueStr + value[pos+len(variableName)+3:] pos, variableName, _ = parseNextVariable(pos+len(varValueStr), value, variablePrefix) if pos >= 0 { varValue = m.getVariableValue(sourceModule, requires, variableName) } } return value } func convertToString(valueObj interface{}) (string, bool) { switch v := valueObj.(type) { case string: return v, false } valueBytes, err := json.Marshal(convertToJSONSafe(valueObj)) if err != nil { logs.Logger.Error(err) return "", false } return string(valueBytes), true } // return start position, name of variable and if it is a whole value func parseNextVariable(pos int, value string, prefix string) (int, string, bool) { endSign := "}" posStart := strings.Index(value[pos:], prefix+"{") if posStart < 0 { return -1, "", false } posStart += pos if string(value[posStart+2]) == "{" { endSign = "}}" } posEnd := strings.Index(value[posStart+2:], endSign) if posEnd < 0 { //bad value return -1, "", false } posEnd += posStart + 1 + len(endSign) wholeValue := posStart == 0 && posEnd == len(value)-1 return posStart, value[posStart+2 : posEnd], wholeValue } func (m *MTAResolver) getVariableValue(sourceModule *mta.Module, requires *mta.Requires, variableName string) interface{} { var providerName string if requires == nil { slashPos := strings.Index(variableName, "/") if slashPos > 0 { providerName = variableName[:slashPos] variableName = variableName[slashPos+1:] } else { m.addMessage(fmt.Sprintf(missingPrefixMsg, variableName)) return "~{" + variableName + "}" } } else { providerName = requires.Name } source := m.findProvider(providerName) if source != nil { for propName, propValue := range source.Properties { if propName == variableName { //Do not pass module and requires, because it is a wrong scope //it is either global->module->requires //or global->resource propValue = m.resolvePlaceholders(nil, source, nil, propValue) return convertToJSONSafe(propValue) } } } if source != nil && source.Type == resourceType && 
source.Resource.Type == "configuration" { provID, ok := getStringFromMap(source.Resource.Parameters, "provider-id") if ok { m.addMessage(fmt.Sprint("Missing configuration ", provID, "/", variableName)) } } return "~{" + variableName + "}" } func (m *MTAResolver) resolvePlaceholders(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, valueObj interface{}) interface{} { switch valueObj := valueObj.(type) { case map[interface{}]interface{}: v := convertToJSONSafe(valueObj) return m.resolvePlaceholders(sourceModule, source, requires, v) case map[string]interface{}: for k, v := range valueObj { valueObj[k] = m.resolvePlaceholders(sourceModule, source, requires, v) } return valueObj case []interface{}: for k, v := range valueObj { valueObj[k] = m.resolvePlaceholders(sourceModule, source, requires, v) } return valueObj case string: return m.resolvePlaceholdersString(sourceModule, source, requires, valueObj) default: //if the value is not a string but a leaf, just return it return valueObj } } func (m *MTAResolver) resolvePlaceholdersString(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, value string) interface{} { pos := 0 pos, placeholderName, wholeValue := parseNextVariable(pos, value, placeholderPrefix) if pos < 0 { return value } placeholderValue := m.getParameter(sourceModule, source, requires, placeholderName) if wholeValue { return placeholderValue } for pos >= 0 { phValueStr, _ := convertToString(placeholderValue) value = value[:pos] + phValueStr + value[pos+len(placeholderName)+3:] pos, placeholderName, _ = parseNextVariable(pos+len(phValueStr), value, placeholderPrefix) if pos >= 0 { placeholderValue = m.getParameter(sourceModule, source, requires, placeholderName) } } return value } func (m *MTAResolver) getParameterFromSource(source *mtaSource, paramName string) string { if source != nil { // See if the value was configured externally first (in VCAP_SERVICES, env var etc) // The source can be a module or a resource module, 
found := m.context.modules[source.Name] if found { paramValStr, ok := module[paramName] if ok { return paramValStr } } resource, found := m.context.resources[source.Name] if found { paramValStr, ok := resource[paramName] if ok { return paramValStr } } // If it was not defined externally, try to get it from the source parameters paramVal, found := getStringFromMap(source.Parameters, paramName) if found { return paramVal } } return "" } func (m *MTAResolver) getParameter(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, paramName string) string { //first on source parameters scope paramValStr := m.getParameterFromSource(source, paramName) //first on source parameters scope if paramValStr != "" { return paramValStr } //then try on requires level if requires != nil { paramVal, ok := getStringFromMap(requires.Parameters, paramName) if ok { return paramVal } } if sourceModule != nil { paramVal, ok := getStringFromMap(sourceModule.Parameters, paramName) if ok { return paramVal } //defaults to context's module params: paramValStr, ok = m.context.modules[sourceModule.Name][paramName] if ok { return paramValStr } } //then on MTA root scope paramVal, ok := getStringFromMap(m.Parameters, paramName) if ok { return paramVal } //then global scope paramValStr, ok = m.context.global[paramName] if ok { return paramValStr } if source == nil { m.addMessage(fmt.Sprint("Missing ", paramName)) } else { m.addMessage(fmt.Sprint("Missing ", source.Name+"/"+paramName)) } return "${" + paramName + "}" } func (m *MTAResolver) findProvider(name string) *mtaSource { for _, module := range m.Modules { for _, provides := range module.Provides { if provides.Name == name { source := mtaSource{Name: module.Name, Properties: provides.Properties, Parameters: nil, Type: moduleType, Module: module} return &source } } } //in case of resource, its name is the matching to the requires name for _, resource := range m.Resources { if resource.Name == name { source := mtaSource{Name: 
resource.Name, Properties: resource.Properties, Parameters: resource.Parameters, Type: resourceType, Resource: resource} return &source } } return nil } func (m *MTAResolver) addMessage(message string) { // This check is necessary so the same message won't be written twice. // This happens when a placeholder references a parameter that is not defined, // because we try to resolve the parameter while resolving the placeholder and then // we try to resolve the parameter again as a parameter. if !containsString(m.messages, message) { m.messages = append(m.messages, message) } } func containsString(slice []string, value string) bool { for _, curr := range slice { if curr == value { return true } } return false } func convertToJSONSafe(val interface{}) interface{} { switch v := val.(type) { case map[interface{}]interface{}: res := map[string]interface{}{} for k, v := range v { res[fmt.Sprint(k)] = convertToJSONSafe(v) } return res case []interface{}: for k, v2 := range v { v[k] = convertToJSONSafe(v2) } return v } return val } func getStringFromMap(params map[string]interface{}, key string) (string, bool) { // Only return the parameter value if it's a string, to prevent a panic. // Note: this is used mainly for parameter values during resolve. // The deployer DOES support non-string parameters, both as the whole value // (it keeps the same type) and inside a string. // It stringifies the value in side a string but it's not the usual json stringify. // For example, if we have this string: // prop_from_resource: "this is the prop: ~{some_prop}" // And some_prop is defined like this: // stuct_field: abc // We will get a resolved value like this from the Deployer: // "this is the prop: {stuct_field=abc}" // We do not support this use case currently. value, ok := params[key] if ok && value != nil { str, isString := value.(string) if isString { return str, true } } return "", false }
for key, value := range module.Properties { //no expected variables
random_line_split
mtaresolver.go
package resolver import ( "encoding/json" "fmt" "os" "path/filepath" "strings" "github.com/joho/godotenv" "github.com/pkg/errors" "github.com/SAP/cloud-mta/internal/logs" "github.com/SAP/cloud-mta/mta" ) const ( emptyModuleNameMsg = "provide a name for the module" moduleNotFoundMsg = `could not find the "%s" module` marshalFailsMag = `could not marshal the "%s" environment variable` missingPrefixMsg = `could not resolve the value for the "~{%s}" variable; missing required prefix` defaultEnvFileName = ".env" ) var envGetter = os.Environ // ResolveResult is the result of the Resolve function. This is serialized to json when requested. type ResolveResult struct { Properties map[string]string `json:"properties"` Messages []string `json:"messages"` } // Resolve - resolve module's parameters func Resolve(workspaceDir, moduleName, path string, extensions []string, envFile string) (result ResolveResult, messages []string, err error) { if len(moduleName) == 0 { return result, nil, errors.New(emptyModuleNameMsg) } mtaRaw, messages, err := mta.GetMtaFromFile(path, extensions, false) if err != nil { return result, messages, err } if len(workspaceDir) == 0 { workspaceDir = filepath.Dir(path) } // If environment file name is not provided - set the default file name to .env envFilePath := defaultEnvFileName if len(envFile) > 0 { envFilePath = envFile } m := NewMTAResolver(mtaRaw, workspaceDir) for _, module := range m.GetModules() { if module.Name == moduleName { m.ResolveProperties(module, envFilePath) propVarMap, err := getPropertiesAsEnvVar(module) if err != nil { return result, messages, err } result.Properties = propVarMap result.Messages = m.messages return result, messages, nil } } return result, messages, errors.Errorf(moduleNotFoundMsg, moduleName) } func getPropertiesAsEnvVar(module *mta.Module) (map[string]string, error) { envVar := map[string]interface{}{} for key, val := range module.Properties { envVar[key] = val } for _, requires := range module.Requires { propMap 
:= envVar if len(requires.Group) > 0 { propMap = map[string]interface{}{} } for key, val := range requires.Properties { propMap[key] = val } if len(requires.Group) > 0 { //append the array element to group group, ok := envVar[requires.Group] if ok { groupArray := group.([]map[string]interface{}) envVar[requires.Group] = append(groupArray, propMap) } else { envVar[requires.Group] = []map[string]interface{}{propMap} } } } //serialize return serializePropertiesAsEnvVars(envVar) } func serializePropertiesAsEnvVars(envVar map[string]interface{}) (map[string]string, error) { retEnvVar := map[string]string{} for key, val := range envVar { switch v := val.(type) { case string: retEnvVar[key] = v default: bytesVal, err := json.Marshal(val) if err != nil { return nil, errors.Errorf(marshalFailsMag, key) } retEnvVar[key] = string(bytesVal) } } return retEnvVar, nil } // MTAResolver is used to resolve MTA properties' variables type MTAResolver struct { mta.MTA WorkingDir string context *ResolveContext messages []string } const resourceType = 1 const moduleType = 2 const variablePrefix = "~" const placeholderPrefix = "$" type mtaSource struct { Name string Parameters map[string]interface{} `yaml:"parameters,omitempty"` Properties map[string]interface{} `yaml:"properties,omitempty"` Type int Module *mta.Module Resource *mta.Resource } // NewMTAResolver is a factory function for MTAResolver func NewMTAResolver(m *mta.MTA, workspaceDir string) *MTAResolver { resolver := &MTAResolver{*m, workspaceDir, &ResolveContext{ global: map[string]string{}, modules: map[string]map[string]string{}, resources: map[string]map[string]string{}, }, []string{}} for _, module := range m.Modules { resolver.context.modules[module.Name] = map[string]string{} } for _, resource := range m.Resources { resolver.context.resources[resource.Name] = map[string]string{} } return resolver } func
(path string, parts ...string) string { absolutePath := path if !filepath.IsAbs(path) { absolutePath = filepath.Join(append(parts, absolutePath)...) } return absolutePath } // ResolveProperties is the main function to trigger the resolution func (m *MTAResolver) ResolveProperties(module *mta.Module, envFilePath string) { if m.Parameters == nil { m.Parameters = map[string]interface{}{} } //add env variables for _, val := range envGetter() { pos := strings.Index(val, "=") if pos > 0 { key := strings.Trim(val[:pos], " ") value := strings.Trim(val[pos+1:], " ") m.addValueToContext(key, value) } } //add .env file in module's path to the module context if len(module.Path) > 0 { envFile := resolvePath(envFilePath, m.WorkingDir, module.Path) envMap, err := godotenv.Read(envFile) if err == nil { for key, value := range envMap { m.addValueToContext(key, value) } } } m.addServiceNames(module) //top level properties for key, value := range module.Properties { //no expected variables propValue := m.resolve(module, nil, value) module.Properties[key] = m.resolvePlaceholders(module, nil, nil, propValue) } //required properties: for _, req := range module.Requires { requiredSource := m.findProvider(req.Name) for propName, PropValue := range req.Properties { resolvedValue := m.resolve(module, &req, PropValue) //replace value with resolved value req.Properties[propName] = m.resolvePlaceholders(module, requiredSource, &req, resolvedValue) } } } func (m *MTAResolver) addValueToContext(key, value string) { //if the key has format of "module/key", or "resource/key" writes the value to the module's context slashPos := strings.Index(key, "/") if slashPos > 0 { modName := key[:slashPos] key = key[slashPos+1:] modulesContext, ok := m.context.modules[modName] if !ok { modulesContext, ok = m.context.resources[modName] } if ok { modulesContext[key] = value } } else { m.context.global[key] = value } } func (m *MTAResolver) resolve(sourceModule *mta.Module, requires *mta.Requires, valueObj 
interface{}) interface{} { switch valueObj := valueObj.(type) { case map[interface{}]interface{}: v := convertToJSONSafe(valueObj) return m.resolve(sourceModule, requires, v) case map[string]interface{}: for k, v := range valueObj { valueObj[k] = m.resolve(sourceModule, requires, v) } return valueObj case []interface{}: for i, v := range valueObj { valueObj[i] = m.resolve(sourceModule, requires, v) } return valueObj case string: return m.resolveString(sourceModule, requires, valueObj) default: //if the value is not a string but a leaf, just return it return valueObj } } func (m *MTAResolver) resolveString(sourceModule *mta.Module, requires *mta.Requires, value string) interface{} { pos := 0 pos, variableName, wholeValue := parseNextVariable(pos, value, variablePrefix) if pos < 0 { //no variables return value } varValue := m.getVariableValue(sourceModule, requires, variableName) if wholeValue { return varValue } for pos >= 0 { varValueStr, _ := convertToString(varValue) value = value[:pos] + varValueStr + value[pos+len(variableName)+3:] pos, variableName, _ = parseNextVariable(pos+len(varValueStr), value, variablePrefix) if pos >= 0 { varValue = m.getVariableValue(sourceModule, requires, variableName) } } return value } func convertToString(valueObj interface{}) (string, bool) { switch v := valueObj.(type) { case string: return v, false } valueBytes, err := json.Marshal(convertToJSONSafe(valueObj)) if err != nil { logs.Logger.Error(err) return "", false } return string(valueBytes), true } // return start position, name of variable and if it is a whole value func parseNextVariable(pos int, value string, prefix string) (int, string, bool) { endSign := "}" posStart := strings.Index(value[pos:], prefix+"{") if posStart < 0 { return -1, "", false } posStart += pos if string(value[posStart+2]) == "{" { endSign = "}}" } posEnd := strings.Index(value[posStart+2:], endSign) if posEnd < 0 { //bad value return -1, "", false } posEnd += posStart + 1 + len(endSign) wholeValue := 
posStart == 0 && posEnd == len(value)-1 return posStart, value[posStart+2 : posEnd], wholeValue } func (m *MTAResolver) getVariableValue(sourceModule *mta.Module, requires *mta.Requires, variableName string) interface{} { var providerName string if requires == nil { slashPos := strings.Index(variableName, "/") if slashPos > 0 { providerName = variableName[:slashPos] variableName = variableName[slashPos+1:] } else { m.addMessage(fmt.Sprintf(missingPrefixMsg, variableName)) return "~{" + variableName + "}" } } else { providerName = requires.Name } source := m.findProvider(providerName) if source != nil { for propName, propValue := range source.Properties { if propName == variableName { //Do not pass module and requires, because it is a wrong scope //it is either global->module->requires //or global->resource propValue = m.resolvePlaceholders(nil, source, nil, propValue) return convertToJSONSafe(propValue) } } } if source != nil && source.Type == resourceType && source.Resource.Type == "configuration" { provID, ok := getStringFromMap(source.Resource.Parameters, "provider-id") if ok { m.addMessage(fmt.Sprint("Missing configuration ", provID, "/", variableName)) } } return "~{" + variableName + "}" } func (m *MTAResolver) resolvePlaceholders(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, valueObj interface{}) interface{} { switch valueObj := valueObj.(type) { case map[interface{}]interface{}: v := convertToJSONSafe(valueObj) return m.resolvePlaceholders(sourceModule, source, requires, v) case map[string]interface{}: for k, v := range valueObj { valueObj[k] = m.resolvePlaceholders(sourceModule, source, requires, v) } return valueObj case []interface{}: for k, v := range valueObj { valueObj[k] = m.resolvePlaceholders(sourceModule, source, requires, v) } return valueObj case string: return m.resolvePlaceholdersString(sourceModule, source, requires, valueObj) default: //if the value is not a string but a leaf, just return it return valueObj } } func (m 
*MTAResolver) resolvePlaceholdersString(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, value string) interface{} { pos := 0 pos, placeholderName, wholeValue := parseNextVariable(pos, value, placeholderPrefix) if pos < 0 { return value } placeholderValue := m.getParameter(sourceModule, source, requires, placeholderName) if wholeValue { return placeholderValue } for pos >= 0 { phValueStr, _ := convertToString(placeholderValue) value = value[:pos] + phValueStr + value[pos+len(placeholderName)+3:] pos, placeholderName, _ = parseNextVariable(pos+len(phValueStr), value, placeholderPrefix) if pos >= 0 { placeholderValue = m.getParameter(sourceModule, source, requires, placeholderName) } } return value } func (m *MTAResolver) getParameterFromSource(source *mtaSource, paramName string) string { if source != nil { // See if the value was configured externally first (in VCAP_SERVICES, env var etc) // The source can be a module or a resource module, found := m.context.modules[source.Name] if found { paramValStr, ok := module[paramName] if ok { return paramValStr } } resource, found := m.context.resources[source.Name] if found { paramValStr, ok := resource[paramName] if ok { return paramValStr } } // If it was not defined externally, try to get it from the source parameters paramVal, found := getStringFromMap(source.Parameters, paramName) if found { return paramVal } } return "" } func (m *MTAResolver) getParameter(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, paramName string) string { //first on source parameters scope paramValStr := m.getParameterFromSource(source, paramName) //first on source parameters scope if paramValStr != "" { return paramValStr } //then try on requires level if requires != nil { paramVal, ok := getStringFromMap(requires.Parameters, paramName) if ok { return paramVal } } if sourceModule != nil { paramVal, ok := getStringFromMap(sourceModule.Parameters, paramName) if ok { return paramVal } //defaults to 
context's module params: paramValStr, ok = m.context.modules[sourceModule.Name][paramName] if ok { return paramValStr } } //then on MTA root scope paramVal, ok := getStringFromMap(m.Parameters, paramName) if ok { return paramVal } //then global scope paramValStr, ok = m.context.global[paramName] if ok { return paramValStr } if source == nil { m.addMessage(fmt.Sprint("Missing ", paramName)) } else { m.addMessage(fmt.Sprint("Missing ", source.Name+"/"+paramName)) } return "${" + paramName + "}" } func (m *MTAResolver) findProvider(name string) *mtaSource { for _, module := range m.Modules { for _, provides := range module.Provides { if provides.Name == name { source := mtaSource{Name: module.Name, Properties: provides.Properties, Parameters: nil, Type: moduleType, Module: module} return &source } } } //in case of resource, its name is the matching to the requires name for _, resource := range m.Resources { if resource.Name == name { source := mtaSource{Name: resource.Name, Properties: resource.Properties, Parameters: resource.Parameters, Type: resourceType, Resource: resource} return &source } } return nil } func (m *MTAResolver) addMessage(message string) { // This check is necessary so the same message won't be written twice. // This happens when a placeholder references a parameter that is not defined, // because we try to resolve the parameter while resolving the placeholder and then // we try to resolve the parameter again as a parameter. 
if !containsString(m.messages, message) { m.messages = append(m.messages, message) } } func containsString(slice []string, value string) bool { for _, curr := range slice { if curr == value { return true } } return false } func convertToJSONSafe(val interface{}) interface{} { switch v := val.(type) { case map[interface{}]interface{}: res := map[string]interface{}{} for k, v := range v { res[fmt.Sprint(k)] = convertToJSONSafe(v) } return res case []interface{}: for k, v2 := range v { v[k] = convertToJSONSafe(v2) } return v } return val } func getStringFromMap(params map[string]interface{}, key string) (string, bool) { // Only return the parameter value if it's a string, to prevent a panic. // Note: this is used mainly for parameter values during resolve. // The deployer DOES support non-string parameters, both as the whole value // (it keeps the same type) and inside a string. // It stringifies the value in side a string but it's not the usual json stringify. // For example, if we have this string: // prop_from_resource: "this is the prop: ~{some_prop}" // And some_prop is defined like this: // stuct_field: abc // We will get a resolved value like this from the Deployer: // "this is the prop: {stuct_field=abc}" // We do not support this use case currently. value, ok := params[key] if ok && value != nil { str, isString := value.(string) if isString { return str, true } } return "", false }
resolvePath
identifier_name
mtaresolver.go
package resolver import ( "encoding/json" "fmt" "os" "path/filepath" "strings" "github.com/joho/godotenv" "github.com/pkg/errors" "github.com/SAP/cloud-mta/internal/logs" "github.com/SAP/cloud-mta/mta" ) const ( emptyModuleNameMsg = "provide a name for the module" moduleNotFoundMsg = `could not find the "%s" module` marshalFailsMag = `could not marshal the "%s" environment variable` missingPrefixMsg = `could not resolve the value for the "~{%s}" variable; missing required prefix` defaultEnvFileName = ".env" ) var envGetter = os.Environ // ResolveResult is the result of the Resolve function. This is serialized to json when requested. type ResolveResult struct { Properties map[string]string `json:"properties"` Messages []string `json:"messages"` } // Resolve - resolve module's parameters func Resolve(workspaceDir, moduleName, path string, extensions []string, envFile string) (result ResolveResult, messages []string, err error) { if len(moduleName) == 0 { return result, nil, errors.New(emptyModuleNameMsg) } mtaRaw, messages, err := mta.GetMtaFromFile(path, extensions, false) if err != nil { return result, messages, err } if len(workspaceDir) == 0 { workspaceDir = filepath.Dir(path) } // If environment file name is not provided - set the default file name to .env envFilePath := defaultEnvFileName if len(envFile) > 0 { envFilePath = envFile } m := NewMTAResolver(mtaRaw, workspaceDir) for _, module := range m.GetModules() { if module.Name == moduleName { m.ResolveProperties(module, envFilePath) propVarMap, err := getPropertiesAsEnvVar(module) if err != nil { return result, messages, err } result.Properties = propVarMap result.Messages = m.messages return result, messages, nil } } return result, messages, errors.Errorf(moduleNotFoundMsg, moduleName) } func getPropertiesAsEnvVar(module *mta.Module) (map[string]string, error) { envVar := map[string]interface{}{} for key, val := range module.Properties { envVar[key] = val } for _, requires := range module.Requires { propMap 
:= envVar if len(requires.Group) > 0 { propMap = map[string]interface{}{} } for key, val := range requires.Properties { propMap[key] = val } if len(requires.Group) > 0 { //append the array element to group group, ok := envVar[requires.Group] if ok { groupArray := group.([]map[string]interface{}) envVar[requires.Group] = append(groupArray, propMap) } else { envVar[requires.Group] = []map[string]interface{}{propMap} } } } //serialize return serializePropertiesAsEnvVars(envVar) } func serializePropertiesAsEnvVars(envVar map[string]interface{}) (map[string]string, error) { retEnvVar := map[string]string{} for key, val := range envVar { switch v := val.(type) { case string: retEnvVar[key] = v default: bytesVal, err := json.Marshal(val) if err != nil { return nil, errors.Errorf(marshalFailsMag, key) } retEnvVar[key] = string(bytesVal) } } return retEnvVar, nil } // MTAResolver is used to resolve MTA properties' variables type MTAResolver struct { mta.MTA WorkingDir string context *ResolveContext messages []string } const resourceType = 1 const moduleType = 2 const variablePrefix = "~" const placeholderPrefix = "$" type mtaSource struct { Name string Parameters map[string]interface{} `yaml:"parameters,omitempty"` Properties map[string]interface{} `yaml:"properties,omitempty"` Type int Module *mta.Module Resource *mta.Resource } // NewMTAResolver is a factory function for MTAResolver func NewMTAResolver(m *mta.MTA, workspaceDir string) *MTAResolver { resolver := &MTAResolver{*m, workspaceDir, &ResolveContext{ global: map[string]string{}, modules: map[string]map[string]string{}, resources: map[string]map[string]string{}, }, []string{}} for _, module := range m.Modules { resolver.context.modules[module.Name] = map[string]string{} } for _, resource := range m.Resources { resolver.context.resources[resource.Name] = map[string]string{} } return resolver } func resolvePath(path string, parts ...string) string { absolutePath := path if !filepath.IsAbs(path) { absolutePath = 
filepath.Join(append(parts, absolutePath)...) } return absolutePath } // ResolveProperties is the main function to trigger the resolution func (m *MTAResolver) ResolveProperties(module *mta.Module, envFilePath string) { if m.Parameters == nil { m.Parameters = map[string]interface{}{} } //add env variables for _, val := range envGetter() { pos := strings.Index(val, "=") if pos > 0 { key := strings.Trim(val[:pos], " ") value := strings.Trim(val[pos+1:], " ") m.addValueToContext(key, value) } } //add .env file in module's path to the module context if len(module.Path) > 0 { envFile := resolvePath(envFilePath, m.WorkingDir, module.Path) envMap, err := godotenv.Read(envFile) if err == nil { for key, value := range envMap { m.addValueToContext(key, value) } } } m.addServiceNames(module) //top level properties for key, value := range module.Properties { //no expected variables propValue := m.resolve(module, nil, value) module.Properties[key] = m.resolvePlaceholders(module, nil, nil, propValue) } //required properties: for _, req := range module.Requires { requiredSource := m.findProvider(req.Name) for propName, PropValue := range req.Properties { resolvedValue := m.resolve(module, &req, PropValue) //replace value with resolved value req.Properties[propName] = m.resolvePlaceholders(module, requiredSource, &req, resolvedValue) } } } func (m *MTAResolver) addValueToContext(key, value string) { //if the key has format of "module/key", or "resource/key" writes the value to the module's context slashPos := strings.Index(key, "/") if slashPos > 0 { modName := key[:slashPos] key = key[slashPos+1:] modulesContext, ok := m.context.modules[modName] if !ok { modulesContext, ok = m.context.resources[modName] } if ok { modulesContext[key] = value } } else { m.context.global[key] = value } } func (m *MTAResolver) resolve(sourceModule *mta.Module, requires *mta.Requires, valueObj interface{}) interface{} { switch valueObj := valueObj.(type) { case map[interface{}]interface{}: v := 
convertToJSONSafe(valueObj) return m.resolve(sourceModule, requires, v) case map[string]interface{}: for k, v := range valueObj { valueObj[k] = m.resolve(sourceModule, requires, v) } return valueObj case []interface{}: for i, v := range valueObj { valueObj[i] = m.resolve(sourceModule, requires, v) } return valueObj case string: return m.resolveString(sourceModule, requires, valueObj) default: //if the value is not a string but a leaf, just return it return valueObj } } func (m *MTAResolver) resolveString(sourceModule *mta.Module, requires *mta.Requires, value string) interface{} { pos := 0 pos, variableName, wholeValue := parseNextVariable(pos, value, variablePrefix) if pos < 0 { //no variables return value } varValue := m.getVariableValue(sourceModule, requires, variableName) if wholeValue { return varValue } for pos >= 0 { varValueStr, _ := convertToString(varValue) value = value[:pos] + varValueStr + value[pos+len(variableName)+3:] pos, variableName, _ = parseNextVariable(pos+len(varValueStr), value, variablePrefix) if pos >= 0 { varValue = m.getVariableValue(sourceModule, requires, variableName) } } return value } func convertToString(valueObj interface{}) (string, bool) { switch v := valueObj.(type) { case string: return v, false } valueBytes, err := json.Marshal(convertToJSONSafe(valueObj)) if err != nil { logs.Logger.Error(err) return "", false } return string(valueBytes), true } // return start position, name of variable and if it is a whole value func parseNextVariable(pos int, value string, prefix string) (int, string, bool) { endSign := "}" posStart := strings.Index(value[pos:], prefix+"{") if posStart < 0 { return -1, "", false } posStart += pos if string(value[posStart+2]) == "{" { endSign = "}}" } posEnd := strings.Index(value[posStart+2:], endSign) if posEnd < 0 { //bad value return -1, "", false } posEnd += posStart + 1 + len(endSign) wholeValue := posStart == 0 && posEnd == len(value)-1 return posStart, value[posStart+2 : posEnd], wholeValue } func 
(m *MTAResolver) getVariableValue(sourceModule *mta.Module, requires *mta.Requires, variableName string) interface{} { var providerName string if requires == nil { slashPos := strings.Index(variableName, "/") if slashPos > 0 { providerName = variableName[:slashPos] variableName = variableName[slashPos+1:] } else { m.addMessage(fmt.Sprintf(missingPrefixMsg, variableName)) return "~{" + variableName + "}" } } else { providerName = requires.Name } source := m.findProvider(providerName) if source != nil { for propName, propValue := range source.Properties { if propName == variableName { //Do not pass module and requires, because it is a wrong scope //it is either global->module->requires //or global->resource propValue = m.resolvePlaceholders(nil, source, nil, propValue) return convertToJSONSafe(propValue) } } } if source != nil && source.Type == resourceType && source.Resource.Type == "configuration" { provID, ok := getStringFromMap(source.Resource.Parameters, "provider-id") if ok { m.addMessage(fmt.Sprint("Missing configuration ", provID, "/", variableName)) } } return "~{" + variableName + "}" } func (m *MTAResolver) resolvePlaceholders(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, valueObj interface{}) interface{} { switch valueObj := valueObj.(type) { case map[interface{}]interface{}: v := convertToJSONSafe(valueObj) return m.resolvePlaceholders(sourceModule, source, requires, v) case map[string]interface{}: for k, v := range valueObj { valueObj[k] = m.resolvePlaceholders(sourceModule, source, requires, v) } return valueObj case []interface{}: for k, v := range valueObj { valueObj[k] = m.resolvePlaceholders(sourceModule, source, requires, v) } return valueObj case string: return m.resolvePlaceholdersString(sourceModule, source, requires, valueObj) default: //if the value is not a string but a leaf, just return it return valueObj } } func (m *MTAResolver) resolvePlaceholdersString(sourceModule *mta.Module, source *mtaSource, requires 
*mta.Requires, value string) interface{} { pos := 0 pos, placeholderName, wholeValue := parseNextVariable(pos, value, placeholderPrefix) if pos < 0 { return value } placeholderValue := m.getParameter(sourceModule, source, requires, placeholderName) if wholeValue { return placeholderValue } for pos >= 0 { phValueStr, _ := convertToString(placeholderValue) value = value[:pos] + phValueStr + value[pos+len(placeholderName)+3:] pos, placeholderName, _ = parseNextVariable(pos+len(phValueStr), value, placeholderPrefix) if pos >= 0 { placeholderValue = m.getParameter(sourceModule, source, requires, placeholderName) } } return value } func (m *MTAResolver) getParameterFromSource(source *mtaSource, paramName string) string { if source != nil { // See if the value was configured externally first (in VCAP_SERVICES, env var etc) // The source can be a module or a resource module, found := m.context.modules[source.Name] if found { paramValStr, ok := module[paramName] if ok { return paramValStr } } resource, found := m.context.resources[source.Name] if found { paramValStr, ok := resource[paramName] if ok { return paramValStr } } // If it was not defined externally, try to get it from the source parameters paramVal, found := getStringFromMap(source.Parameters, paramName) if found { return paramVal } } return "" } func (m *MTAResolver) getParameter(sourceModule *mta.Module, source *mtaSource, requires *mta.Requires, paramName string) string { //first on source parameters scope paramValStr := m.getParameterFromSource(source, paramName) //first on source parameters scope if paramValStr != "" { return paramValStr } //then try on requires level if requires != nil { paramVal, ok := getStringFromMap(requires.Parameters, paramName) if ok { return paramVal } } if sourceModule != nil { paramVal, ok := getStringFromMap(sourceModule.Parameters, paramName) if ok { return paramVal } //defaults to context's module params: paramValStr, ok = m.context.modules[sourceModule.Name][paramName] if ok { 
return paramValStr } } //then on MTA root scope paramVal, ok := getStringFromMap(m.Parameters, paramName) if ok { return paramVal } //then global scope paramValStr, ok = m.context.global[paramName] if ok { return paramValStr } if source == nil { m.addMessage(fmt.Sprint("Missing ", paramName)) } else { m.addMessage(fmt.Sprint("Missing ", source.Name+"/"+paramName)) } return "${" + paramName + "}" } func (m *MTAResolver) findProvider(name string) *mtaSource { for _, module := range m.Modules { for _, provides := range module.Provides { if provides.Name == name { source := mtaSource{Name: module.Name, Properties: provides.Properties, Parameters: nil, Type: moduleType, Module: module} return &source } } } //in case of resource, its name is the matching to the requires name for _, resource := range m.Resources { if resource.Name == name { source := mtaSource{Name: resource.Name, Properties: resource.Properties, Parameters: resource.Parameters, Type: resourceType, Resource: resource} return &source } } return nil } func (m *MTAResolver) addMessage(message string) { // This check is necessary so the same message won't be written twice. // This happens when a placeholder references a parameter that is not defined, // because we try to resolve the parameter while resolving the placeholder and then // we try to resolve the parameter again as a parameter. if !containsString(m.messages, message) { m.messages = append(m.messages, message) } } func containsString(slice []string, value string) bool { for _, curr := range slice
return false } func convertToJSONSafe(val interface{}) interface{} { switch v := val.(type) { case map[interface{}]interface{}: res := map[string]interface{}{} for k, v := range v { res[fmt.Sprint(k)] = convertToJSONSafe(v) } return res case []interface{}: for k, v2 := range v { v[k] = convertToJSONSafe(v2) } return v } return val } func getStringFromMap(params map[string]interface{}, key string) (string, bool) { // Only return the parameter value if it's a string, to prevent a panic. // Note: this is used mainly for parameter values during resolve. // The deployer DOES support non-string parameters, both as the whole value // (it keeps the same type) and inside a string. // It stringifies the value in side a string but it's not the usual json stringify. // For example, if we have this string: // prop_from_resource: "this is the prop: ~{some_prop}" // And some_prop is defined like this: // stuct_field: abc // We will get a resolved value like this from the Deployer: // "this is the prop: {stuct_field=abc}" // We do not support this use case currently. value, ok := params[key] if ok && value != nil { str, isString := value.(string) if isString { return str, true } } return "", false }
{ if curr == value { return true } }
conditional_block
stlib.py
# Library for Stalker project #Libraries import pandas as pd import seaborn as sns from IPython.display import Image, display import matplotlib.pyplot as plt # Google search from googlesearch import search # Tldextract to get domain of url import tldextract as tld # BeautifulSoup from bs4 import BeautifulSoup as bs from bs4.element import Comment import urllib.request # NLTK to analyze webs import nltk from nltk.corpus import stopwords from nltk import FreqDist from nltk.tokenize import word_tokenize # Find close matches from difflib import get_close_matches # Sentiment analysis from textblob import TextBlob # Twitter sentiment analysis import tweepy # News API from newsapi import NewsApiClient # Credentials import credentials as cd # Finding info in APIs newsapi = NewsApiClient(api_key=cd.news_credentials['api_key']) news_sources = 'the-verge,buzzfeed,engadget,hacker-news,mashable,reddit-r-all,wired,techcrunch' # Twitter API consumer_key = cd.twitter_credentials['consumer_key'] consumer_key_secret = cd.twitter_credentials['consumer_key_secret'] access_token = cd.twitter_credentials['access_token'] access_token_secret = cd.twitter_credentials['access_token_secret'] auth = tweepy.OAuthHandler(consumer_key, consumer_key_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) # Finding query on Google # Finding related urls def find_webs(query): urls = [] rrss = ['facebook', 'twitter', 'linkedin', 'instagram', 'youtube','pinterest','angel'] sites = [] red_social = False for s in search(query, tld="com", num=30, stop=30, pause=3, lang='en'): if len(urls)<10: for rs in rrss: if rs in s or tld.extract(s).domain in sites: red_social = True if not red_social and s not in urls: urls.append(s) sites.append(tld.extract(s).domain) red_social = False return urls def tag_visible(element): if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']: return False if isinstance(element, Comment): return False return True def 
text_from_html(body): soup = bs(body, 'html.parser') texts = soup.findAll(text=True) visible_texts = filter(tag_visible, texts) return u" ".join(t.strip() for t in visible_texts) def cleaning_urls_text(url): try: html = text_from_html(urllib.request.urlopen(url).read()) stop_words = set(stopwords.words('english')) word_tokens = word_tokenize(html) return [w for w in word_tokens if not w in stop_words] except: return [] def filter_warning_words(sentence): warning_word = ['lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime','arson', 'assault', 'bigamy', 'blackmail', 'bribery', 'burglary', 'child abuse', 'conspiracy', 'espionage', 'forgery', 'fraud', 'genocide', 'hijacking','homicide', 'kidnapping', 'manslaughter', 'mugging', 'murder', 'perjury', 'rape', 'riot', 'robbery', 'shoplifting', 'slander', 'smuggling', 'treason', 'trespassing'] return list(filter(lambda word: word in warning_word, sentence)) def warnings_count(url): clean_sentence = cleaning_urls_text(url) length = len(filter_warning_words(clean_sentence))
list_len_tup_clean.sort(key = lambda item: item[1], reverse=True) top_urls = [url for url, length in list_len_tup_clean[:2]] if len(top_urls) > 1: print(f""" We found something sketchy. You might want to check these links: - {top_urls[0]} - {top_urls[1]} """) elif len(top_urls) == 1: print(f""" We found something sketchy. You might want to check this link: {top_urls[0]} """) else: print(f"We couldn't find anything worrying about {look_for} on Google. Nice!") # Input correction def retrieve_name(my_name, companies): companies_list = [] for i in companies.dropna(subset=['name']).name: companies_list.append(i) if my_name in companies_list: return my_name elif len(get_close_matches(my_name, companies_list)) > 0: action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_name, companies_list)[0]) if (action == "y"): return get_close_matches(my_name, companies_list)[0] elif (action == "n"): return my_name else: return("we don't understand you. Apologies.") def retrieve_sector(my_sector, investments): investments = investments.dropna(subset=['raised_amount_usd', 'company_category_list']) sector_list0 = [] sector_list = [] for item in investments['company_category_list']: if ',' in item: sector_list0.append(item.split(sep=', ')) else: sector_list0.append(item) for i in sector_list0: if type(i) == list: for sec in i: sector_list.append(sec) else: sector_list.append(i) if my_sector in sector_list: return my_sector elif len(get_close_matches(my_sector, sector_list)) > 0: action = input("Did you mean %s instead? 
[y or n]: " % get_close_matches(my_sector, sector_list) [0]) if (action == "y"): return get_close_matches(my_sector, sector_list)[0] else: return my_sector # Sentiment analysis tweeter def tw_sent_sector(public_tweets, sector): sentiment_list = [] for tweet in public_tweets: analysis = TextBlob(tweet.text) sentiment_list.append(analysis.sentiment[0]) if sum(sentiment_list)>0: sent = 'Positive' elif sum(sentiment_list)<0: sent = 'Negative' else: sent = 'Neutral' print(f"The sentiment about {sector} industry in Twitter is {sent}") # Sentiment analysis news def news_sentiment_sector(public_news, sector): news_list = [] for piece in range(len(public_news['articles'])): news_list.append(TextBlob(public_news['articles'][piece]['title']).sentiment[0]) news_list.append(TextBlob(public_news['articles'][piece]['description']).sentiment[0]) if sum(news_list)>0: news_sent = 'Positive' elif sum(news_list)<0: news_sent = 'Negative' else: news_sent = 'Neutral' print(f"There have been {len(public_news)} news pieces about {sector} industry recently and are in general {news_sent}") # Look for data about sector def category(sector, investments): # Gather tweets public_tweets = api.search(sector) # Gather news public_news = newsapi.get_everything(q=sector,sources=news_sources,language='en') # Prepare the data for the sector investments = investments.dropna(subset=['company_category_list']) sector_investments = investments[investments['company_category_list'].str.contains(sector)].drop('index',axis=1) sector_investments.reset_index(drop=True) sector_investments['funded_at'] = pd.to_datetime(sector_investments['funded_at']) sector_investments['Year'] = sector_investments['funded_at'].apply(lambda x: x.year ) sector_investments['Month'] = sector_investments['funded_at'].apply(lambda x: x.month ) sector_investments['Day'] = sector_investments['funded_at'].apply(lambda x: x.day ) # Sentiment analysis Twitter tw_sent_sector(public_tweets, sector) # Sentiment analysis News 
news_sentiment_sector(public_news, sector) # create plot sector_year = sector_investments.groupby(['Year']).sum()[-10:] movement = ((sector_year.raised_amount_usd.iloc[len(sector_year)-1] -sector_year.raised_amount_usd.iloc[0])/sector_year.raised_amount_usd.iloc[0]*100) if sector_year.raised_amount_usd.iloc[0] + sector_year.raised_amount_usd.iloc[len(sector_year)-1] >= 0: in_dec = 'increased' grow = 'growing' else: in_dec = 'decreased' grow = 'falling' movement = movement[1:] sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd).set_title(f'Evolution of the amount invested in {sector}') investments_per_year = sector_investments.groupby(['Year']).count() peak_year = sector_year.index[sector_year['raised_amount_usd']== max(sector_year.raised_amount_usd)].to_list() peak_amount = max(sector_year.raised_amount_usd) #peak_year_invest = investments_per_year.index[investments_per_year['raised_amount_usd']== max(investments_per_year.raised_amount_usd)].to_list() low_amount = min(sector_year.raised_amount_usd) most_invested_companies = sector_investments.groupby(by='company_name').sum().sort_values(by='raised_amount_usd', ascending=False) low_year = sector_year.index[sector_year['raised_amount_usd']== min(sector_year.raised_amount_usd)].to_list() format_doll = ',.2f' print(f"""The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement),format_doll)}% in the last {len(sector_year)} years. It peaked in year {peak_year[0]} with ${format(peak_amount,format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount,format_doll)} invested. 
""") plt.ylabel('Raised amount in USD') plt.show() sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year.Day[-10:]).set_title(f'Evolution of the number of investment in {sector}') plt.ylabel('Number of investments') #print("""Plot explanaition average investment """) plt.show() #print(f""" # The Top 3 companies with biggest investments are: #- {most_invested_companies.index[0]} with ${most_invested_companies.raised_amount_usd[0]} raised, #- {most_invested_companies.index[1]} with ${most_invested_companies.raised_amount_usd[1]} raised and #- {most_invested_companies.index[2]} with ${most_invested_companies.raised_amount_usd[2]} raised #""") # Sentiment analysis founder def tw_analysis_founder(public_tweets, founder): sentiment_list = [] for tweet in public_tweets: analysis = TextBlob(tweet.text) sentiment_list.append(analysis.sentiment[0]) if sum(sentiment_list)>0: sent = 'Positive' elif sum(sentiment_list)<0: sent = 'Negative' else: sent = 'Neutral' print(f"The sentiment about {founder} in Twitter is {sent}") # Look for data about the founder def founders(founder, people): full_name = founder.split() public_tweets = api.search(founder) # What to search on Google look_for = founder for i in range(len(people)): if people.first_name.iloc[i] == full_name[0] and people.last_name.iloc[i]==full_name[1]: display(Image(url=people.profile_image_url[i])) print(f'We found this information about {founder}:') print(f"Founder's name: {people.first_name[i]} {people.last_name[i]} ") print(f"Title: {people.title[i]}") print(f"Organization: {people.organization[i]}") print(f"Location: {people.location_city[i]}, {people.location_region[i]}, {people.location_country_code[i]}") if people.twitter_url[i] != None: print(f"Twitter URL: {people.twitter_url[i]}") if people.linkedin_url[i] != None: print(f"Linkedin URL: {people.linkedin_url[i]}") if people.facebook_url[i] != None: print(f"Facebook URL: {people.facebook_url[i]}") # Twitter analysis 
tw_analysis_founder(public_tweets, founder) # Google search most_warnings(find_webs(founder), look_for) # Look for data about company def find_companies_by_size(size, companies, name, sector, company): company_nan = companies.dropna() company_sector = company_nan[company_nan['category_list'].str.contains(sector)].drop('index',axis=1).dropna() company_sector['total_funding_size']=pd.qcut(company_sector.funding_total_usd, q=[0, .25, .75, 1], labels=['small', 'medium', 'big']) if name in company_nan['name']: return company_sector[(company_sector['total_funding_size']==size)& (company_sector['funding_total_usd'] > 100000) & (company_sector['status'] != 'closed')& (company_sector['country_code']==company.country_code)].sample() else: return company_sector[(company_sector['total_funding_size']==size)& (company_sector['funding_total_usd'] > 100000) & (company_sector['status'] != 'closed')].sample() def competitor_info(company): print(f"Company name: {company.name.item()}") print(f"Total money raised: ${format(company.funding_total_usd.item(),',.2f')}") print(f"Total rounds: {company.funding_rounds.item()}") print(f"Webpage: {company.homepage_url.item()}") print(f"Country: {company.country_code.item()}") print(f"Status: {company.status.item()}") print(f"Founded in: {company.founded_at.item()}") # Sentiment analysis company def tw_analysis_company(public_tweets, company): sentiment_list = [] for tweet in public_tweets: analysis = TextBlob(tweet.text) sentiment_list.append(analysis.sentiment[0]) if sum(sentiment_list)>0: sent = 'Positive' elif sum(sentiment_list)<0: sent = 'Negative' else: sent = 'Neutral' print(f"The sentiment about {company} in Twitter is {sent}") def startup(name, companies, sector): company = companies[companies['name'] == name] # What to search on Google look_for = name # Gather tweets public_tweets = api.search(name) try: print(f"Company name: {company.name.item()}") print(f"Total money raised: ${format(company.funding_total_usd.item(),',.2f')}") 
print(f"Total rounds: {company.funding_rounds.item()}") print(f"Webpage: {company.homepage_url.item()}") print(f"Country: {company.country_code.item()}") print(f"Status: {company.status.item()}") # Find competitors print('\n') print(f"Competitors similar to {company.name.item()}:") print('\n') competitor_info(find_companies_by_size('small', companies, name, sector, company)) print('\n') competitor_info(find_companies_by_size('medium', companies, name, sector, company)) print('\n') competitor_info(find_companies_by_size('big', companies, name, sector, company)) except: print(f"We couldn't find information about {name} in Crunchbase") #Twitter sentiment analysis for company tw_analysis_company(public_tweets, name) # Google search most_warnings(find_webs(name), look_for)
return (url, length) if length != 0 else None def most_warnings(urls, look_for): list_len_tup = list(map(warnings_count, urls)) list_len_tup_clean = list(filter(lambda item: item != None, list_len_tup))
random_line_split
stlib.py
# Library for Stalker project #Libraries import pandas as pd import seaborn as sns from IPython.display import Image, display import matplotlib.pyplot as plt # Google search from googlesearch import search # Tldextract to get domain of url import tldextract as tld # BeautifulSoup from bs4 import BeautifulSoup as bs from bs4.element import Comment import urllib.request # NLTK to analyze webs import nltk from nltk.corpus import stopwords from nltk import FreqDist from nltk.tokenize import word_tokenize # Find close matches from difflib import get_close_matches # Sentiment analysis from textblob import TextBlob # Twitter sentiment analysis import tweepy # News API from newsapi import NewsApiClient # Credentials import credentials as cd # Finding info in APIs newsapi = NewsApiClient(api_key=cd.news_credentials['api_key']) news_sources = 'the-verge,buzzfeed,engadget,hacker-news,mashable,reddit-r-all,wired,techcrunch' # Twitter API consumer_key = cd.twitter_credentials['consumer_key'] consumer_key_secret = cd.twitter_credentials['consumer_key_secret'] access_token = cd.twitter_credentials['access_token'] access_token_secret = cd.twitter_credentials['access_token_secret'] auth = tweepy.OAuthHandler(consumer_key, consumer_key_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) # Finding query on Google # Finding related urls def find_webs(query): urls = [] rrss = ['facebook', 'twitter', 'linkedin', 'instagram', 'youtube','pinterest','angel'] sites = [] red_social = False for s in search(query, tld="com", num=30, stop=30, pause=3, lang='en'): if len(urls)<10: for rs in rrss: if rs in s or tld.extract(s).domain in sites: red_social = True if not red_social and s not in urls: urls.append(s) sites.append(tld.extract(s).domain) red_social = False return urls def tag_visible(element): if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']: return False if isinstance(element, Comment): return False return True def 
text_from_html(body): soup = bs(body, 'html.parser') texts = soup.findAll(text=True) visible_texts = filter(tag_visible, texts) return u" ".join(t.strip() for t in visible_texts) def cleaning_urls_text(url): try: html = text_from_html(urllib.request.urlopen(url).read()) stop_words = set(stopwords.words('english')) word_tokens = word_tokenize(html) return [w for w in word_tokens if not w in stop_words] except: return [] def filter_warning_words(sentence): warning_word = ['lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime','arson', 'assault', 'bigamy', 'blackmail', 'bribery', 'burglary', 'child abuse', 'conspiracy', 'espionage', 'forgery', 'fraud', 'genocide', 'hijacking','homicide', 'kidnapping', 'manslaughter', 'mugging', 'murder', 'perjury', 'rape', 'riot', 'robbery', 'shoplifting', 'slander', 'smuggling', 'treason', 'trespassing'] return list(filter(lambda word: word in warning_word, sentence)) def warnings_count(url): clean_sentence = cleaning_urls_text(url) length = len(filter_warning_words(clean_sentence)) return (url, length) if length != 0 else None def most_warnings(urls, look_for): list_len_tup = list(map(warnings_count, urls)) list_len_tup_clean = list(filter(lambda item: item != None, list_len_tup)) list_len_tup_clean.sort(key = lambda item: item[1], reverse=True) top_urls = [url for url, length in list_len_tup_clean[:2]] if len(top_urls) > 1: print(f""" We found something sketchy. You might want to check these links: - {top_urls[0]} - {top_urls[1]} """) elif len(top_urls) == 1: print(f""" We found something sketchy. You might want to check this link: {top_urls[0]} """) else: print(f"We couldn't find anything worrying about {look_for} on Google. Nice!") # Input correction def retrieve_name(my_name, companies): companies_list = [] for i in companies.dropna(subset=['name']).name: companies_list.append(i) if my_name in companies_list: return my_name elif len(get_close_matches(my_name, companies_list)) > 0: action = input("Did you mean %s instead? 
[y or n]: " % get_close_matches(my_name, companies_list)[0]) if (action == "y"): return get_close_matches(my_name, companies_list)[0] elif (action == "n"): return my_name else: return("we don't understand you. Apologies.") def retrieve_sector(my_sector, investments): investments = investments.dropna(subset=['raised_amount_usd', 'company_category_list']) sector_list0 = [] sector_list = [] for item in investments['company_category_list']: if ',' in item: sector_list0.append(item.split(sep=', ')) else: sector_list0.append(item) for i in sector_list0: if type(i) == list: for sec in i: sector_list.append(sec) else: sector_list.append(i) if my_sector in sector_list: return my_sector elif len(get_close_matches(my_sector, sector_list)) > 0: action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_sector, sector_list) [0]) if (action == "y"): return get_close_matches(my_sector, sector_list)[0] else: return my_sector # Sentiment analysis tweeter def tw_sent_sector(public_tweets, sector): sentiment_list = [] for tweet in public_tweets: analysis = TextBlob(tweet.text) sentiment_list.append(analysis.sentiment[0]) if sum(sentiment_list)>0: sent = 'Positive' elif sum(sentiment_list)<0: sent = 'Negative' else: sent = 'Neutral' print(f"The sentiment about {sector} industry in Twitter is {sent}") # Sentiment analysis news def news_sentiment_sector(public_news, sector): news_list = [] for piece in range(len(public_news['articles'])): news_list.append(TextBlob(public_news['articles'][piece]['title']).sentiment[0]) news_list.append(TextBlob(public_news['articles'][piece]['description']).sentiment[0]) if sum(news_list)>0: news_sent = 'Positive' elif sum(news_list)<0: news_sent = 'Negative' else: news_sent = 'Neutral' print(f"There have been {len(public_news)} news pieces about {sector} industry recently and are in general {news_sent}") # Look for data about sector def category(sector, investments): # Gather tweets public_tweets = api.search(sector) # Gather news 
public_news = newsapi.get_everything(q=sector,sources=news_sources,language='en') # Prepare the data for the sector investments = investments.dropna(subset=['company_category_list']) sector_investments = investments[investments['company_category_list'].str.contains(sector)].drop('index',axis=1) sector_investments.reset_index(drop=True) sector_investments['funded_at'] = pd.to_datetime(sector_investments['funded_at']) sector_investments['Year'] = sector_investments['funded_at'].apply(lambda x: x.year ) sector_investments['Month'] = sector_investments['funded_at'].apply(lambda x: x.month ) sector_investments['Day'] = sector_investments['funded_at'].apply(lambda x: x.day ) # Sentiment analysis Twitter tw_sent_sector(public_tweets, sector) # Sentiment analysis News news_sentiment_sector(public_news, sector) # create plot sector_year = sector_investments.groupby(['Year']).sum()[-10:] movement = ((sector_year.raised_amount_usd.iloc[len(sector_year)-1] -sector_year.raised_amount_usd.iloc[0])/sector_year.raised_amount_usd.iloc[0]*100) if sector_year.raised_amount_usd.iloc[0] + sector_year.raised_amount_usd.iloc[len(sector_year)-1] >= 0: in_dec = 'increased' grow = 'growing' else: in_dec = 'decreased' grow = 'falling' movement = movement[1:] sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd).set_title(f'Evolution of the amount invested in {sector}') investments_per_year = sector_investments.groupby(['Year']).count() peak_year = sector_year.index[sector_year['raised_amount_usd']== max(sector_year.raised_amount_usd)].to_list() peak_amount = max(sector_year.raised_amount_usd) #peak_year_invest = investments_per_year.index[investments_per_year['raised_amount_usd']== max(investments_per_year.raised_amount_usd)].to_list() low_amount = min(sector_year.raised_amount_usd) most_invested_companies = sector_investments.groupby(by='company_name').sum().sort_values(by='raised_amount_usd', ascending=False) low_year = sector_year.index[sector_year['raised_amount_usd']== 
min(sector_year.raised_amount_usd)].to_list() format_doll = ',.2f' print(f"""The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement),format_doll)}% in the last {len(sector_year)} years. It peaked in year {peak_year[0]} with ${format(peak_amount,format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount,format_doll)} invested. """) plt.ylabel('Raised amount in USD') plt.show() sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year.Day[-10:]).set_title(f'Evolution of the number of investment in {sector}') plt.ylabel('Number of investments') #print("""Plot explanaition average investment """) plt.show() #print(f""" # The Top 3 companies with biggest investments are: #- {most_invested_companies.index[0]} with ${most_invested_companies.raised_amount_usd[0]} raised, #- {most_invested_companies.index[1]} with ${most_invested_companies.raised_amount_usd[1]} raised and #- {most_invested_companies.index[2]} with ${most_invested_companies.raised_amount_usd[2]} raised #""") # Sentiment analysis founder def tw_analysis_founder(public_tweets, founder): sentiment_list = [] for tweet in public_tweets: analysis = TextBlob(tweet.text) sentiment_list.append(analysis.sentiment[0]) if sum(sentiment_list)>0: sent = 'Positive' elif sum(sentiment_list)<0: sent = 'Negative' else: sent = 'Neutral' print(f"The sentiment about {founder} in Twitter is {sent}") # Look for data about the founder def founders(founder, people): full_name = founder.split() public_tweets = api.search(founder) # What to search on Google look_for = founder for i in range(len(people)): if people.first_name.iloc[i] == full_name[0] and people.last_name.iloc[i]==full_name[1]: display(Image(url=people.profile_image_url[i])) print(f'We found this information about {founder}:') print(f"Founder's name: {people.first_name[i]} {people.last_name[i]} ") print(f"Title: {people.title[i]}") print(f"Organization: {people.organization[i]}") 
print(f"Location: {people.location_city[i]}, {people.location_region[i]}, {people.location_country_code[i]}") if people.twitter_url[i] != None: print(f"Twitter URL: {people.twitter_url[i]}") if people.linkedin_url[i] != None: print(f"Linkedin URL: {people.linkedin_url[i]}") if people.facebook_url[i] != None: print(f"Facebook URL: {people.facebook_url[i]}") # Twitter analysis tw_analysis_founder(public_tweets, founder) # Google search most_warnings(find_webs(founder), look_for) # Look for data about company def
(size, companies, name, sector, company): company_nan = companies.dropna() company_sector = company_nan[company_nan['category_list'].str.contains(sector)].drop('index',axis=1).dropna() company_sector['total_funding_size']=pd.qcut(company_sector.funding_total_usd, q=[0, .25, .75, 1], labels=['small', 'medium', 'big']) if name in company_nan['name']: return company_sector[(company_sector['total_funding_size']==size)& (company_sector['funding_total_usd'] > 100000) & (company_sector['status'] != 'closed')& (company_sector['country_code']==company.country_code)].sample() else: return company_sector[(company_sector['total_funding_size']==size)& (company_sector['funding_total_usd'] > 100000) & (company_sector['status'] != 'closed')].sample() def competitor_info(company): print(f"Company name: {company.name.item()}") print(f"Total money raised: ${format(company.funding_total_usd.item(),',.2f')}") print(f"Total rounds: {company.funding_rounds.item()}") print(f"Webpage: {company.homepage_url.item()}") print(f"Country: {company.country_code.item()}") print(f"Status: {company.status.item()}") print(f"Founded in: {company.founded_at.item()}") # Sentiment analysis company def tw_analysis_company(public_tweets, company): sentiment_list = [] for tweet in public_tweets: analysis = TextBlob(tweet.text) sentiment_list.append(analysis.sentiment[0]) if sum(sentiment_list)>0: sent = 'Positive' elif sum(sentiment_list)<0: sent = 'Negative' else: sent = 'Neutral' print(f"The sentiment about {company} in Twitter is {sent}") def startup(name, companies, sector): company = companies[companies['name'] == name] # What to search on Google look_for = name # Gather tweets public_tweets = api.search(name) try: print(f"Company name: {company.name.item()}") print(f"Total money raised: ${format(company.funding_total_usd.item(),',.2f')}") print(f"Total rounds: {company.funding_rounds.item()}") print(f"Webpage: {company.homepage_url.item()}") print(f"Country: {company.country_code.item()}") 
print(f"Status: {company.status.item()}") # Find competitors print('\n') print(f"Competitors similar to {company.name.item()}:") print('\n') competitor_info(find_companies_by_size('small', companies, name, sector, company)) print('\n') competitor_info(find_companies_by_size('medium', companies, name, sector, company)) print('\n') competitor_info(find_companies_by_size('big', companies, name, sector, company)) except: print(f"We couldn't find information about {name} in Crunchbase") #Twitter sentiment analysis for company tw_analysis_company(public_tweets, name) # Google search most_warnings(find_webs(name), look_for)
find_companies_by_size
identifier_name
stlib.py
# Library for Stalker project #Libraries import pandas as pd import seaborn as sns from IPython.display import Image, display import matplotlib.pyplot as plt # Google search from googlesearch import search # Tldextract to get domain of url import tldextract as tld # BeautifulSoup from bs4 import BeautifulSoup as bs from bs4.element import Comment import urllib.request # NLTK to analyze webs import nltk from nltk.corpus import stopwords from nltk import FreqDist from nltk.tokenize import word_tokenize # Find close matches from difflib import get_close_matches # Sentiment analysis from textblob import TextBlob # Twitter sentiment analysis import tweepy # News API from newsapi import NewsApiClient # Credentials import credentials as cd # Finding info in APIs newsapi = NewsApiClient(api_key=cd.news_credentials['api_key']) news_sources = 'the-verge,buzzfeed,engadget,hacker-news,mashable,reddit-r-all,wired,techcrunch' # Twitter API consumer_key = cd.twitter_credentials['consumer_key'] consumer_key_secret = cd.twitter_credentials['consumer_key_secret'] access_token = cd.twitter_credentials['access_token'] access_token_secret = cd.twitter_credentials['access_token_secret'] auth = tweepy.OAuthHandler(consumer_key, consumer_key_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) # Finding query on Google # Finding related urls def find_webs(query): urls = [] rrss = ['facebook', 'twitter', 'linkedin', 'instagram', 'youtube','pinterest','angel'] sites = [] red_social = False for s in search(query, tld="com", num=30, stop=30, pause=3, lang='en'): if len(urls)<10: for rs in rrss: if rs in s or tld.extract(s).domain in sites: red_social = True if not red_social and s not in urls: urls.append(s) sites.append(tld.extract(s).domain) red_social = False return urls def tag_visible(element): if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']: return False if isinstance(element, Comment): return False return True def 
text_from_html(body): soup = bs(body, 'html.parser') texts = soup.findAll(text=True) visible_texts = filter(tag_visible, texts) return u" ".join(t.strip() for t in visible_texts) def cleaning_urls_text(url): try: html = text_from_html(urllib.request.urlopen(url).read()) stop_words = set(stopwords.words('english')) word_tokens = word_tokenize(html) return [w for w in word_tokens if not w in stop_words] except: return [] def filter_warning_words(sentence): warning_word = ['lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime','arson', 'assault', 'bigamy', 'blackmail', 'bribery', 'burglary', 'child abuse', 'conspiracy', 'espionage', 'forgery', 'fraud', 'genocide', 'hijacking','homicide', 'kidnapping', 'manslaughter', 'mugging', 'murder', 'perjury', 'rape', 'riot', 'robbery', 'shoplifting', 'slander', 'smuggling', 'treason', 'trespassing'] return list(filter(lambda word: word in warning_word, sentence)) def warnings_count(url): clean_sentence = cleaning_urls_text(url) length = len(filter_warning_words(clean_sentence)) return (url, length) if length != 0 else None def most_warnings(urls, look_for): list_len_tup = list(map(warnings_count, urls)) list_len_tup_clean = list(filter(lambda item: item != None, list_len_tup)) list_len_tup_clean.sort(key = lambda item: item[1], reverse=True) top_urls = [url for url, length in list_len_tup_clean[:2]] if len(top_urls) > 1: print(f""" We found something sketchy. You might want to check these links: - {top_urls[0]} - {top_urls[1]} """) elif len(top_urls) == 1: print(f""" We found something sketchy. You might want to check this link: {top_urls[0]} """) else: print(f"We couldn't find anything worrying about {look_for} on Google. Nice!") # Input correction def retrieve_name(my_name, companies): companies_list = [] for i in companies.dropna(subset=['name']).name: companies_list.append(i) if my_name in companies_list: return my_name elif len(get_close_matches(my_name, companies_list)) > 0: action = input("Did you mean %s instead? 
[y or n]: " % get_close_matches(my_name, companies_list)[0]) if (action == "y"): return get_close_matches(my_name, companies_list)[0] elif (action == "n"): return my_name else: return("we don't understand you. Apologies.") def retrieve_sector(my_sector, investments): investments = investments.dropna(subset=['raised_amount_usd', 'company_category_list']) sector_list0 = [] sector_list = [] for item in investments['company_category_list']: if ',' in item: sector_list0.append(item.split(sep=', ')) else: sector_list0.append(item) for i in sector_list0: if type(i) == list: for sec in i: sector_list.append(sec) else: sector_list.append(i) if my_sector in sector_list: return my_sector elif len(get_close_matches(my_sector, sector_list)) > 0: action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_sector, sector_list) [0]) if (action == "y"): return get_close_matches(my_sector, sector_list)[0] else: return my_sector # Sentiment analysis tweeter def tw_sent_sector(public_tweets, sector): sentiment_list = [] for tweet in public_tweets: analysis = TextBlob(tweet.text) sentiment_list.append(analysis.sentiment[0]) if sum(sentiment_list)>0: sent = 'Positive' elif sum(sentiment_list)<0: sent = 'Negative' else: sent = 'Neutral' print(f"The sentiment about {sector} industry in Twitter is {sent}") # Sentiment analysis news def news_sentiment_sector(public_news, sector):
# Look for data about sector def category(sector, investments): # Gather tweets public_tweets = api.search(sector) # Gather news public_news = newsapi.get_everything(q=sector,sources=news_sources,language='en') # Prepare the data for the sector investments = investments.dropna(subset=['company_category_list']) sector_investments = investments[investments['company_category_list'].str.contains(sector)].drop('index',axis=1) sector_investments.reset_index(drop=True) sector_investments['funded_at'] = pd.to_datetime(sector_investments['funded_at']) sector_investments['Year'] = sector_investments['funded_at'].apply(lambda x: x.year ) sector_investments['Month'] = sector_investments['funded_at'].apply(lambda x: x.month ) sector_investments['Day'] = sector_investments['funded_at'].apply(lambda x: x.day ) # Sentiment analysis Twitter tw_sent_sector(public_tweets, sector) # Sentiment analysis News news_sentiment_sector(public_news, sector) # create plot sector_year = sector_investments.groupby(['Year']).sum()[-10:] movement = ((sector_year.raised_amount_usd.iloc[len(sector_year)-1] -sector_year.raised_amount_usd.iloc[0])/sector_year.raised_amount_usd.iloc[0]*100) if sector_year.raised_amount_usd.iloc[0] + sector_year.raised_amount_usd.iloc[len(sector_year)-1] >= 0: in_dec = 'increased' grow = 'growing' else: in_dec = 'decreased' grow = 'falling' movement = movement[1:] sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd).set_title(f'Evolution of the amount invested in {sector}') investments_per_year = sector_investments.groupby(['Year']).count() peak_year = sector_year.index[sector_year['raised_amount_usd']== max(sector_year.raised_amount_usd)].to_list() peak_amount = max(sector_year.raised_amount_usd) #peak_year_invest = investments_per_year.index[investments_per_year['raised_amount_usd']== max(investments_per_year.raised_amount_usd)].to_list() low_amount = min(sector_year.raised_amount_usd) most_invested_companies = 
sector_investments.groupby(by='company_name').sum().sort_values(by='raised_amount_usd', ascending=False) low_year = sector_year.index[sector_year['raised_amount_usd']== min(sector_year.raised_amount_usd)].to_list() format_doll = ',.2f' print(f"""The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement),format_doll)}% in the last {len(sector_year)} years. It peaked in year {peak_year[0]} with ${format(peak_amount,format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount,format_doll)} invested. """) plt.ylabel('Raised amount in USD') plt.show() sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year.Day[-10:]).set_title(f'Evolution of the number of investment in {sector}') plt.ylabel('Number of investments') #print("""Plot explanaition average investment """) plt.show() #print(f""" # The Top 3 companies with biggest investments are: #- {most_invested_companies.index[0]} with ${most_invested_companies.raised_amount_usd[0]} raised, #- {most_invested_companies.index[1]} with ${most_invested_companies.raised_amount_usd[1]} raised and #- {most_invested_companies.index[2]} with ${most_invested_companies.raised_amount_usd[2]} raised #""") # Sentiment analysis founder def tw_analysis_founder(public_tweets, founder): sentiment_list = [] for tweet in public_tweets: analysis = TextBlob(tweet.text) sentiment_list.append(analysis.sentiment[0]) if sum(sentiment_list)>0: sent = 'Positive' elif sum(sentiment_list)<0: sent = 'Negative' else: sent = 'Neutral' print(f"The sentiment about {founder} in Twitter is {sent}") # Look for data about the founder def founders(founder, people): full_name = founder.split() public_tweets = api.search(founder) # What to search on Google look_for = founder for i in range(len(people)): if people.first_name.iloc[i] == full_name[0] and people.last_name.iloc[i]==full_name[1]: display(Image(url=people.profile_image_url[i])) print(f'We found this information about 
{founder}:') print(f"Founder's name: {people.first_name[i]} {people.last_name[i]} ") print(f"Title: {people.title[i]}") print(f"Organization: {people.organization[i]}") print(f"Location: {people.location_city[i]}, {people.location_region[i]}, {people.location_country_code[i]}") if people.twitter_url[i] != None: print(f"Twitter URL: {people.twitter_url[i]}") if people.linkedin_url[i] != None: print(f"Linkedin URL: {people.linkedin_url[i]}") if people.facebook_url[i] != None: print(f"Facebook URL: {people.facebook_url[i]}") # Twitter analysis tw_analysis_founder(public_tweets, founder) # Google search most_warnings(find_webs(founder), look_for) # Look for data about company def find_companies_by_size(size, companies, name, sector, company): company_nan = companies.dropna() company_sector = company_nan[company_nan['category_list'].str.contains(sector)].drop('index',axis=1).dropna() company_sector['total_funding_size']=pd.qcut(company_sector.funding_total_usd, q=[0, .25, .75, 1], labels=['small', 'medium', 'big']) if name in company_nan['name']: return company_sector[(company_sector['total_funding_size']==size)& (company_sector['funding_total_usd'] > 100000) & (company_sector['status'] != 'closed')& (company_sector['country_code']==company.country_code)].sample() else: return company_sector[(company_sector['total_funding_size']==size)& (company_sector['funding_total_usd'] > 100000) & (company_sector['status'] != 'closed')].sample() def competitor_info(company): print(f"Company name: {company.name.item()}") print(f"Total money raised: ${format(company.funding_total_usd.item(),',.2f')}") print(f"Total rounds: {company.funding_rounds.item()}") print(f"Webpage: {company.homepage_url.item()}") print(f"Country: {company.country_code.item()}") print(f"Status: {company.status.item()}") print(f"Founded in: {company.founded_at.item()}") # Sentiment analysis company def tw_analysis_company(public_tweets, company): sentiment_list = [] for tweet in public_tweets: analysis = 
TextBlob(tweet.text) sentiment_list.append(analysis.sentiment[0]) if sum(sentiment_list)>0: sent = 'Positive' elif sum(sentiment_list)<0: sent = 'Negative' else: sent = 'Neutral' print(f"The sentiment about {company} in Twitter is {sent}") def startup(name, companies, sector): company = companies[companies['name'] == name] # What to search on Google look_for = name # Gather tweets public_tweets = api.search(name) try: print(f"Company name: {company.name.item()}") print(f"Total money raised: ${format(company.funding_total_usd.item(),',.2f')}") print(f"Total rounds: {company.funding_rounds.item()}") print(f"Webpage: {company.homepage_url.item()}") print(f"Country: {company.country_code.item()}") print(f"Status: {company.status.item()}") # Find competitors print('\n') print(f"Competitors similar to {company.name.item()}:") print('\n') competitor_info(find_companies_by_size('small', companies, name, sector, company)) print('\n') competitor_info(find_companies_by_size('medium', companies, name, sector, company)) print('\n') competitor_info(find_companies_by_size('big', companies, name, sector, company)) except: print(f"We couldn't find information about {name} in Crunchbase") #Twitter sentiment analysis for company tw_analysis_company(public_tweets, name) # Google search most_warnings(find_webs(name), look_for)
news_list = [] for piece in range(len(public_news['articles'])): news_list.append(TextBlob(public_news['articles'][piece]['title']).sentiment[0]) news_list.append(TextBlob(public_news['articles'][piece]['description']).sentiment[0]) if sum(news_list)>0: news_sent = 'Positive' elif sum(news_list)<0: news_sent = 'Negative' else: news_sent = 'Neutral' print(f"There have been {len(public_news)} news pieces about {sector} industry recently and are in general {news_sent}")
identifier_body
stlib.py
# Library for Stalker project #Libraries import pandas as pd import seaborn as sns from IPython.display import Image, display import matplotlib.pyplot as plt # Google search from googlesearch import search # Tldextract to get domain of url import tldextract as tld # BeautifulSoup from bs4 import BeautifulSoup as bs from bs4.element import Comment import urllib.request # NLTK to analyze webs import nltk from nltk.corpus import stopwords from nltk import FreqDist from nltk.tokenize import word_tokenize # Find close matches from difflib import get_close_matches # Sentiment analysis from textblob import TextBlob # Twitter sentiment analysis import tweepy # News API from newsapi import NewsApiClient # Credentials import credentials as cd # Finding info in APIs newsapi = NewsApiClient(api_key=cd.news_credentials['api_key']) news_sources = 'the-verge,buzzfeed,engadget,hacker-news,mashable,reddit-r-all,wired,techcrunch' # Twitter API consumer_key = cd.twitter_credentials['consumer_key'] consumer_key_secret = cd.twitter_credentials['consumer_key_secret'] access_token = cd.twitter_credentials['access_token'] access_token_secret = cd.twitter_credentials['access_token_secret'] auth = tweepy.OAuthHandler(consumer_key, consumer_key_secret) auth.set_access_token(access_token, access_token_secret) api = tweepy.API(auth) # Finding query on Google # Finding related urls def find_webs(query): urls = [] rrss = ['facebook', 'twitter', 'linkedin', 'instagram', 'youtube','pinterest','angel'] sites = [] red_social = False for s in search(query, tld="com", num=30, stop=30, pause=3, lang='en'): if len(urls)<10: for rs in rrss: if rs in s or tld.extract(s).domain in sites: red_social = True if not red_social and s not in urls: urls.append(s) sites.append(tld.extract(s).domain) red_social = False return urls def tag_visible(element): if element.parent.name in ['style', 'script', 'head', 'title', 'meta', '[document]']: return False if isinstance(element, Comment): return False return True def 
text_from_html(body): soup = bs(body, 'html.parser') texts = soup.findAll(text=True) visible_texts = filter(tag_visible, texts) return u" ".join(t.strip() for t in visible_texts) def cleaning_urls_text(url): try: html = text_from_html(urllib.request.urlopen(url).read()) stop_words = set(stopwords.words('english')) word_tokens = word_tokenize(html) return [w for w in word_tokens if not w in stop_words] except: return [] def filter_warning_words(sentence): warning_word = ['lie', 'fraud', 'scam', 'extortion', 'deceit', 'crime','arson', 'assault', 'bigamy', 'blackmail', 'bribery', 'burglary', 'child abuse', 'conspiracy', 'espionage', 'forgery', 'fraud', 'genocide', 'hijacking','homicide', 'kidnapping', 'manslaughter', 'mugging', 'murder', 'perjury', 'rape', 'riot', 'robbery', 'shoplifting', 'slander', 'smuggling', 'treason', 'trespassing'] return list(filter(lambda word: word in warning_word, sentence)) def warnings_count(url): clean_sentence = cleaning_urls_text(url) length = len(filter_warning_words(clean_sentence)) return (url, length) if length != 0 else None def most_warnings(urls, look_for): list_len_tup = list(map(warnings_count, urls)) list_len_tup_clean = list(filter(lambda item: item != None, list_len_tup)) list_len_tup_clean.sort(key = lambda item: item[1], reverse=True) top_urls = [url for url, length in list_len_tup_clean[:2]] if len(top_urls) > 1: print(f""" We found something sketchy. You might want to check these links: - {top_urls[0]} - {top_urls[1]} """) elif len(top_urls) == 1: print(f""" We found something sketchy. You might want to check this link: {top_urls[0]} """) else: print(f"We couldn't find anything worrying about {look_for} on Google. Nice!") # Input correction def retrieve_name(my_name, companies): companies_list = [] for i in companies.dropna(subset=['name']).name: companies_list.append(i) if my_name in companies_list: return my_name elif len(get_close_matches(my_name, companies_list)) > 0:
def retrieve_sector(my_sector, investments): investments = investments.dropna(subset=['raised_amount_usd', 'company_category_list']) sector_list0 = [] sector_list = [] for item in investments['company_category_list']: if ',' in item: sector_list0.append(item.split(sep=', ')) else: sector_list0.append(item) for i in sector_list0: if type(i) == list: for sec in i: sector_list.append(sec) else: sector_list.append(i) if my_sector in sector_list: return my_sector elif len(get_close_matches(my_sector, sector_list)) > 0: action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_sector, sector_list) [0]) if (action == "y"): return get_close_matches(my_sector, sector_list)[0] else: return my_sector # Sentiment analysis tweeter def tw_sent_sector(public_tweets, sector): sentiment_list = [] for tweet in public_tweets: analysis = TextBlob(tweet.text) sentiment_list.append(analysis.sentiment[0]) if sum(sentiment_list)>0: sent = 'Positive' elif sum(sentiment_list)<0: sent = 'Negative' else: sent = 'Neutral' print(f"The sentiment about {sector} industry in Twitter is {sent}") # Sentiment analysis news def news_sentiment_sector(public_news, sector): news_list = [] for piece in range(len(public_news['articles'])): news_list.append(TextBlob(public_news['articles'][piece]['title']).sentiment[0]) news_list.append(TextBlob(public_news['articles'][piece]['description']).sentiment[0]) if sum(news_list)>0: news_sent = 'Positive' elif sum(news_list)<0: news_sent = 'Negative' else: news_sent = 'Neutral' print(f"There have been {len(public_news)} news pieces about {sector} industry recently and are in general {news_sent}") # Look for data about sector def category(sector, investments): # Gather tweets public_tweets = api.search(sector) # Gather news public_news = newsapi.get_everything(q=sector,sources=news_sources,language='en') # Prepare the data for the sector investments = investments.dropna(subset=['company_category_list']) sector_investments = 
investments[investments['company_category_list'].str.contains(sector)].drop('index',axis=1) sector_investments.reset_index(drop=True) sector_investments['funded_at'] = pd.to_datetime(sector_investments['funded_at']) sector_investments['Year'] = sector_investments['funded_at'].apply(lambda x: x.year ) sector_investments['Month'] = sector_investments['funded_at'].apply(lambda x: x.month ) sector_investments['Day'] = sector_investments['funded_at'].apply(lambda x: x.day ) # Sentiment analysis Twitter tw_sent_sector(public_tweets, sector) # Sentiment analysis News news_sentiment_sector(public_news, sector) # create plot sector_year = sector_investments.groupby(['Year']).sum()[-10:] movement = ((sector_year.raised_amount_usd.iloc[len(sector_year)-1] -sector_year.raised_amount_usd.iloc[0])/sector_year.raised_amount_usd.iloc[0]*100) if sector_year.raised_amount_usd.iloc[0] + sector_year.raised_amount_usd.iloc[len(sector_year)-1] >= 0: in_dec = 'increased' grow = 'growing' else: in_dec = 'decreased' grow = 'falling' movement = movement[1:] sns.lineplot(x=sector_year.index, y=sector_year.raised_amount_usd).set_title(f'Evolution of the amount invested in {sector}') investments_per_year = sector_investments.groupby(['Year']).count() peak_year = sector_year.index[sector_year['raised_amount_usd']== max(sector_year.raised_amount_usd)].to_list() peak_amount = max(sector_year.raised_amount_usd) #peak_year_invest = investments_per_year.index[investments_per_year['raised_amount_usd']== max(investments_per_year.raised_amount_usd)].to_list() low_amount = min(sector_year.raised_amount_usd) most_invested_companies = sector_investments.groupby(by='company_name').sum().sort_values(by='raised_amount_usd', ascending=False) low_year = sector_year.index[sector_year['raised_amount_usd']== min(sector_year.raised_amount_usd)].to_list() format_doll = ',.2f' print(f"""The amount of money invested in {sector} companies has {in_dec} by {format(abs(movement),format_doll)}% in the last 
{len(sector_year)} years. It peaked in year {peak_year[0]} with ${format(peak_amount,format_doll)} invested and its lowest point was in year {low_year[0]} with ${format(low_amount,format_doll)} invested. """) plt.ylabel('Raised amount in USD') plt.show() sns.lineplot(x=investments_per_year.index[-10:], y=investments_per_year.Day[-10:]).set_title(f'Evolution of the number of investment in {sector}') plt.ylabel('Number of investments') #print("""Plot explanaition average investment """) plt.show() #print(f""" # The Top 3 companies with biggest investments are: #- {most_invested_companies.index[0]} with ${most_invested_companies.raised_amount_usd[0]} raised, #- {most_invested_companies.index[1]} with ${most_invested_companies.raised_amount_usd[1]} raised and #- {most_invested_companies.index[2]} with ${most_invested_companies.raised_amount_usd[2]} raised #""") # Sentiment analysis founder def tw_analysis_founder(public_tweets, founder): sentiment_list = [] for tweet in public_tweets: analysis = TextBlob(tweet.text) sentiment_list.append(analysis.sentiment[0]) if sum(sentiment_list)>0: sent = 'Positive' elif sum(sentiment_list)<0: sent = 'Negative' else: sent = 'Neutral' print(f"The sentiment about {founder} in Twitter is {sent}") # Look for data about the founder def founders(founder, people): full_name = founder.split() public_tweets = api.search(founder) # What to search on Google look_for = founder for i in range(len(people)): if people.first_name.iloc[i] == full_name[0] and people.last_name.iloc[i]==full_name[1]: display(Image(url=people.profile_image_url[i])) print(f'We found this information about {founder}:') print(f"Founder's name: {people.first_name[i]} {people.last_name[i]} ") print(f"Title: {people.title[i]}") print(f"Organization: {people.organization[i]}") print(f"Location: {people.location_city[i]}, {people.location_region[i]}, {people.location_country_code[i]}") if people.twitter_url[i] != None: print(f"Twitter URL: {people.twitter_url[i]}") if 
people.linkedin_url[i] != None: print(f"Linkedin URL: {people.linkedin_url[i]}") if people.facebook_url[i] != None: print(f"Facebook URL: {people.facebook_url[i]}") # Twitter analysis tw_analysis_founder(public_tweets, founder) # Google search most_warnings(find_webs(founder), look_for) # Look for data about company def find_companies_by_size(size, companies, name, sector, company): company_nan = companies.dropna() company_sector = company_nan[company_nan['category_list'].str.contains(sector)].drop('index',axis=1).dropna() company_sector['total_funding_size']=pd.qcut(company_sector.funding_total_usd, q=[0, .25, .75, 1], labels=['small', 'medium', 'big']) if name in company_nan['name']: return company_sector[(company_sector['total_funding_size']==size)& (company_sector['funding_total_usd'] > 100000) & (company_sector['status'] != 'closed')& (company_sector['country_code']==company.country_code)].sample() else: return company_sector[(company_sector['total_funding_size']==size)& (company_sector['funding_total_usd'] > 100000) & (company_sector['status'] != 'closed')].sample() def competitor_info(company): print(f"Company name: {company.name.item()}") print(f"Total money raised: ${format(company.funding_total_usd.item(),',.2f')}") print(f"Total rounds: {company.funding_rounds.item()}") print(f"Webpage: {company.homepage_url.item()}") print(f"Country: {company.country_code.item()}") print(f"Status: {company.status.item()}") print(f"Founded in: {company.founded_at.item()}") # Sentiment analysis company def tw_analysis_company(public_tweets, company): sentiment_list = [] for tweet in public_tweets: analysis = TextBlob(tweet.text) sentiment_list.append(analysis.sentiment[0]) if sum(sentiment_list)>0: sent = 'Positive' elif sum(sentiment_list)<0: sent = 'Negative' else: sent = 'Neutral' print(f"The sentiment about {company} in Twitter is {sent}") def startup(name, companies, sector): company = companies[companies['name'] == name] # What to search on Google look_for = name # 
Gather tweets public_tweets = api.search(name) try: print(f"Company name: {company.name.item()}") print(f"Total money raised: ${format(company.funding_total_usd.item(),',.2f')}") print(f"Total rounds: {company.funding_rounds.item()}") print(f"Webpage: {company.homepage_url.item()}") print(f"Country: {company.country_code.item()}") print(f"Status: {company.status.item()}") # Find competitors print('\n') print(f"Competitors similar to {company.name.item()}:") print('\n') competitor_info(find_companies_by_size('small', companies, name, sector, company)) print('\n') competitor_info(find_companies_by_size('medium', companies, name, sector, company)) print('\n') competitor_info(find_companies_by_size('big', companies, name, sector, company)) except: print(f"We couldn't find information about {name} in Crunchbase") #Twitter sentiment analysis for company tw_analysis_company(public_tweets, name) # Google search most_warnings(find_webs(name), look_for)
action = input("Did you mean %s instead? [y or n]: " % get_close_matches(my_name, companies_list)[0]) if (action == "y"): return get_close_matches(my_name, companies_list)[0] elif (action == "n"): return my_name else: return("we don't understand you. Apologies.")
conditional_block
train.py
""" Training script. Should be pretty adaptable to whatever. """ # -*- coding: UTF-8 -*- import os # os.environ["CUDA_VISIBLE_DEVICES"] = "3" # f = os.popen("python train.py -params multiatt/default.json -folder saves/flagship_answer") import sys PYTHON_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(PYTHON_PATH) import argparse import shutil import multiprocessing import numpy as np import pandas as pd import torch from allennlp.common.params import Params from allennlp.training.learning_rate_schedulers import LearningRateScheduler from allennlp.training.optimizers import Optimizer from torch.nn import DataParallel from tqdm import tqdm from torch.utils.data import DataLoader from dataloaders.vcr_attribute import VCR, VCRLoader from utils.pytorch_misc import time_batch, save_checkpoint, clip_grad_norm, \ restore_checkpoint, print_para, restore_best_checkpoint import logging from tensorboardX import SummaryWriter import json logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG) # This is needed to make the imports work from allennlp.models import Model import models torch.backends.cudnn.enabled = False torch.set_printoptions(threshold=500000000, linewidth=8000000) ################################# ################################# ######## Data loading stuff ################################# ################################# parser = argparse.ArgumentParser(description='train') parser.add_argument( '-params', dest='params', help='Params location', type=str, ) parser.add_argument( '-rationale', action="store_true", help='use rationale', ) parser.add_argument( '-output', type=str ) parser.add_argument( '-no_tqdm', dest='no_tqdm', action='store_true', ) parser.add_argument( '-batch_size', dest='batch_size', type=int, default=96 ) parser.add_argument( '-records', type=str, default='records.json' ) parser.add_argument( '-describe', type=str, default='' ) args = parser.parse_args() seed 
= 1111 torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) batch_size = args.batch_size only_use_relevant_dets = False # args.rationale = True # args.params = 'models/multiatt/default2.json' folder = f'saves/{args.output}' writer = SummaryWriter(f'/home/share/wangkejie/vcr1/runs/{args.output}') params = Params.from_file(args.params) train, val, test = VCR.splits(embs_to_load=params['dataset_reader'].get('embs', 'bert_da'), only_use_relevant_dets=params['dataset_reader'].get('only_use_relevant_dets', only_use_relevant_dets)) ########################这个地方我改成了false,使用全部的box # NUM_GPUS = torch.cuda.device_count() # NUM_GPUS = 4 NUM_GPUS = 1 NUM_CPUS = multiprocessing.cpu_count() # NUM_CPUS = 32 if NUM_GPUS == 0: raise ValueError("you need gpus!") def _to_gpu(td): if NUM_GPUS > 1: return td for k in td: if k != 'metadata': td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[ k].cuda( non_blocking=True) return td # num_workers = (8 * NUM_GPUS if NUM_CPUS == 32 else 2 * NUM_GPUS) num_workers = 8 print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True) loader_params = {'batch_size': batch_size, 'num_workers': num_workers, "pin_memory": True} # train_loader = DataLoader(train, shuffle=True, collate_fn=collate_fn, drop_last=True, batch_size=batch_size, num_workers=num_workers) # val_loader = DataLoader(val, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers) # test_loader = DataLoader(test, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers) train_loader = VCRLoader.from_dataset(train, **loader_params) val_loader = VCRLoader.from_dataset(val, **loader_params) test_loader = VCRLoader.from_dataset(test, **loader_params) # train_loader = CudaDataLoader(train_loader, device='cuda', queue_size=4) # val_loader = CudaDataLoader(val_loader, device='cuda', queue_size=4) # test_loader = CudaDataLoader(test_loader, 
device='cuda', queue_size=4) ARGS_RESET_EVERY = 600 print("Loading {} for {}".format(params['model'].get('type', 'WTF?'), 'rationales' if args.rationale else 'answer'), flush=True) model = Model.from_params(vocab=train.vocab, params=params['model']) model = DataParallel(model).cuda() if NUM_GPUS > 1 else model.cuda() optimizer = Optimizer.from_params(model_parameters=[x for x in model.named_parameters() if x[1].requires_grad], params=params['trainer']['optimizer']) lr_scheduler_params = params['trainer'].pop("learning_rate_scheduler", None) scheduler = LearningRateScheduler.from_params(optimizer=optimizer, params=lr_scheduler_params) if lr_scheduler_params else None if os.path.exists(folder): print("Found folder! restoring", flush=True) start_epoch, val_metric_per_epoch = restore_checkpoint(model, optimizer, serialization_dir=folder, learning_rate_scheduler=scheduler) # start_epoch, val_metric_per_epoch = 0, [] print(start_epoch) print(val_metric_per_epoch) else: print("Making directories") os.makedirs(folder, exist_ok=True) start_epoch, val_metric_per_epoch = 0, [] shutil.copy2(args.params, folder) with open(os.path.join(folder, 'describe.txt'), 'a') as fp: fp.write(args.describe) fp.write('\n--------------------------\n') logger = open(f'saves/{args.output}/log.txt', mode='a', encoding='utf8') # store best performance of all models in a file param_shapes = print_para(model) num_batches = 0 for epoch_num in range(start_epoch, params['trainer']['num_epochs'] + start_epoch + 10): train_results = [] norms = [] model.train() for b, (time_per_batch, batch) in enumerate( time_batch(train_loader if args.no_tqdm else tqdm(train_loader, ncols=80), reset_every=ARGS_RESET_EVERY)): batch = _to_gpu(batch) optimizer.zero_grad() output_dict = model(**batch) loss = output_dict['loss'].mean() loss.backward() num_batches += 1 if scheduler: scheduler.step_batch(num_batches) norms.append( clip_grad_norm(model.named_parameters(), max_norm=params['trainer']['grad_norm'], clip=True, 
verbose=False) ) optimizer.step() train_results.append(pd.Series({'loss': output_dict['loss'].mean().item(), **(model.module if NUM_GPUS > 1 else model).get_metrics( reset=(b % ARGS_RESET_EVERY) == 0), 'sec_per_batch': time_per_batch, 'hr_per_epoch': len(train_loader) * time_per_batch / 3600, })) if b % ARGS_RESET_EVERY == 0 and b > 0: norms_df = pd.DataFrame(pd.DataFrame(norms[-ARGS_RESET_EVERY:]).mean(), columns=['norm']).join( param_shapes[['shape', 'size']]).sort_values('norm', ascending=False) print("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format( epoch_num, b, len(train_loader), norms_df.to_string(formatters={'norm': '{:.7f}'.format}), pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(), ), flush=True) logger.write("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format( epoch_num, b, len(train_loader), norms_df.to_string(formatters={'norm': '{:.7f}'.format}), pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(), )) writer.add_scalar('training_loss', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['loss'], global_step=num_batches) writer.add_scalar('training_accuracy', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['accuracy'], global_step=num_batches) print("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean())) logger.write("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean())) val_probs = [] val_labels = [] val_loss_sum = 0.0 q_att1 = [] a_att1 = [] q_att2 = [] a_att2 = [] model.eval() for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)): with torch.no_grad():
enate(val_labels, 0) val_probs = np.concatenate(val_probs, 0) val_loss_avg = val_loss_sum / val_labels.shape[0] val_metric_per_epoch.append(float(np.mean(val_labels == val_probs.argmax(1)))) if scheduler: scheduler.step(val_metric_per_epoch[-1]) print("Val epoch {} has acc {:.4f} and loss {:.4f}".format(epoch_num, val_metric_per_epoch[-1], val_loss_avg), flush=True) logger.write( "Val epoch {} has acc {:.4f} and loss {:.4f}".format(epoch_num, val_metric_per_epoch[-1], val_loss_avg)) if int(np.argmax(val_metric_per_epoch)) < (len(val_metric_per_epoch) - 1 - params['trainer']['patience']): print("Stopping at epoch {:2d}".format(epoch_num)) logger.write("Stopping at epoch {:2d}".format(epoch_num)) break save_checkpoint(model, optimizer, folder, epoch_num, val_metric_per_epoch, is_best=int(np.argmax(val_metric_per_epoch)) == (len(val_metric_per_epoch) - 1), q_att1=q_att1, a_att1=a_att1, q_att2=q_att2, a_att2=a_att2) writer.add_scalar('val_loss', val_loss_avg, global_step=epoch_num) writer.add_scalar('val_accuracy', val_metric_per_epoch[-1], global_step=epoch_num) print("STOPPING. now running the best model on the validation set", flush=True) logger.write("STOPPING. now running the best model on the validation set") # Load best restore_best_checkpoint(model, folder) model.eval() val_probs = [] val_labels = [] for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)): with torch.no_grad(): batch = _to_gpu(batch) output_dict = model(**batch) val_probs.append(output_dict['label_probs'].detach().cpu().numpy()) val_labels.append(batch['label'].detach().cpu().numpy()) val_labels = np.concatenate(val_labels, 0) val_probs = np.concatenate(val_probs, 0) acc = float(np.mean(val_labels == val_probs.argmax(1))) print("Final val accuracy is {:.4f}".format(acc)) logger.write("Final val accuracy is {:.4f}".format(acc)) np.save(os.path.join(folder, f'valpreds.npy'), val_probs)
batch = _to_gpu(batch) output_dict = model(**batch) val_probs.append(output_dict['label_probs'].detach().cpu().numpy()) val_labels.append(batch['label'].detach().cpu().numpy()) val_loss_sum += output_dict['loss'].mean().item() * batch['label'].shape[0] q_att1.append(output_dict['q_att1']) a_att1.append(output_dict['a_att1']) q_att2.append(output_dict['q_att2']) a_att2.append(output_dict['a_att2']) val_labels = np.concat
conditional_block
train.py
""" Training script. Should be pretty adaptable to whatever. """ # -*- coding: UTF-8 -*- import os # os.environ["CUDA_VISIBLE_DEVICES"] = "3" # f = os.popen("python train.py -params multiatt/default.json -folder saves/flagship_answer") import sys PYTHON_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(PYTHON_PATH) import argparse import shutil import multiprocessing import numpy as np import pandas as pd import torch from allennlp.common.params import Params from allennlp.training.learning_rate_schedulers import LearningRateScheduler from allennlp.training.optimizers import Optimizer from torch.nn import DataParallel from tqdm import tqdm from torch.utils.data import DataLoader from dataloaders.vcr_attribute import VCR, VCRLoader from utils.pytorch_misc import time_batch, save_checkpoint, clip_grad_norm, \ restore_checkpoint, print_para, restore_best_checkpoint import logging from tensorboardX import SummaryWriter import json logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG) # This is needed to make the imports work from allennlp.models import Model import models torch.backends.cudnn.enabled = False torch.set_printoptions(threshold=500000000, linewidth=8000000) ################################# ################################# ######## Data loading stuff ################################# ################################# parser = argparse.ArgumentParser(description='train') parser.add_argument( '-params', dest='params', help='Params location', type=str, ) parser.add_argument( '-rationale', action="store_true", help='use rationale', ) parser.add_argument( '-output', type=str ) parser.add_argument( '-no_tqdm', dest='no_tqdm', action='store_true', ) parser.add_argument( '-batch_size', dest='batch_size', type=int, default=96 ) parser.add_argument( '-records', type=str, default='records.json' ) parser.add_argument( '-describe', type=str, default='' ) args = parser.parse_args() seed 
= 1111 torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) batch_size = args.batch_size only_use_relevant_dets = False # args.rationale = True # args.params = 'models/multiatt/default2.json' folder = f'saves/{args.output}' writer = SummaryWriter(f'/home/share/wangkejie/vcr1/runs/{args.output}') params = Params.from_file(args.params) train, val, test = VCR.splits(embs_to_load=params['dataset_reader'].get('embs', 'bert_da'), only_use_relevant_dets=params['dataset_reader'].get('only_use_relevant_dets', only_use_relevant_dets)) ########################这个地方我改成了false,使用全部的box # NUM_GPUS = torch.cuda.device_count() # NUM_GPUS = 4 NUM_GPUS = 1 NUM_CPUS = multiprocessing.cpu_count() # NUM_CPUS = 32 if NUM_GPUS == 0: raise ValueError("you need gpus!") def _to_gpu(td): if NUM_GPUS
return td for k in td: if k != 'metadata': td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[ k].cuda( non_blocking=True) return td # num_workers = (8 * NUM_GPUS if NUM_CPUS == 32 else 2 * NUM_GPUS) num_workers = 8 print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True) loader_params = {'batch_size': batch_size, 'num_workers': num_workers, "pin_memory": True} # train_loader = DataLoader(train, shuffle=True, collate_fn=collate_fn, drop_last=True, batch_size=batch_size, num_workers=num_workers) # val_loader = DataLoader(val, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers) # test_loader = DataLoader(test, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers) train_loader = VCRLoader.from_dataset(train, **loader_params) val_loader = VCRLoader.from_dataset(val, **loader_params) test_loader = VCRLoader.from_dataset(test, **loader_params) # train_loader = CudaDataLoader(train_loader, device='cuda', queue_size=4) # val_loader = CudaDataLoader(val_loader, device='cuda', queue_size=4) # test_loader = CudaDataLoader(test_loader, device='cuda', queue_size=4) ARGS_RESET_EVERY = 600 print("Loading {} for {}".format(params['model'].get('type', 'WTF?'), 'rationales' if args.rationale else 'answer'), flush=True) model = Model.from_params(vocab=train.vocab, params=params['model']) model = DataParallel(model).cuda() if NUM_GPUS > 1 else model.cuda() optimizer = Optimizer.from_params(model_parameters=[x for x in model.named_parameters() if x[1].requires_grad], params=params['trainer']['optimizer']) lr_scheduler_params = params['trainer'].pop("learning_rate_scheduler", None) scheduler = LearningRateScheduler.from_params(optimizer=optimizer, params=lr_scheduler_params) if lr_scheduler_params else None if os.path.exists(folder): print("Found folder! 
restoring", flush=True) start_epoch, val_metric_per_epoch = restore_checkpoint(model, optimizer, serialization_dir=folder, learning_rate_scheduler=scheduler) # start_epoch, val_metric_per_epoch = 0, [] print(start_epoch) print(val_metric_per_epoch) else: print("Making directories") os.makedirs(folder, exist_ok=True) start_epoch, val_metric_per_epoch = 0, [] shutil.copy2(args.params, folder) with open(os.path.join(folder, 'describe.txt'), 'a') as fp: fp.write(args.describe) fp.write('\n--------------------------\n') logger = open(f'saves/{args.output}/log.txt', mode='a', encoding='utf8') # store best performance of all models in a file param_shapes = print_para(model) num_batches = 0 for epoch_num in range(start_epoch, params['trainer']['num_epochs'] + start_epoch + 10): train_results = [] norms = [] model.train() for b, (time_per_batch, batch) in enumerate( time_batch(train_loader if args.no_tqdm else tqdm(train_loader, ncols=80), reset_every=ARGS_RESET_EVERY)): batch = _to_gpu(batch) optimizer.zero_grad() output_dict = model(**batch) loss = output_dict['loss'].mean() loss.backward() num_batches += 1 if scheduler: scheduler.step_batch(num_batches) norms.append( clip_grad_norm(model.named_parameters(), max_norm=params['trainer']['grad_norm'], clip=True, verbose=False) ) optimizer.step() train_results.append(pd.Series({'loss': output_dict['loss'].mean().item(), **(model.module if NUM_GPUS > 1 else model).get_metrics( reset=(b % ARGS_RESET_EVERY) == 0), 'sec_per_batch': time_per_batch, 'hr_per_epoch': len(train_loader) * time_per_batch / 3600, })) if b % ARGS_RESET_EVERY == 0 and b > 0: norms_df = pd.DataFrame(pd.DataFrame(norms[-ARGS_RESET_EVERY:]).mean(), columns=['norm']).join( param_shapes[['shape', 'size']]).sort_values('norm', ascending=False) print("e{:2d}b{:5d}/{:5d}. 
norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format( epoch_num, b, len(train_loader), norms_df.to_string(formatters={'norm': '{:.7f}'.format}), pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(), ), flush=True) logger.write("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format( epoch_num, b, len(train_loader), norms_df.to_string(formatters={'norm': '{:.7f}'.format}), pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(), )) writer.add_scalar('training_loss', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['loss'], global_step=num_batches) writer.add_scalar('training_accuracy', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['accuracy'], global_step=num_batches) print("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean())) logger.write("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean())) val_probs = [] val_labels = [] val_loss_sum = 0.0 q_att1 = [] a_att1 = [] q_att2 = [] a_att2 = [] model.eval() for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)): with torch.no_grad(): batch = _to_gpu(batch) output_dict = model(**batch) val_probs.append(output_dict['label_probs'].detach().cpu().numpy()) val_labels.append(batch['label'].detach().cpu().numpy()) val_loss_sum += output_dict['loss'].mean().item() * batch['label'].shape[0] q_att1.append(output_dict['q_att1']) a_att1.append(output_dict['a_att1']) q_att2.append(output_dict['q_att2']) a_att2.append(output_dict['a_att2']) val_labels = np.concatenate(val_labels, 0) val_probs = np.concatenate(val_probs, 0) val_loss_avg = val_loss_sum / val_labels.shape[0] val_metric_per_epoch.append(float(np.mean(val_labels == val_probs.argmax(1)))) if scheduler: scheduler.step(val_metric_per_epoch[-1]) print("Val epoch {} has acc {:.4f} and loss {:.4f}".format(epoch_num, val_metric_per_epoch[-1], val_loss_avg), flush=True) logger.write( "Val epoch {} has acc {:.4f} and loss {:.4f}".format(epoch_num, 
val_metric_per_epoch[-1], val_loss_avg)) if int(np.argmax(val_metric_per_epoch)) < (len(val_metric_per_epoch) - 1 - params['trainer']['patience']): print("Stopping at epoch {:2d}".format(epoch_num)) logger.write("Stopping at epoch {:2d}".format(epoch_num)) break save_checkpoint(model, optimizer, folder, epoch_num, val_metric_per_epoch, is_best=int(np.argmax(val_metric_per_epoch)) == (len(val_metric_per_epoch) - 1), q_att1=q_att1, a_att1=a_att1, q_att2=q_att2, a_att2=a_att2) writer.add_scalar('val_loss', val_loss_avg, global_step=epoch_num) writer.add_scalar('val_accuracy', val_metric_per_epoch[-1], global_step=epoch_num) print("STOPPING. now running the best model on the validation set", flush=True) logger.write("STOPPING. now running the best model on the validation set") # Load best restore_best_checkpoint(model, folder) model.eval() val_probs = [] val_labels = [] for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)): with torch.no_grad(): batch = _to_gpu(batch) output_dict = model(**batch) val_probs.append(output_dict['label_probs'].detach().cpu().numpy()) val_labels.append(batch['label'].detach().cpu().numpy()) val_labels = np.concatenate(val_labels, 0) val_probs = np.concatenate(val_probs, 0) acc = float(np.mean(val_labels == val_probs.argmax(1))) print("Final val accuracy is {:.4f}".format(acc)) logger.write("Final val accuracy is {:.4f}".format(acc)) np.save(os.path.join(folder, f'valpreds.npy'), val_probs)
> 1:
identifier_name
train.py
""" Training script. Should be pretty adaptable to whatever. """ # -*- coding: UTF-8 -*- import os # os.environ["CUDA_VISIBLE_DEVICES"] = "3" # f = os.popen("python train.py -params multiatt/default.json -folder saves/flagship_answer") import sys PYTHON_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(PYTHON_PATH) import argparse import shutil import multiprocessing import numpy as np import pandas as pd import torch from allennlp.common.params import Params from allennlp.training.learning_rate_schedulers import LearningRateScheduler from allennlp.training.optimizers import Optimizer from torch.nn import DataParallel from tqdm import tqdm from torch.utils.data import DataLoader from dataloaders.vcr_attribute import VCR, VCRLoader from utils.pytorch_misc import time_batch, save_checkpoint, clip_grad_norm, \ restore_checkpoint, print_para, restore_best_checkpoint import logging from tensorboardX import SummaryWriter import json logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG) # This is needed to make the imports work from allennlp.models import Model import models torch.backends.cudnn.enabled = False torch.set_printoptions(threshold=500000000, linewidth=8000000) ################################# ################################# ######## Data loading stuff ################################# ################################# parser = argparse.ArgumentParser(description='train') parser.add_argument( '-params', dest='params', help='Params location', type=str, ) parser.add_argument( '-rationale', action="store_true", help='use rationale', ) parser.add_argument( '-output', type=str ) parser.add_argument( '-no_tqdm', dest='no_tqdm', action='store_true', ) parser.add_argument( '-batch_size', dest='batch_size', type=int, default=96 ) parser.add_argument( '-records', type=str, default='records.json' ) parser.add_argument( '-describe', type=str, default='' ) args = parser.parse_args() seed 
= 1111 torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) batch_size = args.batch_size only_use_relevant_dets = False # args.rationale = True # args.params = 'models/multiatt/default2.json' folder = f'saves/{args.output}' writer = SummaryWriter(f'/home/share/wangkejie/vcr1/runs/{args.output}') params = Params.from_file(args.params) train, val, test = VCR.splits(embs_to_load=params['dataset_reader'].get('embs', 'bert_da'), only_use_relevant_dets=params['dataset_reader'].get('only_use_relevant_dets', only_use_relevant_dets)) ########################这个地方我改成了false,使用全部的box # NUM_GPUS = torch.cuda.device_count() # NUM_GPUS = 4 NUM_GPUS = 1 NUM_CPUS = multiprocessing.cpu_count() # NUM_CPUS = 32 if NUM_GPUS == 0: raise ValueError("you need gpus!") def _to_gpu(td): if NUM_GPUS > 1: ret
GPUS if NUM_CPUS == 32 else 2 * NUM_GPUS) num_workers = 8 print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True) loader_params = {'batch_size': batch_size, 'num_workers': num_workers, "pin_memory": True} # train_loader = DataLoader(train, shuffle=True, collate_fn=collate_fn, drop_last=True, batch_size=batch_size, num_workers=num_workers) # val_loader = DataLoader(val, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers) # test_loader = DataLoader(test, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers) train_loader = VCRLoader.from_dataset(train, **loader_params) val_loader = VCRLoader.from_dataset(val, **loader_params) test_loader = VCRLoader.from_dataset(test, **loader_params) # train_loader = CudaDataLoader(train_loader, device='cuda', queue_size=4) # val_loader = CudaDataLoader(val_loader, device='cuda', queue_size=4) # test_loader = CudaDataLoader(test_loader, device='cuda', queue_size=4) ARGS_RESET_EVERY = 600 print("Loading {} for {}".format(params['model'].get('type', 'WTF?'), 'rationales' if args.rationale else 'answer'), flush=True) model = Model.from_params(vocab=train.vocab, params=params['model']) model = DataParallel(model).cuda() if NUM_GPUS > 1 else model.cuda() optimizer = Optimizer.from_params(model_parameters=[x for x in model.named_parameters() if x[1].requires_grad], params=params['trainer']['optimizer']) lr_scheduler_params = params['trainer'].pop("learning_rate_scheduler", None) scheduler = LearningRateScheduler.from_params(optimizer=optimizer, params=lr_scheduler_params) if lr_scheduler_params else None if os.path.exists(folder): print("Found folder! 
restoring", flush=True) start_epoch, val_metric_per_epoch = restore_checkpoint(model, optimizer, serialization_dir=folder, learning_rate_scheduler=scheduler) # start_epoch, val_metric_per_epoch = 0, [] print(start_epoch) print(val_metric_per_epoch) else: print("Making directories") os.makedirs(folder, exist_ok=True) start_epoch, val_metric_per_epoch = 0, [] shutil.copy2(args.params, folder) with open(os.path.join(folder, 'describe.txt'), 'a') as fp: fp.write(args.describe) fp.write('\n--------------------------\n') logger = open(f'saves/{args.output}/log.txt', mode='a', encoding='utf8') # store best performance of all models in a file param_shapes = print_para(model) num_batches = 0 for epoch_num in range(start_epoch, params['trainer']['num_epochs'] + start_epoch + 10): train_results = [] norms = [] model.train() for b, (time_per_batch, batch) in enumerate( time_batch(train_loader if args.no_tqdm else tqdm(train_loader, ncols=80), reset_every=ARGS_RESET_EVERY)): batch = _to_gpu(batch) optimizer.zero_grad() output_dict = model(**batch) loss = output_dict['loss'].mean() loss.backward() num_batches += 1 if scheduler: scheduler.step_batch(num_batches) norms.append( clip_grad_norm(model.named_parameters(), max_norm=params['trainer']['grad_norm'], clip=True, verbose=False) ) optimizer.step() train_results.append(pd.Series({'loss': output_dict['loss'].mean().item(), **(model.module if NUM_GPUS > 1 else model).get_metrics( reset=(b % ARGS_RESET_EVERY) == 0), 'sec_per_batch': time_per_batch, 'hr_per_epoch': len(train_loader) * time_per_batch / 3600, })) if b % ARGS_RESET_EVERY == 0 and b > 0: norms_df = pd.DataFrame(pd.DataFrame(norms[-ARGS_RESET_EVERY:]).mean(), columns=['norm']).join( param_shapes[['shape', 'size']]).sort_values('norm', ascending=False) print("e{:2d}b{:5d}/{:5d}. 
norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format( epoch_num, b, len(train_loader), norms_df.to_string(formatters={'norm': '{:.7f}'.format}), pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(), ), flush=True) logger.write("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format( epoch_num, b, len(train_loader), norms_df.to_string(formatters={'norm': '{:.7f}'.format}), pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(), )) writer.add_scalar('training_loss', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['loss'], global_step=num_batches) writer.add_scalar('training_accuracy', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['accuracy'], global_step=num_batches) print("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean())) logger.write("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean())) val_probs = [] val_labels = [] val_loss_sum = 0.0 q_att1 = [] a_att1 = [] q_att2 = [] a_att2 = [] model.eval() for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)): with torch.no_grad(): batch = _to_gpu(batch) output_dict = model(**batch) val_probs.append(output_dict['label_probs'].detach().cpu().numpy()) val_labels.append(batch['label'].detach().cpu().numpy()) val_loss_sum += output_dict['loss'].mean().item() * batch['label'].shape[0] q_att1.append(output_dict['q_att1']) a_att1.append(output_dict['a_att1']) q_att2.append(output_dict['q_att2']) a_att2.append(output_dict['a_att2']) val_labels = np.concatenate(val_labels, 0) val_probs = np.concatenate(val_probs, 0) val_loss_avg = val_loss_sum / val_labels.shape[0] val_metric_per_epoch.append(float(np.mean(val_labels == val_probs.argmax(1)))) if scheduler: scheduler.step(val_metric_per_epoch[-1]) print("Val epoch {} has acc {:.4f} and loss {:.4f}".format(epoch_num, val_metric_per_epoch[-1], val_loss_avg), flush=True) logger.write( "Val epoch {} has acc {:.4f} and loss {:.4f}".format(epoch_num, 
val_metric_per_epoch[-1], val_loss_avg)) if int(np.argmax(val_metric_per_epoch)) < (len(val_metric_per_epoch) - 1 - params['trainer']['patience']): print("Stopping at epoch {:2d}".format(epoch_num)) logger.write("Stopping at epoch {:2d}".format(epoch_num)) break save_checkpoint(model, optimizer, folder, epoch_num, val_metric_per_epoch, is_best=int(np.argmax(val_metric_per_epoch)) == (len(val_metric_per_epoch) - 1), q_att1=q_att1, a_att1=a_att1, q_att2=q_att2, a_att2=a_att2) writer.add_scalar('val_loss', val_loss_avg, global_step=epoch_num) writer.add_scalar('val_accuracy', val_metric_per_epoch[-1], global_step=epoch_num) print("STOPPING. now running the best model on the validation set", flush=True) logger.write("STOPPING. now running the best model on the validation set") # Load best restore_best_checkpoint(model, folder) model.eval() val_probs = [] val_labels = [] for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)): with torch.no_grad(): batch = _to_gpu(batch) output_dict = model(**batch) val_probs.append(output_dict['label_probs'].detach().cpu().numpy()) val_labels.append(batch['label'].detach().cpu().numpy()) val_labels = np.concatenate(val_labels, 0) val_probs = np.concatenate(val_probs, 0) acc = float(np.mean(val_labels == val_probs.argmax(1))) print("Final val accuracy is {:.4f}".format(acc)) logger.write("Final val accuracy is {:.4f}".format(acc)) np.save(os.path.join(folder, f'valpreds.npy'), val_probs)
urn td for k in td: if k != 'metadata': td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[ k].cuda( non_blocking=True) return td # num_workers = (8 * NUM_
identifier_body
train.py
""" Training script. Should be pretty adaptable to whatever. """ # -*- coding: UTF-8 -*- import os # os.environ["CUDA_VISIBLE_DEVICES"] = "3" # f = os.popen("python train.py -params multiatt/default.json -folder saves/flagship_answer") import sys PYTHON_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(PYTHON_PATH) import argparse import shutil import multiprocessing import numpy as np import pandas as pd import torch from allennlp.common.params import Params from allennlp.training.learning_rate_schedulers import LearningRateScheduler from allennlp.training.optimizers import Optimizer from torch.nn import DataParallel from tqdm import tqdm from torch.utils.data import DataLoader from dataloaders.vcr_attribute import VCR, VCRLoader from utils.pytorch_misc import time_batch, save_checkpoint, clip_grad_norm, \ restore_checkpoint, print_para, restore_best_checkpoint import logging from tensorboardX import SummaryWriter import json logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', level=logging.DEBUG) # This is needed to make the imports work from allennlp.models import Model import models torch.backends.cudnn.enabled = False torch.set_printoptions(threshold=500000000, linewidth=8000000) ################################# ################################# ######## Data loading stuff ################################# ################################# parser = argparse.ArgumentParser(description='train') parser.add_argument( '-params', dest='params', help='Params location', type=str, ) parser.add_argument( '-rationale', action="store_true", help='use rationale', ) parser.add_argument( '-output', type=str ) parser.add_argument( '-no_tqdm', dest='no_tqdm', action='store_true', ) parser.add_argument( '-batch_size', dest='batch_size', type=int, default=96 ) parser.add_argument( '-records', type=str, default='records.json' ) parser.add_argument( '-describe', type=str, default='' ) args = parser.parse_args() seed 
= 1111 torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) batch_size = args.batch_size only_use_relevant_dets = False # args.rationale = True # args.params = 'models/multiatt/default2.json' folder = f'saves/{args.output}' writer = SummaryWriter(f'/home/share/wangkejie/vcr1/runs/{args.output}') params = Params.from_file(args.params) train, val, test = VCR.splits(embs_to_load=params['dataset_reader'].get('embs', 'bert_da'), only_use_relevant_dets=params['dataset_reader'].get('only_use_relevant_dets',
NUM_CPUS = multiprocessing.cpu_count() # NUM_CPUS = 32 if NUM_GPUS == 0: raise ValueError("you need gpus!") def _to_gpu(td): if NUM_GPUS > 1: return td for k in td: if k != 'metadata': td[k] = {k2: v.cuda(non_blocking=True) for k2, v in td[k].items()} if isinstance(td[k], dict) else td[ k].cuda( non_blocking=True) return td # num_workers = (8 * NUM_GPUS if NUM_CPUS == 32 else 2 * NUM_GPUS) num_workers = 8 print(f"Using {num_workers} workers out of {NUM_CPUS} possible", flush=True) loader_params = {'batch_size': batch_size, 'num_workers': num_workers, "pin_memory": True} # train_loader = DataLoader(train, shuffle=True, collate_fn=collate_fn, drop_last=True, batch_size=batch_size, num_workers=num_workers) # val_loader = DataLoader(val, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers) # test_loader = DataLoader(test, shuffle=False, collate_fn=collate_fn, batch_size=batch_size, num_workers=num_workers) train_loader = VCRLoader.from_dataset(train, **loader_params) val_loader = VCRLoader.from_dataset(val, **loader_params) test_loader = VCRLoader.from_dataset(test, **loader_params) # train_loader = CudaDataLoader(train_loader, device='cuda', queue_size=4) # val_loader = CudaDataLoader(val_loader, device='cuda', queue_size=4) # test_loader = CudaDataLoader(test_loader, device='cuda', queue_size=4) ARGS_RESET_EVERY = 600 print("Loading {} for {}".format(params['model'].get('type', 'WTF?'), 'rationales' if args.rationale else 'answer'), flush=True) model = Model.from_params(vocab=train.vocab, params=params['model']) model = DataParallel(model).cuda() if NUM_GPUS > 1 else model.cuda() optimizer = Optimizer.from_params(model_parameters=[x for x in model.named_parameters() if x[1].requires_grad], params=params['trainer']['optimizer']) lr_scheduler_params = params['trainer'].pop("learning_rate_scheduler", None) scheduler = LearningRateScheduler.from_params(optimizer=optimizer, params=lr_scheduler_params) if lr_scheduler_params else None if 
os.path.exists(folder): print("Found folder! restoring", flush=True) start_epoch, val_metric_per_epoch = restore_checkpoint(model, optimizer, serialization_dir=folder, learning_rate_scheduler=scheduler) # start_epoch, val_metric_per_epoch = 0, [] print(start_epoch) print(val_metric_per_epoch) else: print("Making directories") os.makedirs(folder, exist_ok=True) start_epoch, val_metric_per_epoch = 0, [] shutil.copy2(args.params, folder) with open(os.path.join(folder, 'describe.txt'), 'a') as fp: fp.write(args.describe) fp.write('\n--------------------------\n') logger = open(f'saves/{args.output}/log.txt', mode='a', encoding='utf8') # store best performance of all models in a file param_shapes = print_para(model) num_batches = 0 for epoch_num in range(start_epoch, params['trainer']['num_epochs'] + start_epoch + 10): train_results = [] norms = [] model.train() for b, (time_per_batch, batch) in enumerate( time_batch(train_loader if args.no_tqdm else tqdm(train_loader, ncols=80), reset_every=ARGS_RESET_EVERY)): batch = _to_gpu(batch) optimizer.zero_grad() output_dict = model(**batch) loss = output_dict['loss'].mean() loss.backward() num_batches += 1 if scheduler: scheduler.step_batch(num_batches) norms.append( clip_grad_norm(model.named_parameters(), max_norm=params['trainer']['grad_norm'], clip=True, verbose=False) ) optimizer.step() train_results.append(pd.Series({'loss': output_dict['loss'].mean().item(), **(model.module if NUM_GPUS > 1 else model).get_metrics( reset=(b % ARGS_RESET_EVERY) == 0), 'sec_per_batch': time_per_batch, 'hr_per_epoch': len(train_loader) * time_per_batch / 3600, })) if b % ARGS_RESET_EVERY == 0 and b > 0: norms_df = pd.DataFrame(pd.DataFrame(norms[-ARGS_RESET_EVERY:]).mean(), columns=['norm']).join( param_shapes[['shape', 'size']]).sort_values('norm', ascending=False) print("e{:2d}b{:5d}/{:5d}. 
norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format( epoch_num, b, len(train_loader), norms_df.to_string(formatters={'norm': '{:.7f}'.format}), pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(), ), flush=True) logger.write("e{:2d}b{:5d}/{:5d}. norms: \n{}\nsumm:\n{}\n~~~~~~~~~~~~~~~~~~\n".format( epoch_num, b, len(train_loader), norms_df.to_string(formatters={'norm': '{:.7f}'.format}), pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean(), )) writer.add_scalar('training_loss', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['loss'], global_step=num_batches) writer.add_scalar('training_accuracy', pd.DataFrame(train_results[-ARGS_RESET_EVERY:]).mean()['accuracy'], global_step=num_batches) print("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean())) logger.write("---\nTRAIN EPOCH {:2d}:\n{}\n----".format(epoch_num, pd.DataFrame(train_results).mean())) val_probs = [] val_labels = [] val_loss_sum = 0.0 q_att1 = [] a_att1 = [] q_att2 = [] a_att2 = [] model.eval() for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)): with torch.no_grad(): batch = _to_gpu(batch) output_dict = model(**batch) val_probs.append(output_dict['label_probs'].detach().cpu().numpy()) val_labels.append(batch['label'].detach().cpu().numpy()) val_loss_sum += output_dict['loss'].mean().item() * batch['label'].shape[0] q_att1.append(output_dict['q_att1']) a_att1.append(output_dict['a_att1']) q_att2.append(output_dict['q_att2']) a_att2.append(output_dict['a_att2']) val_labels = np.concatenate(val_labels, 0) val_probs = np.concatenate(val_probs, 0) val_loss_avg = val_loss_sum / val_labels.shape[0] val_metric_per_epoch.append(float(np.mean(val_labels == val_probs.argmax(1)))) if scheduler: scheduler.step(val_metric_per_epoch[-1]) print("Val epoch {} has acc {:.4f} and loss {:.4f}".format(epoch_num, val_metric_per_epoch[-1], val_loss_avg), flush=True) logger.write( "Val epoch {} has acc {:.4f} and loss {:.4f}".format(epoch_num, 
val_metric_per_epoch[-1], val_loss_avg)) if int(np.argmax(val_metric_per_epoch)) < (len(val_metric_per_epoch) - 1 - params['trainer']['patience']): print("Stopping at epoch {:2d}".format(epoch_num)) logger.write("Stopping at epoch {:2d}".format(epoch_num)) break save_checkpoint(model, optimizer, folder, epoch_num, val_metric_per_epoch, is_best=int(np.argmax(val_metric_per_epoch)) == (len(val_metric_per_epoch) - 1), q_att1=q_att1, a_att1=a_att1, q_att2=q_att2, a_att2=a_att2) writer.add_scalar('val_loss', val_loss_avg, global_step=epoch_num) writer.add_scalar('val_accuracy', val_metric_per_epoch[-1], global_step=epoch_num) print("STOPPING. now running the best model on the validation set", flush=True) logger.write("STOPPING. now running the best model on the validation set") # Load best restore_best_checkpoint(model, folder) model.eval() val_probs = [] val_labels = [] for b, (time_per_batch, batch) in enumerate(time_batch(val_loader)): with torch.no_grad(): batch = _to_gpu(batch) output_dict = model(**batch) val_probs.append(output_dict['label_probs'].detach().cpu().numpy()) val_labels.append(batch['label'].detach().cpu().numpy()) val_labels = np.concatenate(val_labels, 0) val_probs = np.concatenate(val_probs, 0) acc = float(np.mean(val_labels == val_probs.argmax(1))) print("Final val accuracy is {:.4f}".format(acc)) logger.write("Final val accuracy is {:.4f}".format(acc)) np.save(os.path.join(folder, f'valpreds.npy'), val_probs)
only_use_relevant_dets)) ########################这个地方我改成了false,使用全部的box # NUM_GPUS = torch.cuda.device_count() # NUM_GPUS = 4 NUM_GPUS = 1
random_line_split
lab.py
#!/usr/bin/env python3 from util import read_osm_data, great_circle_distance, to_local_kml_url import time # NO ADDITIONAL IMPORTS! ALLOWED_HIGHWAY_TYPES = { 'motorway', 'trunk', 'primary', 'secondary', 'tertiary', 'unclassified', 'residential', 'living_street', 'motorway_link', 'trunk_link', 'primary_link', 'secondary_link', 'tertiary_link', } DEFAULT_SPEED_LIMIT_MPH = { 'motorway': 60, 'trunk': 45, 'primary': 35, 'secondary': 30, 'residential': 25, 'tertiary': 25, 'unclassified': 25, 'living_street': 10, 'motorway_link': 30, 'trunk_link': 30, 'primary_link': 30, 'secondary_link': 30, 'tertiary_link': 25, } def build_auxiliary_structures(nodes_filename, ways_filename): """ Create any auxiliary structures you are interested in, by reading the data from the given filenames (using read_osm_data) """ nodes = {} for way in read_osm_data(ways_filename): highway_type = way['tags'].get('highway', '( ͡° ͜ʖ ͡°)') if highway_type in ALLOWED_HIGHWAY_TYPES: nodes_along_way = way['nodes'] # List of nodes along this way for i in range(len(nodes_along_way) - 1): # A pair of adjacent nodes along this way left = nodes_along_way[i] right = nodes_along_way[i + 1] default_speed_limit = DEFAULT_SPEED_LIMIT_MPH[highway_type] # If this way doesn't have a speed limit tag, we use the default value based on highway type speed_limit = way['tags'].get('maxspeed_mph', default_speed_limit) def build_data(root, adjacent): """ root: ID of some node along way adjacent: ID of some node adjacent to root node along way """ new_node_data_struct = {'adjacent': {adjacent: speed_limit}} # Init dict for node data structure root_data = nodes.get(root, new_node_data_struct) # There might be another way where root and adjacent are directly adjacent, so our # speed limit is the max of the speed limits of those two ways: root_data['adjacent'][adjacent] = max(root_data['adjacent'].get(adjacent, 0), speed_limit) nodes[root] = root_data # Add the data on root to our dictionary of node data build_data(left, right) 
if not way['tags'].get('oneway', '( ͡° ͜ʖ ͡°)') == 'yes': # If this isn't a oneway way, we can build the data structure for the next node as well build_data(right, left) elif right == nodes_along_way[-1]: # In non-oneway ways, the above build_data(right, left) call creates the data structure # for the final node at the same time as the penultimate one. However, in the case of a # oneway path, we have to do it manually: nodes[right] = nodes.get(right, {'adjacent': {}}) for node in read_osm_data(nodes_filename): id = node['id'] if id in nodes: # If the id of this node in the generator was on a valid way, we add the data about that node # to its dictionary in nodes. # Add lat/lon data nodes[id]['lat'] = node['lat'] nodes[id]['lon'] = node['lon'] return nodes class Heap: def __init__(self, prop, start=None, start_item=None): self.property = 'min' if (prop == 'min') else 'max' # Heap property self.heap = [] # List representation of the heap self.items = [] # A list of the items corresponding to each index in the heap self.size = 0 if isinstance(start, list): self.heap = start[:] self.items = start_item if start_item is not None else [None] * len(start) self.size = len(self.heap) for i in range(len(start) // 2, -1, -1): # Second half of the heap is comprised entirely of leaves, so we know it fulfills our heap property # We loop backwards over the array and max heapify down so we always maintain our heap property # at every index after i self.heapify_down(i) elif start is not None: self.add(start, start_item) self.size = 1 def parent(self, i): # Returns the index of i's parent if it has one return (i + 1) // 2 - 1 if i > 0 else i def left(self, i): # Returns the index of i's left child if it has one return 2 * i + 1 if i < self.size else i def right(self, i): # Returns the index of i's right child if it has one return 2 * (i + 1) if i < self.size else i def add(self, val, item=None): # Add value to heap self.heap.append(val) self.items.append(item) self.size += 1 
self.heapify_up(self.size - 1) def next(self): # Get the value at the top of the heap if self.size > 0: if self.size == 1: self.size -= 1 return self.heap.pop(0), self.items.pop(0) else: # Swap element at the top of the heap to the end self.swap(0, self.size - 1) top = self.heap.pop(self.size - 1) top_item = self.items.pop(self.size - 1) self.size -= 1 self.heapify_down(0) # Heapify from the top return top, top_item def heapify_up(self, i): # Assume everything below i fulfills the heap property, shift value at index i up until # our heap property is fulfilled across the entire heap p = self.parent(i) if not p == i and ((self.property == 'max' and self.heap[i] > self.heap[p]) or (self.property == 'min' and self.heap[i] < self.heap[p])): # If node i violates this heap's heap property, swap it with its parent, then check again: self.swap(i, p) self.heapify_up(p) def heapify_down(self, p): # Assume everything below p fulfills the heap property, shift value at index p down until # our heap property is fulfilled across the entire heap l, r = self.left(p), self.right(p) if l >= self.size: # If p has no children, we do nothing return if self.property == 'max': c = l if r >= self.size or self.heap[l] > self.heap[r] else r if self.heap[p] < self.heap[c]: # If node p violates this heap's max heap property, swap it with its larger child, then check again: self.swap(p, c) self.heapify_down(c) else: # if property == 'min' c = l if r >= self.size or self.heap[l] < self.heap[r] else r if self.heap[p] > self.heap[c]: # If node p violates this heap's min heap property, swap it with its smaller child, then check again: self.swap(p, c) self.heapify_down(c) def swap(self, a, b): # Swaps the elements of heap and items at indices a and b self.heap[a], self.heap[b] = self.heap[b], self.heap[a] self.items[a], self.items[b] = self.items[b], self.items[a] def empty(self): # Returns true if this heap has no elements return self.size == 0 def __str__(self): # Returns the heap in the form of a 
list return str(self.heap) def find_min_cost_path(data, start, is_goal, get_children, cost, heuristic=lambda x: 0): """haha, uniform cost search go brrr""" paths = Heap('min', heuristic(start), ([start], 0)) # Min heap of paths and their respective costs (sorted by heuristic cost) seen = set() # Set of nodes we've already found shorter paths to # T H E S E A R C H L O O P B E G I N S while not paths.empty(): next_path = paths.next() # get the minimum cost path (heuristic cost, (path, path cost)) min_cost_path = next_path[1][0] min_cost = next_path[1][1] terminal_node = min_cost_path[-1] while terminal_node in seen: # If we've already found a path to the same node with a lower cost, we pick a new next_path if paths.empty(): # If we run out of paths to search, we return nothing return None next_path = paths.next() min_cost_path = next_path[1][0] min_cost = next_path[1][1] terminal_node = min_cost_path[-1] if is_goal(terminal_node): return min_cost_path seen.add(terminal_node) children = get_children(terminal_node) for c in children: if c not in seen: # If this child does not have an existing path to it already, we build a # data structure for it and at it to our min heap path_to_c = min_cost_path + [c] c_cost = min_cost + cost(data, terminal_node, c) c_heuristic = c_cost + heuristic(c) paths.add(c_heuristic, (path_to_c, c_cost)) # T H E S E A R C H L O O P E N D S return None # We failed to find a path to the goal node. Very sad. Return nothing :( def get_dist_cost(data, start_node_id, end_node_id): """ Calculates the cost of the direct path (which is assume to exist) between specified start and end nodes based on the distance between them. 
Parameters: data: The auxiliary data structure (a dictionary) that stores information about nodes and the ways that connect them start_node_id: The integer id of the start node in data end_node_id: The integer id of the end node in data """ p1 = get_coords(data, start_node_id) p2 = get_coords(data, end_node_id) return great_circle_distance(p1, p2) def get_coords(data, id): """ Returns the GPS coordinates of a node in the form of a (lat, lon) tuple given its id number """ return data[id]['lat'], data[id]['lon'] def find_short_path_nodes(aux_structures, node1, node2): """ Return the shortest path between the two nodes Parameters: aux_structures: the result of calling build_auxiliary_structures node1: node representing the start location node2: node representing the end location Returns: a list of node IDs representing the shortest path (in terms of distance) from node1 to node2 """ p = find_min_cost_path( aux_structures, node1, lambda x: x == node2, lambda parent_id: aux_structures[parent_id]['adjacent'], get_dist_cost, lambda x: gcd_heuristic(aux_structures, x, node2)) return list(p) if p is not None else None def gcd_heuristic(data, node1, node2): return great_circle_distance(get_coords(data, node1), get_coords(data, node2)) def find_short_path(aux_structures, loc1, loc2): """ Retu
losest_node(data, loc): """ Calculates the closest node in the given dataset to a specified query location Parameters: data: The auxiliary data structure (a dictionary) that stores information about nodes and the ways that connect them loc: The query location, given in terms of a tuple of two floats (latitude, longitude) """ min_dist = None closest = None for i in data: # Standard min-value search loop dist = great_circle_distance(get_coords(data, i), loc) if closest is None or dist < min_dist: closest = i min_dist = dist return closest def get_coord_list(data, ids): """ Converts a list of node ids to (latitude, longitude) tuples """ l = len(ids) coord_list = [None] * l for i in range(l): coord_list[i] = get_coords(data, ids[i]) return coord_list def find_fast_path(aux_structures, loc1, loc2): """ Return the shortest path between the two locations, in terms of expected time (taking into account speed limits). Parameters: aux_structures: the result of calling build_auxiliary_structures loc1: tuple of 2 floats: (latitude, longitude), representing the start location loc2: tuple of 2 floats: (latitude, longitude), representing the end location Returns: a list of (latitude, longitude) tuples representing the shortest path (in terms of time) from loc1 to loc2. """ node1 = get_closest_node(aux_structures, loc1) node2 = get_closest_node(aux_structures, loc2) p = find_min_cost_path( aux_structures, node1, lambda x: x == node2, lambda parent_id: aux_structures[parent_id]['adjacent'], get_speed_cost) return get_coord_list(aux_structures, p) if p is not None else None def get_speed_cost(data, start_node_id, end_node_id): """ Calculates the cost of the direct path (which is assume to exist) between specified start and end nodes based on the speed limit between them. 
Parameters: data: The auxiliary data structure (a dictionary) that stores information about nodes and the ways that connect them start_node_id: The integer id of the start node in data end_node_id: The integer id of the end node in data """ start_node = data[start_node_id] dist_cost = get_dist_cost(data, start_node_id, end_node_id) # Get the speed limit along the way that connects the starting and ending nodes speed_limit_between_nodes = start_node['adjacent'][end_node_id] return dist_cost / speed_limit_between_nodes # Cost = Time = Distance/Rate def print_data(data): print('Nodes:') for i in data: print("id: " + str(i) + " | " + str(data[i])) if __name__ == '__main__': # additional code here will be run only when lab_old.py is invoked directly # (not when imported from test.py), so this is a good place to put code # used, for example, to generate the results for the online questions. # mit_set = build_auxiliary_structures('resources/mit.nodes', 'resources/mit.ways') # midwest_set = build_auxiliary_structures('resources/midwest.nodes', 'resources/midwest.ways') # cambridge_set = build_auxiliary_structures('resources/cambridge.nodes', 'resources/cambridge.ways') # print(len(find_fast_path(midwest_set, (41.375288, -89.459541), (41.452802, -89.443683)))) # Without heuristic: 372625 pulls # With heuristic: 45928 pulls # Start Server: python3 server.py cambridge # Distance Path: http://localhost:6009/ # Fast Path: http://localhost:6009/?type=fast pass
rn the shortest path between the two locations Parameters: aux_structures: the result of calling build_auxiliary_structures loc1: tuple of 2 floats: (latitude, longitude), representing the start location loc2: tuple of 2 floats: (latitude, longitude), representing the end location Returns: a list of (latitude, longitude) tuples representing the shortest path (in terms of distance) from loc1 to loc2. """ node1 = get_closest_node(aux_structures, loc1) node2 = get_closest_node(aux_structures, loc2) p = find_min_cost_path( aux_structures, node1, lambda x: x == node2, lambda parent_id: aux_structures[parent_id]['adjacent'], get_dist_cost, lambda x: gcd_heuristic(aux_structures, x, node2)) return get_coord_list(aux_structures, p) if p is not None else None def get_c
identifier_body
lab.py
#!/usr/bin/env python3 from util import read_osm_data, great_circle_distance, to_local_kml_url import time # NO ADDITIONAL IMPORTS! ALLOWED_HIGHWAY_TYPES = { 'motorway', 'trunk', 'primary', 'secondary', 'tertiary', 'unclassified', 'residential', 'living_street', 'motorway_link', 'trunk_link', 'primary_link', 'secondary_link', 'tertiary_link', } DEFAULT_SPEED_LIMIT_MPH = { 'motorway': 60, 'trunk': 45, 'primary': 35, 'secondary': 30, 'residential': 25, 'tertiary': 25, 'unclassified': 25, 'living_street': 10, 'motorway_link': 30, 'trunk_link': 30, 'primary_link': 30, 'secondary_link': 30, 'tertiary_link': 25, } def build_auxiliary_structures(nodes_filename, ways_filename): """ Create any auxiliary structures you are interested in, by reading the data from the given filenames (using read_osm_data) """ nodes = {} for way in read_osm_data(ways_filename): highway_type = way['tags'].get('highway', '( ͡° ͜ʖ ͡°)') if highway_type in ALLOWED_HIGHWAY_TYPES: nodes_along_way = way['nodes'] # List of nodes along this way for i in range(len(nodes_along_way) - 1): # A pair of adjacent nodes along this way left = nodes_along_way[i] right = nodes_along_way[i + 1] default_speed_limit = DEFAULT_SPEED_LIMIT_MPH[highway_type] # If this way doesn't have a speed limit tag, we use the default value based on highway type speed_limit = way['tags'].get('maxspeed_mph', default_speed_limit) def build_data(root, adjacent): """ root: ID of some node along way adjacent: ID of some node adjacent to root node along way """ new_node_data_struct = {'adjacent': {adjacent: speed_limit}} # Init dict for node data structure root_data = nodes.get(root, new_node_data_struct) # There might be another way where root and adjacent are directly adjacent, so our # speed limit is the max of the speed limits of those two ways: root_data['adjacent'][adjacent] = max(root_data['adjacent'].get(adjacent, 0), speed_limit) nodes[root] = root_data # Add the data on root to our dictionary of node data build_data(left, right) 
if not way['tags'].get('oneway', '( ͡° ͜ʖ ͡°)') == 'yes': # If this isn't a oneway way, we can build the data structure for the next node as well build_data(right, left) elif right == nodes_along_way[-1]: # In non-oneway ways, the above build_data(right, left) call creates the data structure # for the final node at the same time as the penultimate one. However, in the case of a # oneway path, we have to do it manually: nodes[right] = nodes.get(right, {'adjacent': {}}) for node in read_osm_data(nodes_filename): id = node['id'] if id in nodes: # If the id of this node in the generator was on a valid way, we add the data about that node # to its dictionary in nodes. # Add lat/lon data nodes[id]['lat'] = node['lat'] nodes[id]['lon'] = node['lon'] return nodes class Heap: def __init__(self, prop, start=None, start_item=None): self.property = 'min' if (prop == 'min') else 'max' # Heap property self.heap = [] # List representation of the heap self.items = [] # A list of the items corresponding to each index in the heap self.size = 0 if isinstance(start, list): self.heap = start[:] self.items = start_item if start_item is not None else [None] * len(start) self.size = len(self.heap) for i in range(len(start) // 2, -1, -1): # Second half of the heap is comprised entirely of leaves, so we know it fulfills our heap property # We loop backwards over the array and max heapify down so we always maintain our heap property # at every index after i self.heapify_down(i) elif start is not None: self.add(start, start_item) self.size = 1 def parent(self, i): # Returns the index of i's parent if it has one return (i + 1) // 2 - 1 if i > 0 else i def left(self, i): # Returns the index of i's left child if it has one return 2 * i + 1 if i < self.size else i def right(self, i): # Returns the index of i's right child if it has one return 2 * (i + 1) if i < self.size else i def add(self, val, item=None): # Add value to heap self.heap.append(val) self.items.append(item) self.size += 1 
self.heapify_up(self.size - 1) def next(self): # Get the value at the top of the heap if self.size > 0: if self.size == 1: self.size -= 1 return self.heap.pop(0), self.items.pop(0) else: # Swap element at the top of the heap to the end self.swap(0, self.size - 1) top = self.heap.pop(self.size - 1) top_item = self.items.pop(self.size - 1) self.size -= 1 self.heapify_down(0) # Heapify from the top return top, top_item def heapify_up(self, i): # Assume everything below i fulfills the heap property, shift value at index i up until # our heap property is fulfilled across the entire heap p = self.parent(i) if not p == i and ((self.property == 'max' and self.heap[i] > self.heap[p]) or (self.property == 'min' and self.heap[i] < self.heap[p])): # If node i violates this heap's heap property, swap it with its parent, then check again: self.swap(i, p) self.heapify_up(p) def heapify_down(self, p): # Assume everything below p fulfills the heap property, shift value at index p down until # our heap property is fulfilled across the entire heap l, r = self.left(p), self.right(p) if l >= self.size: # If p has no children, we do nothing return if self.property == 'max': c = l if r >= self.size or self.heap[l] > self.heap[r] else r if self.heap[p] < self.heap[c]: # If node p violates this heap's max heap property, swap it with its larger child, then check again: self.swap(p, c) self.heapify_down(c) else: # if property == 'min' c = l if r >= self.size or self.heap[l] < self.heap[r] else r if self.heap[p] > self.heap[c]: # If node p violates this heap's min heap property, swap it with its smaller child, then check again: self.swap(p, c) self.heapify_down(c) def swap(self, a, b): # Swaps the elements of heap and items at indices a and b self.heap[a], self.heap[b] = self.heap[b], self.heap[a] self.items[a], self.items[b] = self.items[b], self.items[a] def empty(self): # Returns true if this heap has no elements return self.size == 0 def __str__(self): # Returns the heap in the form of a 
list return str(self.heap) def find_min_cost_path(data, start, is_goal, get_children, cost, heuristic=lambda x: 0): """haha, uniform cost search go brrr""" paths = Heap('min', heuristic(start), ([start], 0)) # Min heap of paths and their respective costs (sorted by heuristic cost) seen = set() # Set of nodes we've already found shorter paths to # T H E S E A R C H L O O P B E G I N S while not paths.empty(): next_path = paths.next() # get the minimum cost path (heuristic cost, (path, path cost)) min_cost_path = next_path[1][0] min_cost = next_path[1][1] terminal_node = min_cost_path[-1] while terminal_node in seen: # If we've already found a path to the same node with a lower cost, we pick a new next_path if paths.empty(): # If we run out of paths to search, we return nothing return None next_path = paths.next() min_cost_path = next_path[1][0] min_cost = next_path[1][1] terminal_node = min_cost_path[-1] if is_goal(terminal_node): return min_cost_path seen.add(terminal_node) children = get_children(terminal_node) for c in children: if c not in seen: # If this child does not have an existing path to it already, we build a # data structure for it and at it to our min heap path_to_c = min_cost_path + [c] c_cost = min_cost + cost(data, terminal_node, c) c_heuristic = c_cost + heuristic(c) paths.add(c_heuristic, (path_to_c, c_cost)) # T H E S E A R C H L O O P E N D S return None # We failed to find a path to the goal node. Very sad. Return nothing :( def get_dist_cost(data, start_node_id, end_node_id): """ Calculates the cost of the direct path (which is assume to exist) between specified start and end nodes based on the distance between them. 
Parameters: data: The auxiliary data structure (a dictionary) that stores information about nodes and the ways that connect them start_node_id: The integer id of the start node in data end_node_id: The integer id of the end node in data """ p1 = get_coords(data, start_node_id) p2 = get_coords(data, end_node_id) return great_circle_distance(p1, p2) def get_coords(data, id): """ Returns the GPS coordinates of a node in the form of a (lat, lon) tuple given its id number """ return data[id]['lat'], data[id]['lon'] def find_short_path_nodes(aux_structures, node1, node2): """ Return the shortest path between the two nodes Parameters: aux_structures: the result of calling build_auxiliary_structures node1: node representing the start location node2: node representing the end location Returns: a list of node IDs representing the shortest path (in terms of distance) from node1 to node2 """ p = find_min_cost_path( aux_structures, node1, lambda x: x == node2, lambda parent_id: aux_structures[parent_id]['adjacent'], get_dist_cost, lambda x: gcd_heuristic(aux_structures, x, node2)) return list(p) if p is not None else None def gcd_heuristic(data, node1, node2): return great_circle_distance(get_coords(data, node1), get_coords(data, node2)) def find_short_path(aux_structures, loc1, loc2): """ Return the shortest path between the two locations Parameters: aux_structures: the result of calling build_auxiliary_structures loc1: tuple of 2 floats: (latitude, longitude), representing the start location loc2: tuple of 2 floats: (latitude, longitude), representing the end location Returns: a list of (latitude, longitude) tuples representing the shortest path (in terms of distance) from loc1 to loc2. 
""" node1 = get_closest_node(aux_structures, loc1) node2 = get_closest_node(aux_structures, loc2) p = find_min_cost_path( aux_structures, node1, lambda x: x == node2, lambda parent_id: aux_structures[parent_id]['adjacent'], get_dist_cost, lambda x: gcd_heuristic(aux_structures, x, node2)) return get_coord_list(aux_structures, p) if p is not None else None def get_closest_node(data, loc): """ Calculates the closest node in the given dataset to a specified query location Parameters: data: The auxiliary data structure (a dictionary) that stores information about nodes and the ways that connect them loc: The query location, given in terms of a tuple of two floats (latitude, longitude) """ min_dist = None closest = None for i in data: # Standard min-value search loop dist = great_circle_distance(get_coords(data, i), loc) if closest is None or dist < min_dist: closest = i min_dist = dist return closest def get_coord_li
""" Converts a list of node ids to (latitude, longitude) tuples """ l = len(ids) coord_list = [None] * l for i in range(l): coord_list[i] = get_coords(data, ids[i]) return coord_list def find_fast_path(aux_structures, loc1, loc2): """ Return the shortest path between the two locations, in terms of expected time (taking into account speed limits). Parameters: aux_structures: the result of calling build_auxiliary_structures loc1: tuple of 2 floats: (latitude, longitude), representing the start location loc2: tuple of 2 floats: (latitude, longitude), representing the end location Returns: a list of (latitude, longitude) tuples representing the shortest path (in terms of time) from loc1 to loc2. """ node1 = get_closest_node(aux_structures, loc1) node2 = get_closest_node(aux_structures, loc2) p = find_min_cost_path( aux_structures, node1, lambda x: x == node2, lambda parent_id: aux_structures[parent_id]['adjacent'], get_speed_cost) return get_coord_list(aux_structures, p) if p is not None else None def get_speed_cost(data, start_node_id, end_node_id): """ Calculates the cost of the direct path (which is assume to exist) between specified start and end nodes based on the speed limit between them. 
Parameters: data: The auxiliary data structure (a dictionary) that stores information about nodes and the ways that connect them start_node_id: The integer id of the start node in data end_node_id: The integer id of the end node in data """ start_node = data[start_node_id] dist_cost = get_dist_cost(data, start_node_id, end_node_id) # Get the speed limit along the way that connects the starting and ending nodes speed_limit_between_nodes = start_node['adjacent'][end_node_id] return dist_cost / speed_limit_between_nodes # Cost = Time = Distance/Rate def print_data(data): print('Nodes:') for i in data: print("id: " + str(i) + " | " + str(data[i])) if __name__ == '__main__': # additional code here will be run only when lab_old.py is invoked directly # (not when imported from test.py), so this is a good place to put code # used, for example, to generate the results for the online questions. # mit_set = build_auxiliary_structures('resources/mit.nodes', 'resources/mit.ways') # midwest_set = build_auxiliary_structures('resources/midwest.nodes', 'resources/midwest.ways') # cambridge_set = build_auxiliary_structures('resources/cambridge.nodes', 'resources/cambridge.ways') # print(len(find_fast_path(midwest_set, (41.375288, -89.459541), (41.452802, -89.443683)))) # Without heuristic: 372625 pulls # With heuristic: 45928 pulls # Start Server: python3 server.py cambridge # Distance Path: http://localhost:6009/ # Fast Path: http://localhost:6009/?type=fast pass
st(data, ids):
identifier_name
lab.py
#!/usr/bin/env python3 from util import read_osm_data, great_circle_distance, to_local_kml_url import time # NO ADDITIONAL IMPORTS! ALLOWED_HIGHWAY_TYPES = { 'motorway', 'trunk', 'primary', 'secondary', 'tertiary', 'unclassified', 'residential', 'living_street', 'motorway_link', 'trunk_link', 'primary_link', 'secondary_link', 'tertiary_link', } DEFAULT_SPEED_LIMIT_MPH = { 'motorway': 60, 'trunk': 45, 'primary': 35, 'secondary': 30, 'residential': 25, 'tertiary': 25, 'unclassified': 25, 'living_street': 10, 'motorway_link': 30, 'trunk_link': 30, 'primary_link': 30, 'secondary_link': 30, 'tertiary_link': 25, }
def build_auxiliary_structures(nodes_filename, ways_filename): """ Create any auxiliary structures you are interested in, by reading the data from the given filenames (using read_osm_data) """ nodes = {} for way in read_osm_data(ways_filename): highway_type = way['tags'].get('highway', '( ͡° ͜ʖ ͡°)') if highway_type in ALLOWED_HIGHWAY_TYPES: nodes_along_way = way['nodes'] # List of nodes along this way for i in range(len(nodes_along_way) - 1): # A pair of adjacent nodes along this way left = nodes_along_way[i] right = nodes_along_way[i + 1] default_speed_limit = DEFAULT_SPEED_LIMIT_MPH[highway_type] # If this way doesn't have a speed limit tag, we use the default value based on highway type speed_limit = way['tags'].get('maxspeed_mph', default_speed_limit) def build_data(root, adjacent): """ root: ID of some node along way adjacent: ID of some node adjacent to root node along way """ new_node_data_struct = {'adjacent': {adjacent: speed_limit}} # Init dict for node data structure root_data = nodes.get(root, new_node_data_struct) # There might be another way where root and adjacent are directly adjacent, so our # speed limit is the max of the speed limits of those two ways: root_data['adjacent'][adjacent] = max(root_data['adjacent'].get(adjacent, 0), speed_limit) nodes[root] = root_data # Add the data on root to our dictionary of node data build_data(left, right) if not way['tags'].get('oneway', '( ͡° ͜ʖ ͡°)') == 'yes': # If this isn't a oneway way, we can build the data structure for the next node as well build_data(right, left) elif right == nodes_along_way[-1]: # In non-oneway ways, the above build_data(right, left) call creates the data structure # for the final node at the same time as the penultimate one. 
However, in the case of a # oneway path, we have to do it manually: nodes[right] = nodes.get(right, {'adjacent': {}}) for node in read_osm_data(nodes_filename): id = node['id'] if id in nodes: # If the id of this node in the generator was on a valid way, we add the data about that node # to its dictionary in nodes. # Add lat/lon data nodes[id]['lat'] = node['lat'] nodes[id]['lon'] = node['lon'] return nodes class Heap: def __init__(self, prop, start=None, start_item=None): self.property = 'min' if (prop == 'min') else 'max' # Heap property self.heap = [] # List representation of the heap self.items = [] # A list of the items corresponding to each index in the heap self.size = 0 if isinstance(start, list): self.heap = start[:] self.items = start_item if start_item is not None else [None] * len(start) self.size = len(self.heap) for i in range(len(start) // 2, -1, -1): # Second half of the heap is comprised entirely of leaves, so we know it fulfills our heap property # We loop backwards over the array and max heapify down so we always maintain our heap property # at every index after i self.heapify_down(i) elif start is not None: self.add(start, start_item) self.size = 1 def parent(self, i): # Returns the index of i's parent if it has one return (i + 1) // 2 - 1 if i > 0 else i def left(self, i): # Returns the index of i's left child if it has one return 2 * i + 1 if i < self.size else i def right(self, i): # Returns the index of i's right child if it has one return 2 * (i + 1) if i < self.size else i def add(self, val, item=None): # Add value to heap self.heap.append(val) self.items.append(item) self.size += 1 self.heapify_up(self.size - 1) def next(self): # Get the value at the top of the heap if self.size > 0: if self.size == 1: self.size -= 1 return self.heap.pop(0), self.items.pop(0) else: # Swap element at the top of the heap to the end self.swap(0, self.size - 1) top = self.heap.pop(self.size - 1) top_item = self.items.pop(self.size - 1) self.size -= 1 
self.heapify_down(0) # Heapify from the top return top, top_item def heapify_up(self, i): # Assume everything below i fulfills the heap property, shift value at index i up until # our heap property is fulfilled across the entire heap p = self.parent(i) if not p == i and ((self.property == 'max' and self.heap[i] > self.heap[p]) or (self.property == 'min' and self.heap[i] < self.heap[p])): # If node i violates this heap's heap property, swap it with its parent, then check again: self.swap(i, p) self.heapify_up(p) def heapify_down(self, p): # Assume everything below p fulfills the heap property, shift value at index p down until # our heap property is fulfilled across the entire heap l, r = self.left(p), self.right(p) if l >= self.size: # If p has no children, we do nothing return if self.property == 'max': c = l if r >= self.size or self.heap[l] > self.heap[r] else r if self.heap[p] < self.heap[c]: # If node p violates this heap's max heap property, swap it with its larger child, then check again: self.swap(p, c) self.heapify_down(c) else: # if property == 'min' c = l if r >= self.size or self.heap[l] < self.heap[r] else r if self.heap[p] > self.heap[c]: # If node p violates this heap's min heap property, swap it with its smaller child, then check again: self.swap(p, c) self.heapify_down(c) def swap(self, a, b): # Swaps the elements of heap and items at indices a and b self.heap[a], self.heap[b] = self.heap[b], self.heap[a] self.items[a], self.items[b] = self.items[b], self.items[a] def empty(self): # Returns true if this heap has no elements return self.size == 0 def __str__(self): # Returns the heap in the form of a list return str(self.heap) def find_min_cost_path(data, start, is_goal, get_children, cost, heuristic=lambda x: 0): """haha, uniform cost search go brrr""" paths = Heap('min', heuristic(start), ([start], 0)) # Min heap of paths and their respective costs (sorted by heuristic cost) seen = set() # Set of nodes we've already found shorter paths to # T H E 
S E A R C H L O O P B E G I N S while not paths.empty(): next_path = paths.next() # get the minimum cost path (heuristic cost, (path, path cost)) min_cost_path = next_path[1][0] min_cost = next_path[1][1] terminal_node = min_cost_path[-1] while terminal_node in seen: # If we've already found a path to the same node with a lower cost, we pick a new next_path if paths.empty(): # If we run out of paths to search, we return nothing return None next_path = paths.next() min_cost_path = next_path[1][0] min_cost = next_path[1][1] terminal_node = min_cost_path[-1] if is_goal(terminal_node): return min_cost_path seen.add(terminal_node) children = get_children(terminal_node) for c in children: if c not in seen: # If this child does not have an existing path to it already, we build a # data structure for it and at it to our min heap path_to_c = min_cost_path + [c] c_cost = min_cost + cost(data, terminal_node, c) c_heuristic = c_cost + heuristic(c) paths.add(c_heuristic, (path_to_c, c_cost)) # T H E S E A R C H L O O P E N D S return None # We failed to find a path to the goal node. Very sad. Return nothing :( def get_dist_cost(data, start_node_id, end_node_id): """ Calculates the cost of the direct path (which is assume to exist) between specified start and end nodes based on the distance between them. 
Parameters: data: The auxiliary data structure (a dictionary) that stores information about nodes and the ways that connect them start_node_id: The integer id of the start node in data end_node_id: The integer id of the end node in data """ p1 = get_coords(data, start_node_id) p2 = get_coords(data, end_node_id) return great_circle_distance(p1, p2) def get_coords(data, id): """ Returns the GPS coordinates of a node in the form of a (lat, lon) tuple given its id number """ return data[id]['lat'], data[id]['lon'] def find_short_path_nodes(aux_structures, node1, node2): """ Return the shortest path between the two nodes Parameters: aux_structures: the result of calling build_auxiliary_structures node1: node representing the start location node2: node representing the end location Returns: a list of node IDs representing the shortest path (in terms of distance) from node1 to node2 """ p = find_min_cost_path( aux_structures, node1, lambda x: x == node2, lambda parent_id: aux_structures[parent_id]['adjacent'], get_dist_cost, lambda x: gcd_heuristic(aux_structures, x, node2)) return list(p) if p is not None else None def gcd_heuristic(data, node1, node2): return great_circle_distance(get_coords(data, node1), get_coords(data, node2)) def find_short_path(aux_structures, loc1, loc2): """ Return the shortest path between the two locations Parameters: aux_structures: the result of calling build_auxiliary_structures loc1: tuple of 2 floats: (latitude, longitude), representing the start location loc2: tuple of 2 floats: (latitude, longitude), representing the end location Returns: a list of (latitude, longitude) tuples representing the shortest path (in terms of distance) from loc1 to loc2. 
""" node1 = get_closest_node(aux_structures, loc1) node2 = get_closest_node(aux_structures, loc2) p = find_min_cost_path( aux_structures, node1, lambda x: x == node2, lambda parent_id: aux_structures[parent_id]['adjacent'], get_dist_cost, lambda x: gcd_heuristic(aux_structures, x, node2)) return get_coord_list(aux_structures, p) if p is not None else None def get_closest_node(data, loc): """ Calculates the closest node in the given dataset to a specified query location Parameters: data: The auxiliary data structure (a dictionary) that stores information about nodes and the ways that connect them loc: The query location, given in terms of a tuple of two floats (latitude, longitude) """ min_dist = None closest = None for i in data: # Standard min-value search loop dist = great_circle_distance(get_coords(data, i), loc) if closest is None or dist < min_dist: closest = i min_dist = dist return closest def get_coord_list(data, ids): """ Converts a list of node ids to (latitude, longitude) tuples """ l = len(ids) coord_list = [None] * l for i in range(l): coord_list[i] = get_coords(data, ids[i]) return coord_list def find_fast_path(aux_structures, loc1, loc2): """ Return the shortest path between the two locations, in terms of expected time (taking into account speed limits). Parameters: aux_structures: the result of calling build_auxiliary_structures loc1: tuple of 2 floats: (latitude, longitude), representing the start location loc2: tuple of 2 floats: (latitude, longitude), representing the end location Returns: a list of (latitude, longitude) tuples representing the shortest path (in terms of time) from loc1 to loc2. 
""" node1 = get_closest_node(aux_structures, loc1) node2 = get_closest_node(aux_structures, loc2) p = find_min_cost_path( aux_structures, node1, lambda x: x == node2, lambda parent_id: aux_structures[parent_id]['adjacent'], get_speed_cost) return get_coord_list(aux_structures, p) if p is not None else None def get_speed_cost(data, start_node_id, end_node_id): """ Calculates the cost of the direct path (which is assume to exist) between specified start and end nodes based on the speed limit between them. Parameters: data: The auxiliary data structure (a dictionary) that stores information about nodes and the ways that connect them start_node_id: The integer id of the start node in data end_node_id: The integer id of the end node in data """ start_node = data[start_node_id] dist_cost = get_dist_cost(data, start_node_id, end_node_id) # Get the speed limit along the way that connects the starting and ending nodes speed_limit_between_nodes = start_node['adjacent'][end_node_id] return dist_cost / speed_limit_between_nodes # Cost = Time = Distance/Rate def print_data(data): print('Nodes:') for i in data: print("id: " + str(i) + " | " + str(data[i])) if __name__ == '__main__': # additional code here will be run only when lab_old.py is invoked directly # (not when imported from test.py), so this is a good place to put code # used, for example, to generate the results for the online questions. # mit_set = build_auxiliary_structures('resources/mit.nodes', 'resources/mit.ways') # midwest_set = build_auxiliary_structures('resources/midwest.nodes', 'resources/midwest.ways') # cambridge_set = build_auxiliary_structures('resources/cambridge.nodes', 'resources/cambridge.ways') # print(len(find_fast_path(midwest_set, (41.375288, -89.459541), (41.452802, -89.443683)))) # Without heuristic: 372625 pulls # With heuristic: 45928 pulls # Start Server: python3 server.py cambridge # Distance Path: http://localhost:6009/ # Fast Path: http://localhost:6009/?type=fast pass
random_line_split
lab.py
#!/usr/bin/env python3 from util import read_osm_data, great_circle_distance, to_local_kml_url import time # NO ADDITIONAL IMPORTS! ALLOWED_HIGHWAY_TYPES = { 'motorway', 'trunk', 'primary', 'secondary', 'tertiary', 'unclassified', 'residential', 'living_street', 'motorway_link', 'trunk_link', 'primary_link', 'secondary_link', 'tertiary_link', } DEFAULT_SPEED_LIMIT_MPH = { 'motorway': 60, 'trunk': 45, 'primary': 35, 'secondary': 30, 'residential': 25, 'tertiary': 25, 'unclassified': 25, 'living_street': 10, 'motorway_link': 30, 'trunk_link': 30, 'primary_link': 30, 'secondary_link': 30, 'tertiary_link': 25, } def build_auxiliary_structures(nodes_filename, ways_filename): """ Create any auxiliary structures you are interested in, by reading the data from the given filenames (using read_osm_data) """ nodes = {} for way in read_osm_data(ways_filename): highway_type = way['tags'].get('highway', '( ͡° ͜ʖ ͡°)') if highway_type in ALLOWED_HIGHWAY_TYPES: nodes_along_way = way['nodes'] # List of nodes along this way for i in range(len(nodes_along_way) - 1): # A pair of adjacent nodes along this way left =
de in read_osm_data(nodes_filename): id = node['id'] if id in nodes: # If the id of this node in the generator was on a valid way, we add the data about that node # to its dictionary in nodes. # Add lat/lon data nodes[id]['lat'] = node['lat'] nodes[id]['lon'] = node['lon'] return nodes class Heap: def __init__(self, prop, start=None, start_item=None): self.property = 'min' if (prop == 'min') else 'max' # Heap property self.heap = [] # List representation of the heap self.items = [] # A list of the items corresponding to each index in the heap self.size = 0 if isinstance(start, list): self.heap = start[:] self.items = start_item if start_item is not None else [None] * len(start) self.size = len(self.heap) for i in range(len(start) // 2, -1, -1): # Second half of the heap is comprised entirely of leaves, so we know it fulfills our heap property # We loop backwards over the array and max heapify down so we always maintain our heap property # at every index after i self.heapify_down(i) elif start is not None: self.add(start, start_item) self.size = 1 def parent(self, i): # Returns the index of i's parent if it has one return (i + 1) // 2 - 1 if i > 0 else i def left(self, i): # Returns the index of i's left child if it has one return 2 * i + 1 if i < self.size else i def right(self, i): # Returns the index of i's right child if it has one return 2 * (i + 1) if i < self.size else i def add(self, val, item=None): # Add value to heap self.heap.append(val) self.items.append(item) self.size += 1 self.heapify_up(self.size - 1) def next(self): # Get the value at the top of the heap if self.size > 0: if self.size == 1: self.size -= 1 return self.heap.pop(0), self.items.pop(0) else: # Swap element at the top of the heap to the end self.swap(0, self.size - 1) top = self.heap.pop(self.size - 1) top_item = self.items.pop(self.size - 1) self.size -= 1 self.heapify_down(0) # Heapify from the top return top, top_item def heapify_up(self, i): # Assume everything below i fulfills the 
heap property, shift value at index i up until # our heap property is fulfilled across the entire heap p = self.parent(i) if not p == i and ((self.property == 'max' and self.heap[i] > self.heap[p]) or (self.property == 'min' and self.heap[i] < self.heap[p])): # If node i violates this heap's heap property, swap it with its parent, then check again: self.swap(i, p) self.heapify_up(p) def heapify_down(self, p): # Assume everything below p fulfills the heap property, shift value at index p down until # our heap property is fulfilled across the entire heap l, r = self.left(p), self.right(p) if l >= self.size: # If p has no children, we do nothing return if self.property == 'max': c = l if r >= self.size or self.heap[l] > self.heap[r] else r if self.heap[p] < self.heap[c]: # If node p violates this heap's max heap property, swap it with its larger child, then check again: self.swap(p, c) self.heapify_down(c) else: # if property == 'min' c = l if r >= self.size or self.heap[l] < self.heap[r] else r if self.heap[p] > self.heap[c]: # If node p violates this heap's min heap property, swap it with its smaller child, then check again: self.swap(p, c) self.heapify_down(c) def swap(self, a, b): # Swaps the elements of heap and items at indices a and b self.heap[a], self.heap[b] = self.heap[b], self.heap[a] self.items[a], self.items[b] = self.items[b], self.items[a] def empty(self): # Returns true if this heap has no elements return self.size == 0 def __str__(self): # Returns the heap in the form of a list return str(self.heap) def find_min_cost_path(data, start, is_goal, get_children, cost, heuristic=lambda x: 0): """haha, uniform cost search go brrr""" paths = Heap('min', heuristic(start), ([start], 0)) # Min heap of paths and their respective costs (sorted by heuristic cost) seen = set() # Set of nodes we've already found shorter paths to # T H E S E A R C H L O O P B E G I N S while not paths.empty(): next_path = paths.next() # get the minimum cost path (heuristic cost, 
(path, path cost)) min_cost_path = next_path[1][0] min_cost = next_path[1][1] terminal_node = min_cost_path[-1] while terminal_node in seen: # If we've already found a path to the same node with a lower cost, we pick a new next_path if paths.empty(): # If we run out of paths to search, we return nothing return None next_path = paths.next() min_cost_path = next_path[1][0] min_cost = next_path[1][1] terminal_node = min_cost_path[-1] if is_goal(terminal_node): return min_cost_path seen.add(terminal_node) children = get_children(terminal_node) for c in children: if c not in seen: # If this child does not have an existing path to it already, we build a # data structure for it and at it to our min heap path_to_c = min_cost_path + [c] c_cost = min_cost + cost(data, terminal_node, c) c_heuristic = c_cost + heuristic(c) paths.add(c_heuristic, (path_to_c, c_cost)) # T H E S E A R C H L O O P E N D S return None # We failed to find a path to the goal node. Very sad. Return nothing :( def get_dist_cost(data, start_node_id, end_node_id): """ Calculates the cost of the direct path (which is assume to exist) between specified start and end nodes based on the distance between them. 
Parameters: data: The auxiliary data structure (a dictionary) that stores information about nodes and the ways that connect them start_node_id: The integer id of the start node in data end_node_id: The integer id of the end node in data """ p1 = get_coords(data, start_node_id) p2 = get_coords(data, end_node_id) return great_circle_distance(p1, p2) def get_coords(data, id): """ Returns the GPS coordinates of a node in the form of a (lat, lon) tuple given its id number """ return data[id]['lat'], data[id]['lon'] def find_short_path_nodes(aux_structures, node1, node2): """ Return the shortest path between the two nodes Parameters: aux_structures: the result of calling build_auxiliary_structures node1: node representing the start location node2: node representing the end location Returns: a list of node IDs representing the shortest path (in terms of distance) from node1 to node2 """ p = find_min_cost_path( aux_structures, node1, lambda x: x == node2, lambda parent_id: aux_structures[parent_id]['adjacent'], get_dist_cost, lambda x: gcd_heuristic(aux_structures, x, node2)) return list(p) if p is not None else None def gcd_heuristic(data, node1, node2): return great_circle_distance(get_coords(data, node1), get_coords(data, node2)) def find_short_path(aux_structures, loc1, loc2): """ Return the shortest path between the two locations Parameters: aux_structures: the result of calling build_auxiliary_structures loc1: tuple of 2 floats: (latitude, longitude), representing the start location loc2: tuple of 2 floats: (latitude, longitude), representing the end location Returns: a list of (latitude, longitude) tuples representing the shortest path (in terms of distance) from loc1 to loc2. 
""" node1 = get_closest_node(aux_structures, loc1) node2 = get_closest_node(aux_structures, loc2) p = find_min_cost_path( aux_structures, node1, lambda x: x == node2, lambda parent_id: aux_structures[parent_id]['adjacent'], get_dist_cost, lambda x: gcd_heuristic(aux_structures, x, node2)) return get_coord_list(aux_structures, p) if p is not None else None def get_closest_node(data, loc): """ Calculates the closest node in the given dataset to a specified query location Parameters: data: The auxiliary data structure (a dictionary) that stores information about nodes and the ways that connect them loc: The query location, given in terms of a tuple of two floats (latitude, longitude) """ min_dist = None closest = None for i in data: # Standard min-value search loop dist = great_circle_distance(get_coords(data, i), loc) if closest is None or dist < min_dist: closest = i min_dist = dist return closest def get_coord_list(data, ids): """ Converts a list of node ids to (latitude, longitude) tuples """ l = len(ids) coord_list = [None] * l for i in range(l): coord_list[i] = get_coords(data, ids[i]) return coord_list def find_fast_path(aux_structures, loc1, loc2): """ Return the shortest path between the two locations, in terms of expected time (taking into account speed limits). Parameters: aux_structures: the result of calling build_auxiliary_structures loc1: tuple of 2 floats: (latitude, longitude), representing the start location loc2: tuple of 2 floats: (latitude, longitude), representing the end location Returns: a list of (latitude, longitude) tuples representing the shortest path (in terms of time) from loc1 to loc2. 
""" node1 = get_closest_node(aux_structures, loc1) node2 = get_closest_node(aux_structures, loc2) p = find_min_cost_path( aux_structures, node1, lambda x: x == node2, lambda parent_id: aux_structures[parent_id]['adjacent'], get_speed_cost) return get_coord_list(aux_structures, p) if p is not None else None def get_speed_cost(data, start_node_id, end_node_id): """ Calculates the cost of the direct path (which is assume to exist) between specified start and end nodes based on the speed limit between them. Parameters: data: The auxiliary data structure (a dictionary) that stores information about nodes and the ways that connect them start_node_id: The integer id of the start node in data end_node_id: The integer id of the end node in data """ start_node = data[start_node_id] dist_cost = get_dist_cost(data, start_node_id, end_node_id) # Get the speed limit along the way that connects the starting and ending nodes speed_limit_between_nodes = start_node['adjacent'][end_node_id] return dist_cost / speed_limit_between_nodes # Cost = Time = Distance/Rate def print_data(data): print('Nodes:') for i in data: print("id: " + str(i) + " | " + str(data[i])) if __name__ == '__main__': # additional code here will be run only when lab_old.py is invoked directly # (not when imported from test.py), so this is a good place to put code # used, for example, to generate the results for the online questions. # mit_set = build_auxiliary_structures('resources/mit.nodes', 'resources/mit.ways') # midwest_set = build_auxiliary_structures('resources/midwest.nodes', 'resources/midwest.ways') # cambridge_set = build_auxiliary_structures('resources/cambridge.nodes', 'resources/cambridge.ways') # print(len(find_fast_path(midwest_set, (41.375288, -89.459541), (41.452802, -89.443683)))) # Without heuristic: 372625 pulls # With heuristic: 45928 pulls # Start Server: python3 server.py cambridge # Distance Path: http://localhost:6009/ # Fast Path: http://localhost:6009/?type=fast pass
nodes_along_way[i] right = nodes_along_way[i + 1] default_speed_limit = DEFAULT_SPEED_LIMIT_MPH[highway_type] # If this way doesn't have a speed limit tag, we use the default value based on highway type speed_limit = way['tags'].get('maxspeed_mph', default_speed_limit) def build_data(root, adjacent): """ root: ID of some node along way adjacent: ID of some node adjacent to root node along way """ new_node_data_struct = {'adjacent': {adjacent: speed_limit}} # Init dict for node data structure root_data = nodes.get(root, new_node_data_struct) # There might be another way where root and adjacent are directly adjacent, so our # speed limit is the max of the speed limits of those two ways: root_data['adjacent'][adjacent] = max(root_data['adjacent'].get(adjacent, 0), speed_limit) nodes[root] = root_data # Add the data on root to our dictionary of node data build_data(left, right) if not way['tags'].get('oneway', '( ͡° ͜ʖ ͡°)') == 'yes': # If this isn't a oneway way, we can build the data structure for the next node as well build_data(right, left) elif right == nodes_along_way[-1]: # In non-oneway ways, the above build_data(right, left) call creates the data structure # for the final node at the same time as the penultimate one. However, in the case of a # oneway path, we have to do it manually: nodes[right] = nodes.get(right, {'adjacent': {}}) for no
conditional_block
cassandra.go
// Copyright (c) 2019 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cassandra import ( "context" "reflect" "time" "github.com/uber/peloton/pkg/storage/objects/base" "github.com/uber/peloton/pkg/storage/orm" "github.com/gocql/gocql" log "github.com/sirupsen/logrus" "github.com/uber-go/tally" "go.uber.org/yarpc/yarpcerrors" ) const ( _defaultRetryTimeout = 50 * time.Millisecond _defaultRetryAttempts = 5 useCasWrite = true ) const ( // operation tags for metrics create = "create" cas = "cas" get = "get" getIter = "get_iter" update = "update" del = "delete" ) type cassandraConnector struct { // implements orm.Connector interface orm.Connector // Session is the gocql session created for this connector Session *gocql.Session // scope is the storage scope for metrics scope tally.Scope // scope is the storage scope for success metrics executeSuccessScope tally.Scope // scope is the storage scope for failure metrics executeFailScope tally.Scope // Conf is the Cassandra connector config for this cluster Conf *Config } // NewCassandraConnector initializes a Cassandra Connector func NewCassandraConnector( config *Config, scope tally.Scope, ) (orm.Connector, error) { session, err := CreateStoreSession( config.CassandraConn, config.StoreName) if err != nil { return nil, err } // create a storeScope for the keyspace StoreName storeScope := scope.SubScope("cql").Tagged( map[string]string{"store": config.StoreName}) return &cassandraConnector{ Session: 
session, scope: storeScope, executeSuccessScope: storeScope.Tagged( map[string]string{"result": "success"}), executeFailScope: storeScope.Tagged( map[string]string{"result": "fail"}), Conf: config, }, nil } // ensure that implementation (cassandraConnector) satisfies the interface var _ orm.Connector = (*cassandraConnector)(nil) // getGocqlErrorTag gets a error tag for metrics based on gocql error // We cannot just use err.Error() as a tag because it contains invalid // characters like = : etc. which will be rejected by M3 func getGocqlErrorTag(err error) string { if yarpcerrors.IsAlreadyExists(err) { return "already_exists" } if yarpcerrors.IsNotFound(err) { return "not_found" } switch err.(type) { case *gocql.RequestErrReadFailure: return "read_failure" case *gocql.RequestErrWriteFailure: return "write_failure" case *gocql.RequestErrAlreadyExists: return "already_exists" case *gocql.RequestErrReadTimeout: return "read_timeout" case *gocql.RequestErrWriteTimeout: return "write_timeout" case *gocql.RequestErrUnavailable: return "unavailable" case *gocql.RequestErrFunctionFailure: return "function_failure" case *gocql.RequestErrUnprepared: return "unprepared" default: return "unknown" } } // buildResultRow is used to allocate memory for the row to be populated by // Cassandra read operation based on what object fields are being read func buildResultRow(e *base.Definition, columns []string) []interface{} { results := make([]interface{}, len(columns)) timeType := reflect.ValueOf(time.Now()) gocqlUUIDType := reflect.ValueOf(gocql.UUIDFromTime(time.Now())) for i, column := range columns { // get the type of the field from the ColumnToType mapping for object // That we we can allocate appropriate memory for this field typ := e.ColumnToType[column] switch typ.Kind() { case reflect.String: var value *string results[i] = &value case reflect.Int32, reflect.Uint32, reflect.Int: // C* internally uses int and int64 var value *int results[i] = &value case reflect.Int64, 
reflect.Uint64: // C* internally uses int and int64 var value *int64 results[i] = &value case reflect.Bool: var value *bool results[i] = &value case reflect.Slice: var value *[]byte results[i] = &value case timeType.Kind(): var value *time.Time results[i] = &value case gocqlUUIDType.Kind(): var value *gocql.UUID results[i] = &value case reflect.Ptr: // Special case for custom optional string type: // string type used in Cassandra // converted to/from custom type in ORM layer if typ == reflect.TypeOf(&base.OptionalString{}) { var value *string results[i] = &value break } // Special case for custom optional int type: // int64 type used in Cassandra // converted to/from custom type in ORM layer if typ == reflect.TypeOf(&base.OptionalUInt64{}) { var value *int64 results[i] = &value break } // for unrecognized pointer types, fall back to default logging fallthrough default: // This should only happen if we start using a new cassandra type // without adding to the translation layer log.WithFields(log.Fields{"type": typ.Kind(), "column": column}). 
Infof("type not found") } } return results } // getRowFromResult translates a row read from Cassandra into a list of // base.Column to be interpreted by base store client func getRowFromResult( e *base.Definition, columnNames []string, columnVals []interface{}, ) []base.Column { row := make([]base.Column, 0, len(columnNames)) for i, columnName := range columnNames { // construct a list of column objects from the lists of column names // and values that were returned by the cassandra query column := base.Column{ Name: columnName, } switch rv := columnVals[i].(type) { case **int: column.Value = *rv case **int64: column.Value = *rv case **string: column.Value = *rv case **gocql.UUID: column.Value = *rv case **time.Time: column.Value = *rv case **bool: column.Value = *rv case **[]byte: column.Value = *rv default: // This should only happen if we start using a new cassandra type // without adding to the translation layer log.WithFields(log.Fields{ "data": columnVals[i], "column": columnName}).Infof("type not found") } row = append(row, column) } return row } // splitColumnNameValue is used to return list of column names and list of their // corresponding value. Order is very important in this lists as they will be // used separately when constructing the CQL query. func splitColumnNameValue(row []base.Column) ( colNames []string, colValues []interface{}) { // Split row into two lists of column names and column values. // So for a location `i` in the list, the colNames[i] and colValues[i] will // represent row[i] for _, column := range row { colNames = append(colNames, column.Name) colValues = append(colValues, column.Value) } return colNames, colValues } // Create creates a new row in DB if it already doesn't exist. Uses CAS write. func (c *cassandraConnector) CreateIfNotExists( ctx context.Context, e *base.Definition, row []base.Column, ) error { return c.create(ctx, e, row, useCasWrite) } // Create creates a new row in DB. 
func (c *cassandraConnector) Create( ctx context.Context, e *base.Definition, row []base.Column, ) error { return c.create(ctx, e, row, !useCasWrite) } func (c *cassandraConnector) create( ctx context.Context, e *base.Definition, row []base.Column, casWrite bool, ) error { // split row into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. colNames, colValues := splitColumnNameValue(row) // Prepare insert statement stmt, err := InsertStmt( Table(e.Name), Columns(colNames), Values(colValues), IfNotExist(casWrite), ) if err != nil { return err } operation := create if casWrite { operation = cas } q := c.Session.Query(stmt, colValues...).WithContext(ctx) if casWrite { applied, err := q.MapScanCAS(map[string]interface{}{}) if err != nil { sendCounters(c.executeFailScope, e.Name, operation, err) return err } if !applied { return yarpcerrors.AlreadyExistsErrorf("item already exists") } } else { if err := q.Exec(); err != nil { sendCounters(c.executeFailScope, e.Name, operation, err) return err } } sendLatency(c.scope, e.Name, operation, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, operation, nil) return nil } // buildSelectQuery builds a select query using base object and key columns func (c *cassandraConnector) buildSelectQuery( ctx context.Context, e *base.Definition, keyCols []base.Column, colNamesToRead []string, ) (*gocql.Query, error) { // split keyCols into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. keyColNames, keyColValues := splitColumnNameValue(keyCols) // Prepare select statement stmt, err := SelectStmt( Table(e.Name), Columns(colNamesToRead),
if err != nil { return nil, err } return c.Session.Query(stmt, keyColValues...).WithContext(ctx), nil } // Get fetches a record from DB using primary keys func (c *cassandraConnector) Get( ctx context.Context, e *base.Definition, keyCols []base.Column, colNamesToRead ...string, ) ([]base.Column, error) { if len(colNamesToRead) == 0 { colNamesToRead = e.GetColumnsToRead() } q, err := c.buildSelectQuery(ctx, e, keyCols, colNamesToRead) if err != nil { return nil, err } // build a result row result := buildResultRow(e, colNamesToRead) if err := q.Scan(result...); err != nil { if err == gocql.ErrNotFound { err = yarpcerrors.NotFoundErrorf(err.Error()) } sendCounters(c.executeFailScope, e.Name, get, err) return nil, err } sendLatency(c.scope, e.Name, get, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, get, nil) // translate the read result into a row ([]base.Column) return getRowFromResult(e, colNamesToRead, result), nil } // GetAll fetches all rows from DB using partition keys func (c *cassandraConnector) GetAll( ctx context.Context, e *base.Definition, keyCols []base.Column, ) (rows [][]base.Column, errors error) { iter, err := c.GetAllIter(ctx, e, keyCols) if err != nil { return nil, err } defer iter.Close() for { row, errors := iter.Next() if errors != nil { return nil, errors } if row != nil { rows = append(rows, row) } else { return rows, nil } } } // GetAllIter gives an iterator to fetch all rows from DB func (c *cassandraConnector) GetAllIter( ctx context.Context, e *base.Definition, keyCols []base.Column, ) (iter orm.Iterator, err error) { colNamesToRead := e.GetColumnsToRead() q, err := c.buildSelectQuery(ctx, e, keyCols, colNamesToRead) if err != nil { return nil, err } // execute query and get iterator cqlIter := q.Iter() sendLatency(c.scope, e.Name, getIter, time.Duration(q.Latency())) return newIterator( e, colNamesToRead, c.executeSuccessScope, c.executeFailScope, cqlIter, ), nil } // Delete deletes a record from DB using primary 
keys func (c *cassandraConnector) Delete( ctx context.Context, e *base.Definition, keyCols []base.Column, ) error { // split keyCols into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. keyColNames, keyColValues := splitColumnNameValue(keyCols) // Prepare delete statement stmt, err := DeleteStmt( Table(e.Name), Conditions(keyColNames), ) if err != nil { return err } q := c.Session.Query(stmt, keyColValues...).WithContext(ctx) if err := q.Exec(); err != nil { sendCounters(c.executeFailScope, e.Name, del, err) return err } sendLatency(c.scope, e.Name, del, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, del, nil) return nil } // Update updates an existing row in DB. func (c *cassandraConnector) Update( ctx context.Context, e *base.Definition, row []base.Column, keyCols []base.Column, ) error { // split keyCols into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. keyColNames, keyColValues := splitColumnNameValue(keyCols) // split row into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. colNames, colValues := splitColumnNameValue(row) // Prepare update statement stmt, err := UpdateStmt( Table(e.Name), Updates(colNames), Conditions(keyColNames), ) if err != nil { return err } // list of values to be supplied in the query updateVals := append(colValues, keyColValues...) 
q := c.Session.Query( stmt, updateVals...).WithContext(ctx) if err := q.Exec(); err != nil { sendCounters(c.executeFailScope, e.Name, update, err) return err } sendLatency(c.scope, e.Name, update, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, update, nil) return nil } // cassandraIterator implements interface Iterator for Cassandra type cassandraIterator struct { cqlIter *gocql.Iter tableDef *base.Definition colNamesToRead []string successScope tally.Scope failScope tally.Scope } // ensure that implementation (cassandraIterator) satisfies the interface var _ orm.Iterator = (*cassandraIterator)(nil) func newIterator( e *base.Definition, cols []string, successScope tally.Scope, failScope tally.Scope, cqlIter *gocql.Iter, ) *cassandraIterator { return &cassandraIterator{ cqlIter: cqlIter, tableDef: e, successScope: successScope, failScope: failScope, colNamesToRead: cols, } } func (iter *cassandraIterator) Close() { iter.cqlIter.Close() } func (iter *cassandraIterator) Next() ([]base.Column, error) { result := buildResultRow(iter.tableDef, iter.colNamesToRead) if iter.cqlIter.Scan(result...) 
{ row := getRowFromResult(iter.tableDef, iter.colNamesToRead, result) return row, nil } // Either end-of-results or error if errors := iter.cqlIter.Close(); errors != nil { sendCounters(iter.failScope, iter.tableDef.Name, getIter, errors) return nil, errors } sendCounters(iter.successScope, iter.tableDef.Name, getIter, nil) return nil, nil } // helper function to record call latency metric func sendLatency( scope tally.Scope, table, operation string, d time.Duration, ) { s := scope.Tagged(map[string]string{ "table": table, "operation": operation, }) s.Timer("execute_latency").Record(d) } // helper function to record cql query success/failure metrics func sendCounters( scope tally.Scope, table, operation string, err error, ) { errMsg := "none" if err != nil { errMsg = getGocqlErrorTag(err) } s := scope.Tagged(map[string]string{ "table": table, "operation": operation, "error": errMsg, }) s.Counter("execute").Inc(1) }
Conditions(keyColNames), )
random_line_split
cassandra.go
// Copyright (c) 2019 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cassandra import ( "context" "reflect" "time" "github.com/uber/peloton/pkg/storage/objects/base" "github.com/uber/peloton/pkg/storage/orm" "github.com/gocql/gocql" log "github.com/sirupsen/logrus" "github.com/uber-go/tally" "go.uber.org/yarpc/yarpcerrors" ) const ( _defaultRetryTimeout = 50 * time.Millisecond _defaultRetryAttempts = 5 useCasWrite = true ) const ( // operation tags for metrics create = "create" cas = "cas" get = "get" getIter = "get_iter" update = "update" del = "delete" ) type cassandraConnector struct { // implements orm.Connector interface orm.Connector // Session is the gocql session created for this connector Session *gocql.Session // scope is the storage scope for metrics scope tally.Scope // scope is the storage scope for success metrics executeSuccessScope tally.Scope // scope is the storage scope for failure metrics executeFailScope tally.Scope // Conf is the Cassandra connector config for this cluster Conf *Config } // NewCassandraConnector initializes a Cassandra Connector func NewCassandraConnector( config *Config, scope tally.Scope, ) (orm.Connector, error) { session, err := CreateStoreSession( config.CassandraConn, config.StoreName) if err != nil { return nil, err } // create a storeScope for the keyspace StoreName storeScope := scope.SubScope("cql").Tagged( map[string]string{"store": config.StoreName}) return &cassandraConnector{ Session: 
session, scope: storeScope, executeSuccessScope: storeScope.Tagged( map[string]string{"result": "success"}), executeFailScope: storeScope.Tagged( map[string]string{"result": "fail"}), Conf: config, }, nil } // ensure that implementation (cassandraConnector) satisfies the interface var _ orm.Connector = (*cassandraConnector)(nil) // getGocqlErrorTag gets a error tag for metrics based on gocql error // We cannot just use err.Error() as a tag because it contains invalid // characters like = : etc. which will be rejected by M3 func getGocqlErrorTag(err error) string { if yarpcerrors.IsAlreadyExists(err) { return "already_exists" } if yarpcerrors.IsNotFound(err) { return "not_found" } switch err.(type) { case *gocql.RequestErrReadFailure: return "read_failure" case *gocql.RequestErrWriteFailure: return "write_failure" case *gocql.RequestErrAlreadyExists: return "already_exists" case *gocql.RequestErrReadTimeout: return "read_timeout" case *gocql.RequestErrWriteTimeout: return "write_timeout" case *gocql.RequestErrUnavailable: return "unavailable" case *gocql.RequestErrFunctionFailure: return "function_failure" case *gocql.RequestErrUnprepared: return "unprepared" default: return "unknown" } } // buildResultRow is used to allocate memory for the row to be populated by // Cassandra read operation based on what object fields are being read func buildResultRow(e *base.Definition, columns []string) []interface{} { results := make([]interface{}, len(columns)) timeType := reflect.ValueOf(time.Now()) gocqlUUIDType := reflect.ValueOf(gocql.UUIDFromTime(time.Now())) for i, column := range columns { // get the type of the field from the ColumnToType mapping for object // That we we can allocate appropriate memory for this field typ := e.ColumnToType[column] switch typ.Kind() { case reflect.String: var value *string results[i] = &value case reflect.Int32, reflect.Uint32, reflect.Int: // C* internally uses int and int64 var value *int results[i] = &value case reflect.Int64, 
reflect.Uint64: // C* internally uses int and int64 var value *int64 results[i] = &value case reflect.Bool: var value *bool results[i] = &value case reflect.Slice: var value *[]byte results[i] = &value case timeType.Kind(): var value *time.Time results[i] = &value case gocqlUUIDType.Kind(): var value *gocql.UUID results[i] = &value case reflect.Ptr: // Special case for custom optional string type: // string type used in Cassandra // converted to/from custom type in ORM layer if typ == reflect.TypeOf(&base.OptionalString{}) { var value *string results[i] = &value break } // Special case for custom optional int type: // int64 type used in Cassandra // converted to/from custom type in ORM layer if typ == reflect.TypeOf(&base.OptionalUInt64{}) { var value *int64 results[i] = &value break } // for unrecognized pointer types, fall back to default logging fallthrough default: // This should only happen if we start using a new cassandra type // without adding to the translation layer log.WithFields(log.Fields{"type": typ.Kind(), "column": column}). Infof("type not found") } } return results } // getRowFromResult translates a row read from Cassandra into a list of // base.Column to be interpreted by base store client func
( e *base.Definition, columnNames []string, columnVals []interface{}, ) []base.Column { row := make([]base.Column, 0, len(columnNames)) for i, columnName := range columnNames { // construct a list of column objects from the lists of column names // and values that were returned by the cassandra query column := base.Column{ Name: columnName, } switch rv := columnVals[i].(type) { case **int: column.Value = *rv case **int64: column.Value = *rv case **string: column.Value = *rv case **gocql.UUID: column.Value = *rv case **time.Time: column.Value = *rv case **bool: column.Value = *rv case **[]byte: column.Value = *rv default: // This should only happen if we start using a new cassandra type // without adding to the translation layer log.WithFields(log.Fields{ "data": columnVals[i], "column": columnName}).Infof("type not found") } row = append(row, column) } return row } // splitColumnNameValue is used to return list of column names and list of their // corresponding value. Order is very important in this lists as they will be // used separately when constructing the CQL query. func splitColumnNameValue(row []base.Column) ( colNames []string, colValues []interface{}) { // Split row into two lists of column names and column values. // So for a location `i` in the list, the colNames[i] and colValues[i] will // represent row[i] for _, column := range row { colNames = append(colNames, column.Name) colValues = append(colValues, column.Value) } return colNames, colValues } // Create creates a new row in DB if it already doesn't exist. Uses CAS write. func (c *cassandraConnector) CreateIfNotExists( ctx context.Context, e *base.Definition, row []base.Column, ) error { return c.create(ctx, e, row, useCasWrite) } // Create creates a new row in DB. 
func (c *cassandraConnector) Create( ctx context.Context, e *base.Definition, row []base.Column, ) error { return c.create(ctx, e, row, !useCasWrite) } func (c *cassandraConnector) create( ctx context.Context, e *base.Definition, row []base.Column, casWrite bool, ) error { // split row into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. colNames, colValues := splitColumnNameValue(row) // Prepare insert statement stmt, err := InsertStmt( Table(e.Name), Columns(colNames), Values(colValues), IfNotExist(casWrite), ) if err != nil { return err } operation := create if casWrite { operation = cas } q := c.Session.Query(stmt, colValues...).WithContext(ctx) if casWrite { applied, err := q.MapScanCAS(map[string]interface{}{}) if err != nil { sendCounters(c.executeFailScope, e.Name, operation, err) return err } if !applied { return yarpcerrors.AlreadyExistsErrorf("item already exists") } } else { if err := q.Exec(); err != nil { sendCounters(c.executeFailScope, e.Name, operation, err) return err } } sendLatency(c.scope, e.Name, operation, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, operation, nil) return nil } // buildSelectQuery builds a select query using base object and key columns func (c *cassandraConnector) buildSelectQuery( ctx context.Context, e *base.Definition, keyCols []base.Column, colNamesToRead []string, ) (*gocql.Query, error) { // split keyCols into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. 
keyColNames, keyColValues := splitColumnNameValue(keyCols) // Prepare select statement stmt, err := SelectStmt( Table(e.Name), Columns(colNamesToRead), Conditions(keyColNames), ) if err != nil { return nil, err } return c.Session.Query(stmt, keyColValues...).WithContext(ctx), nil } // Get fetches a record from DB using primary keys func (c *cassandraConnector) Get( ctx context.Context, e *base.Definition, keyCols []base.Column, colNamesToRead ...string, ) ([]base.Column, error) { if len(colNamesToRead) == 0 { colNamesToRead = e.GetColumnsToRead() } q, err := c.buildSelectQuery(ctx, e, keyCols, colNamesToRead) if err != nil { return nil, err } // build a result row result := buildResultRow(e, colNamesToRead) if err := q.Scan(result...); err != nil { if err == gocql.ErrNotFound { err = yarpcerrors.NotFoundErrorf(err.Error()) } sendCounters(c.executeFailScope, e.Name, get, err) return nil, err } sendLatency(c.scope, e.Name, get, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, get, nil) // translate the read result into a row ([]base.Column) return getRowFromResult(e, colNamesToRead, result), nil } // GetAll fetches all rows from DB using partition keys func (c *cassandraConnector) GetAll( ctx context.Context, e *base.Definition, keyCols []base.Column, ) (rows [][]base.Column, errors error) { iter, err := c.GetAllIter(ctx, e, keyCols) if err != nil { return nil, err } defer iter.Close() for { row, errors := iter.Next() if errors != nil { return nil, errors } if row != nil { rows = append(rows, row) } else { return rows, nil } } } // GetAllIter gives an iterator to fetch all rows from DB func (c *cassandraConnector) GetAllIter( ctx context.Context, e *base.Definition, keyCols []base.Column, ) (iter orm.Iterator, err error) { colNamesToRead := e.GetColumnsToRead() q, err := c.buildSelectQuery(ctx, e, keyCols, colNamesToRead) if err != nil { return nil, err } // execute query and get iterator cqlIter := q.Iter() sendLatency(c.scope, e.Name, 
getIter, time.Duration(q.Latency())) return newIterator( e, colNamesToRead, c.executeSuccessScope, c.executeFailScope, cqlIter, ), nil } // Delete deletes a record from DB using primary keys func (c *cassandraConnector) Delete( ctx context.Context, e *base.Definition, keyCols []base.Column, ) error { // split keyCols into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. keyColNames, keyColValues := splitColumnNameValue(keyCols) // Prepare delete statement stmt, err := DeleteStmt( Table(e.Name), Conditions(keyColNames), ) if err != nil { return err } q := c.Session.Query(stmt, keyColValues...).WithContext(ctx) if err := q.Exec(); err != nil { sendCounters(c.executeFailScope, e.Name, del, err) return err } sendLatency(c.scope, e.Name, del, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, del, nil) return nil } // Update updates an existing row in DB. func (c *cassandraConnector) Update( ctx context.Context, e *base.Definition, row []base.Column, keyCols []base.Column, ) error { // split keyCols into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. keyColNames, keyColValues := splitColumnNameValue(keyCols) // split row into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. colNames, colValues := splitColumnNameValue(row) // Prepare update statement stmt, err := UpdateStmt( Table(e.Name), Updates(colNames), Conditions(keyColNames), ) if err != nil { return err } // list of values to be supplied in the query updateVals := append(colValues, keyColValues...) 
q := c.Session.Query( stmt, updateVals...).WithContext(ctx) if err := q.Exec(); err != nil { sendCounters(c.executeFailScope, e.Name, update, err) return err } sendLatency(c.scope, e.Name, update, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, update, nil) return nil } // cassandraIterator implements interface Iterator for Cassandra type cassandraIterator struct { cqlIter *gocql.Iter tableDef *base.Definition colNamesToRead []string successScope tally.Scope failScope tally.Scope } // ensure that implementation (cassandraIterator) satisfies the interface var _ orm.Iterator = (*cassandraIterator)(nil) func newIterator( e *base.Definition, cols []string, successScope tally.Scope, failScope tally.Scope, cqlIter *gocql.Iter, ) *cassandraIterator { return &cassandraIterator{ cqlIter: cqlIter, tableDef: e, successScope: successScope, failScope: failScope, colNamesToRead: cols, } } func (iter *cassandraIterator) Close() { iter.cqlIter.Close() } func (iter *cassandraIterator) Next() ([]base.Column, error) { result := buildResultRow(iter.tableDef, iter.colNamesToRead) if iter.cqlIter.Scan(result...) 
{ row := getRowFromResult(iter.tableDef, iter.colNamesToRead, result) return row, nil } // Either end-of-results or error if errors := iter.cqlIter.Close(); errors != nil { sendCounters(iter.failScope, iter.tableDef.Name, getIter, errors) return nil, errors } sendCounters(iter.successScope, iter.tableDef.Name, getIter, nil) return nil, nil } // helper function to record call latency metric func sendLatency( scope tally.Scope, table, operation string, d time.Duration, ) { s := scope.Tagged(map[string]string{ "table": table, "operation": operation, }) s.Timer("execute_latency").Record(d) } // helper function to record cql query success/failure metrics func sendCounters( scope tally.Scope, table, operation string, err error, ) { errMsg := "none" if err != nil { errMsg = getGocqlErrorTag(err) } s := scope.Tagged(map[string]string{ "table": table, "operation": operation, "error": errMsg, }) s.Counter("execute").Inc(1) }
getRowFromResult
identifier_name
cassandra.go
// Copyright (c) 2019 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cassandra import ( "context" "reflect" "time" "github.com/uber/peloton/pkg/storage/objects/base" "github.com/uber/peloton/pkg/storage/orm" "github.com/gocql/gocql" log "github.com/sirupsen/logrus" "github.com/uber-go/tally" "go.uber.org/yarpc/yarpcerrors" ) const ( _defaultRetryTimeout = 50 * time.Millisecond _defaultRetryAttempts = 5 useCasWrite = true ) const ( // operation tags for metrics create = "create" cas = "cas" get = "get" getIter = "get_iter" update = "update" del = "delete" ) type cassandraConnector struct { // implements orm.Connector interface orm.Connector // Session is the gocql session created for this connector Session *gocql.Session // scope is the storage scope for metrics scope tally.Scope // scope is the storage scope for success metrics executeSuccessScope tally.Scope // scope is the storage scope for failure metrics executeFailScope tally.Scope // Conf is the Cassandra connector config for this cluster Conf *Config } // NewCassandraConnector initializes a Cassandra Connector func NewCassandraConnector( config *Config, scope tally.Scope, ) (orm.Connector, error) { session, err := CreateStoreSession( config.CassandraConn, config.StoreName) if err != nil { return nil, err } // create a storeScope for the keyspace StoreName storeScope := scope.SubScope("cql").Tagged( map[string]string{"store": config.StoreName}) return &cassandraConnector{ Session: 
session, scope: storeScope, executeSuccessScope: storeScope.Tagged( map[string]string{"result": "success"}), executeFailScope: storeScope.Tagged( map[string]string{"result": "fail"}), Conf: config, }, nil } // ensure that implementation (cassandraConnector) satisfies the interface var _ orm.Connector = (*cassandraConnector)(nil) // getGocqlErrorTag gets a error tag for metrics based on gocql error // We cannot just use err.Error() as a tag because it contains invalid // characters like = : etc. which will be rejected by M3 func getGocqlErrorTag(err error) string { if yarpcerrors.IsAlreadyExists(err) { return "already_exists" } if yarpcerrors.IsNotFound(err) { return "not_found" } switch err.(type) { case *gocql.RequestErrReadFailure: return "read_failure" case *gocql.RequestErrWriteFailure: return "write_failure" case *gocql.RequestErrAlreadyExists: return "already_exists" case *gocql.RequestErrReadTimeout: return "read_timeout" case *gocql.RequestErrWriteTimeout: return "write_timeout" case *gocql.RequestErrUnavailable: return "unavailable" case *gocql.RequestErrFunctionFailure: return "function_failure" case *gocql.RequestErrUnprepared: return "unprepared" default: return "unknown" } } // buildResultRow is used to allocate memory for the row to be populated by // Cassandra read operation based on what object fields are being read func buildResultRow(e *base.Definition, columns []string) []interface{} { results := make([]interface{}, len(columns)) timeType := reflect.ValueOf(time.Now()) gocqlUUIDType := reflect.ValueOf(gocql.UUIDFromTime(time.Now())) for i, column := range columns { // get the type of the field from the ColumnToType mapping for object // That we we can allocate appropriate memory for this field typ := e.ColumnToType[column] switch typ.Kind() { case reflect.String: var value *string results[i] = &value case reflect.Int32, reflect.Uint32, reflect.Int: // C* internally uses int and int64 var value *int results[i] = &value case reflect.Int64, 
reflect.Uint64: // C* internally uses int and int64 var value *int64 results[i] = &value case reflect.Bool: var value *bool results[i] = &value case reflect.Slice: var value *[]byte results[i] = &value case timeType.Kind(): var value *time.Time results[i] = &value case gocqlUUIDType.Kind(): var value *gocql.UUID results[i] = &value case reflect.Ptr: // Special case for custom optional string type: // string type used in Cassandra // converted to/from custom type in ORM layer if typ == reflect.TypeOf(&base.OptionalString{}) { var value *string results[i] = &value break } // Special case for custom optional int type: // int64 type used in Cassandra // converted to/from custom type in ORM layer if typ == reflect.TypeOf(&base.OptionalUInt64{}) { var value *int64 results[i] = &value break } // for unrecognized pointer types, fall back to default logging fallthrough default: // This should only happen if we start using a new cassandra type // without adding to the translation layer log.WithFields(log.Fields{"type": typ.Kind(), "column": column}). Infof("type not found") } } return results } // getRowFromResult translates a row read from Cassandra into a list of // base.Column to be interpreted by base store client func getRowFromResult( e *base.Definition, columnNames []string, columnVals []interface{}, ) []base.Column { row := make([]base.Column, 0, len(columnNames)) for i, columnName := range columnNames
return row } // splitColumnNameValue is used to return list of column names and list of their // corresponding value. Order is very important in this lists as they will be // used separately when constructing the CQL query. func splitColumnNameValue(row []base.Column) ( colNames []string, colValues []interface{}) { // Split row into two lists of column names and column values. // So for a location `i` in the list, the colNames[i] and colValues[i] will // represent row[i] for _, column := range row { colNames = append(colNames, column.Name) colValues = append(colValues, column.Value) } return colNames, colValues } // Create creates a new row in DB if it already doesn't exist. Uses CAS write. func (c *cassandraConnector) CreateIfNotExists( ctx context.Context, e *base.Definition, row []base.Column, ) error { return c.create(ctx, e, row, useCasWrite) } // Create creates a new row in DB. func (c *cassandraConnector) Create( ctx context.Context, e *base.Definition, row []base.Column, ) error { return c.create(ctx, e, row, !useCasWrite) } func (c *cassandraConnector) create( ctx context.Context, e *base.Definition, row []base.Column, casWrite bool, ) error { // split row into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. 
colNames, colValues := splitColumnNameValue(row) // Prepare insert statement stmt, err := InsertStmt( Table(e.Name), Columns(colNames), Values(colValues), IfNotExist(casWrite), ) if err != nil { return err } operation := create if casWrite { operation = cas } q := c.Session.Query(stmt, colValues...).WithContext(ctx) if casWrite { applied, err := q.MapScanCAS(map[string]interface{}{}) if err != nil { sendCounters(c.executeFailScope, e.Name, operation, err) return err } if !applied { return yarpcerrors.AlreadyExistsErrorf("item already exists") } } else { if err := q.Exec(); err != nil { sendCounters(c.executeFailScope, e.Name, operation, err) return err } } sendLatency(c.scope, e.Name, operation, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, operation, nil) return nil } // buildSelectQuery builds a select query using base object and key columns func (c *cassandraConnector) buildSelectQuery( ctx context.Context, e *base.Definition, keyCols []base.Column, colNamesToRead []string, ) (*gocql.Query, error) { // split keyCols into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. 
keyColNames, keyColValues := splitColumnNameValue(keyCols) // Prepare select statement stmt, err := SelectStmt( Table(e.Name), Columns(colNamesToRead), Conditions(keyColNames), ) if err != nil { return nil, err } return c.Session.Query(stmt, keyColValues...).WithContext(ctx), nil } // Get fetches a record from DB using primary keys func (c *cassandraConnector) Get( ctx context.Context, e *base.Definition, keyCols []base.Column, colNamesToRead ...string, ) ([]base.Column, error) { if len(colNamesToRead) == 0 { colNamesToRead = e.GetColumnsToRead() } q, err := c.buildSelectQuery(ctx, e, keyCols, colNamesToRead) if err != nil { return nil, err } // build a result row result := buildResultRow(e, colNamesToRead) if err := q.Scan(result...); err != nil { if err == gocql.ErrNotFound { err = yarpcerrors.NotFoundErrorf(err.Error()) } sendCounters(c.executeFailScope, e.Name, get, err) return nil, err } sendLatency(c.scope, e.Name, get, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, get, nil) // translate the read result into a row ([]base.Column) return getRowFromResult(e, colNamesToRead, result), nil } // GetAll fetches all rows from DB using partition keys func (c *cassandraConnector) GetAll( ctx context.Context, e *base.Definition, keyCols []base.Column, ) (rows [][]base.Column, errors error) { iter, err := c.GetAllIter(ctx, e, keyCols) if err != nil { return nil, err } defer iter.Close() for { row, errors := iter.Next() if errors != nil { return nil, errors } if row != nil { rows = append(rows, row) } else { return rows, nil } } } // GetAllIter gives an iterator to fetch all rows from DB func (c *cassandraConnector) GetAllIter( ctx context.Context, e *base.Definition, keyCols []base.Column, ) (iter orm.Iterator, err error) { colNamesToRead := e.GetColumnsToRead() q, err := c.buildSelectQuery(ctx, e, keyCols, colNamesToRead) if err != nil { return nil, err } // execute query and get iterator cqlIter := q.Iter() sendLatency(c.scope, e.Name, 
getIter, time.Duration(q.Latency())) return newIterator( e, colNamesToRead, c.executeSuccessScope, c.executeFailScope, cqlIter, ), nil } // Delete deletes a record from DB using primary keys func (c *cassandraConnector) Delete( ctx context.Context, e *base.Definition, keyCols []base.Column, ) error { // split keyCols into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. keyColNames, keyColValues := splitColumnNameValue(keyCols) // Prepare delete statement stmt, err := DeleteStmt( Table(e.Name), Conditions(keyColNames), ) if err != nil { return err } q := c.Session.Query(stmt, keyColValues...).WithContext(ctx) if err := q.Exec(); err != nil { sendCounters(c.executeFailScope, e.Name, del, err) return err } sendLatency(c.scope, e.Name, del, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, del, nil) return nil } // Update updates an existing row in DB. func (c *cassandraConnector) Update( ctx context.Context, e *base.Definition, row []base.Column, keyCols []base.Column, ) error { // split keyCols into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. keyColNames, keyColValues := splitColumnNameValue(keyCols) // split row into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. colNames, colValues := splitColumnNameValue(row) // Prepare update statement stmt, err := UpdateStmt( Table(e.Name), Updates(colNames), Conditions(keyColNames), ) if err != nil { return err } // list of values to be supplied in the query updateVals := append(colValues, keyColValues...) 
q := c.Session.Query( stmt, updateVals...).WithContext(ctx) if err := q.Exec(); err != nil { sendCounters(c.executeFailScope, e.Name, update, err) return err } sendLatency(c.scope, e.Name, update, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, update, nil) return nil } // cassandraIterator implements interface Iterator for Cassandra type cassandraIterator struct { cqlIter *gocql.Iter tableDef *base.Definition colNamesToRead []string successScope tally.Scope failScope tally.Scope } // ensure that implementation (cassandraIterator) satisfies the interface var _ orm.Iterator = (*cassandraIterator)(nil) func newIterator( e *base.Definition, cols []string, successScope tally.Scope, failScope tally.Scope, cqlIter *gocql.Iter, ) *cassandraIterator { return &cassandraIterator{ cqlIter: cqlIter, tableDef: e, successScope: successScope, failScope: failScope, colNamesToRead: cols, } } func (iter *cassandraIterator) Close() { iter.cqlIter.Close() } func (iter *cassandraIterator) Next() ([]base.Column, error) { result := buildResultRow(iter.tableDef, iter.colNamesToRead) if iter.cqlIter.Scan(result...) 
{ row := getRowFromResult(iter.tableDef, iter.colNamesToRead, result) return row, nil } // Either end-of-results or error if errors := iter.cqlIter.Close(); errors != nil { sendCounters(iter.failScope, iter.tableDef.Name, getIter, errors) return nil, errors } sendCounters(iter.successScope, iter.tableDef.Name, getIter, nil) return nil, nil } // helper function to record call latency metric func sendLatency( scope tally.Scope, table, operation string, d time.Duration, ) { s := scope.Tagged(map[string]string{ "table": table, "operation": operation, }) s.Timer("execute_latency").Record(d) } // helper function to record cql query success/failure metrics func sendCounters( scope tally.Scope, table, operation string, err error, ) { errMsg := "none" if err != nil { errMsg = getGocqlErrorTag(err) } s := scope.Tagged(map[string]string{ "table": table, "operation": operation, "error": errMsg, }) s.Counter("execute").Inc(1) }
{ // construct a list of column objects from the lists of column names // and values that were returned by the cassandra query column := base.Column{ Name: columnName, } switch rv := columnVals[i].(type) { case **int: column.Value = *rv case **int64: column.Value = *rv case **string: column.Value = *rv case **gocql.UUID: column.Value = *rv case **time.Time: column.Value = *rv case **bool: column.Value = *rv case **[]byte: column.Value = *rv default: // This should only happen if we start using a new cassandra type // without adding to the translation layer log.WithFields(log.Fields{ "data": columnVals[i], "column": columnName}).Infof("type not found") } row = append(row, column) }
conditional_block
cassandra.go
// Copyright (c) 2019 Uber Technologies, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package cassandra import ( "context" "reflect" "time" "github.com/uber/peloton/pkg/storage/objects/base" "github.com/uber/peloton/pkg/storage/orm" "github.com/gocql/gocql" log "github.com/sirupsen/logrus" "github.com/uber-go/tally" "go.uber.org/yarpc/yarpcerrors" ) const ( _defaultRetryTimeout = 50 * time.Millisecond _defaultRetryAttempts = 5 useCasWrite = true ) const ( // operation tags for metrics create = "create" cas = "cas" get = "get" getIter = "get_iter" update = "update" del = "delete" ) type cassandraConnector struct { // implements orm.Connector interface orm.Connector // Session is the gocql session created for this connector Session *gocql.Session // scope is the storage scope for metrics scope tally.Scope // scope is the storage scope for success metrics executeSuccessScope tally.Scope // scope is the storage scope for failure metrics executeFailScope tally.Scope // Conf is the Cassandra connector config for this cluster Conf *Config } // NewCassandraConnector initializes a Cassandra Connector func NewCassandraConnector( config *Config, scope tally.Scope, ) (orm.Connector, error) { session, err := CreateStoreSession( config.CassandraConn, config.StoreName) if err != nil { return nil, err } // create a storeScope for the keyspace StoreName storeScope := scope.SubScope("cql").Tagged( map[string]string{"store": config.StoreName}) return &cassandraConnector{ Session: 
session, scope: storeScope, executeSuccessScope: storeScope.Tagged( map[string]string{"result": "success"}), executeFailScope: storeScope.Tagged( map[string]string{"result": "fail"}), Conf: config, }, nil } // ensure that implementation (cassandraConnector) satisfies the interface var _ orm.Connector = (*cassandraConnector)(nil) // getGocqlErrorTag gets a error tag for metrics based on gocql error // We cannot just use err.Error() as a tag because it contains invalid // characters like = : etc. which will be rejected by M3 func getGocqlErrorTag(err error) string { if yarpcerrors.IsAlreadyExists(err) { return "already_exists" } if yarpcerrors.IsNotFound(err) { return "not_found" } switch err.(type) { case *gocql.RequestErrReadFailure: return "read_failure" case *gocql.RequestErrWriteFailure: return "write_failure" case *gocql.RequestErrAlreadyExists: return "already_exists" case *gocql.RequestErrReadTimeout: return "read_timeout" case *gocql.RequestErrWriteTimeout: return "write_timeout" case *gocql.RequestErrUnavailable: return "unavailable" case *gocql.RequestErrFunctionFailure: return "function_failure" case *gocql.RequestErrUnprepared: return "unprepared" default: return "unknown" } } // buildResultRow is used to allocate memory for the row to be populated by // Cassandra read operation based on what object fields are being read func buildResultRow(e *base.Definition, columns []string) []interface{} { results := make([]interface{}, len(columns)) timeType := reflect.ValueOf(time.Now()) gocqlUUIDType := reflect.ValueOf(gocql.UUIDFromTime(time.Now())) for i, column := range columns { // get the type of the field from the ColumnToType mapping for object // That we we can allocate appropriate memory for this field typ := e.ColumnToType[column] switch typ.Kind() { case reflect.String: var value *string results[i] = &value case reflect.Int32, reflect.Uint32, reflect.Int: // C* internally uses int and int64 var value *int results[i] = &value case reflect.Int64, 
reflect.Uint64: // C* internally uses int and int64 var value *int64 results[i] = &value case reflect.Bool: var value *bool results[i] = &value case reflect.Slice: var value *[]byte results[i] = &value case timeType.Kind(): var value *time.Time results[i] = &value case gocqlUUIDType.Kind(): var value *gocql.UUID results[i] = &value case reflect.Ptr: // Special case for custom optional string type: // string type used in Cassandra // converted to/from custom type in ORM layer if typ == reflect.TypeOf(&base.OptionalString{}) { var value *string results[i] = &value break } // Special case for custom optional int type: // int64 type used in Cassandra // converted to/from custom type in ORM layer if typ == reflect.TypeOf(&base.OptionalUInt64{}) { var value *int64 results[i] = &value break } // for unrecognized pointer types, fall back to default logging fallthrough default: // This should only happen if we start using a new cassandra type // without adding to the translation layer log.WithFields(log.Fields{"type": typ.Kind(), "column": column}). 
Infof("type not found") } } return results } // getRowFromResult translates a row read from Cassandra into a list of // base.Column to be interpreted by base store client func getRowFromResult( e *base.Definition, columnNames []string, columnVals []interface{}, ) []base.Column { row := make([]base.Column, 0, len(columnNames)) for i, columnName := range columnNames { // construct a list of column objects from the lists of column names // and values that were returned by the cassandra query column := base.Column{ Name: columnName, } switch rv := columnVals[i].(type) { case **int: column.Value = *rv case **int64: column.Value = *rv case **string: column.Value = *rv case **gocql.UUID: column.Value = *rv case **time.Time: column.Value = *rv case **bool: column.Value = *rv case **[]byte: column.Value = *rv default: // This should only happen if we start using a new cassandra type // without adding to the translation layer log.WithFields(log.Fields{ "data": columnVals[i], "column": columnName}).Infof("type not found") } row = append(row, column) } return row } // splitColumnNameValue is used to return list of column names and list of their // corresponding value. Order is very important in this lists as they will be // used separately when constructing the CQL query. func splitColumnNameValue(row []base.Column) ( colNames []string, colValues []interface{}) { // Split row into two lists of column names and column values. // So for a location `i` in the list, the colNames[i] and colValues[i] will // represent row[i] for _, column := range row { colNames = append(colNames, column.Name) colValues = append(colValues, column.Value) } return colNames, colValues } // Create creates a new row in DB if it already doesn't exist. Uses CAS write. func (c *cassandraConnector) CreateIfNotExists( ctx context.Context, e *base.Definition, row []base.Column, ) error { return c.create(ctx, e, row, useCasWrite) } // Create creates a new row in DB. 
func (c *cassandraConnector) Create( ctx context.Context, e *base.Definition, row []base.Column, ) error { return c.create(ctx, e, row, !useCasWrite) } func (c *cassandraConnector) create( ctx context.Context, e *base.Definition, row []base.Column, casWrite bool, ) error
// buildSelectQuery builds a select query using base object and key columns func (c *cassandraConnector) buildSelectQuery( ctx context.Context, e *base.Definition, keyCols []base.Column, colNamesToRead []string, ) (*gocql.Query, error) { // split keyCols into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. keyColNames, keyColValues := splitColumnNameValue(keyCols) // Prepare select statement stmt, err := SelectStmt( Table(e.Name), Columns(colNamesToRead), Conditions(keyColNames), ) if err != nil { return nil, err } return c.Session.Query(stmt, keyColValues...).WithContext(ctx), nil } // Get fetches a record from DB using primary keys func (c *cassandraConnector) Get( ctx context.Context, e *base.Definition, keyCols []base.Column, colNamesToRead ...string, ) ([]base.Column, error) { if len(colNamesToRead) == 0 { colNamesToRead = e.GetColumnsToRead() } q, err := c.buildSelectQuery(ctx, e, keyCols, colNamesToRead) if err != nil { return nil, err } // build a result row result := buildResultRow(e, colNamesToRead) if err := q.Scan(result...); err != nil { if err == gocql.ErrNotFound { err = yarpcerrors.NotFoundErrorf(err.Error()) } sendCounters(c.executeFailScope, e.Name, get, err) return nil, err } sendLatency(c.scope, e.Name, get, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, get, nil) // translate the read result into a row ([]base.Column) return getRowFromResult(e, colNamesToRead, result), nil } // GetAll fetches all rows from DB using partition keys func (c *cassandraConnector) GetAll( ctx context.Context, e *base.Definition, keyCols []base.Column, ) (rows [][]base.Column, errors error) { iter, err := c.GetAllIter(ctx, e, keyCols) if err != nil { return nil, err } defer iter.Close() for { row, errors := iter.Next() if errors != nil { return nil, errors } if row != nil { rows = append(rows, row) } else { return rows, nil } } } // GetAllIter 
gives an iterator to fetch all rows from DB func (c *cassandraConnector) GetAllIter( ctx context.Context, e *base.Definition, keyCols []base.Column, ) (iter orm.Iterator, err error) { colNamesToRead := e.GetColumnsToRead() q, err := c.buildSelectQuery(ctx, e, keyCols, colNamesToRead) if err != nil { return nil, err } // execute query and get iterator cqlIter := q.Iter() sendLatency(c.scope, e.Name, getIter, time.Duration(q.Latency())) return newIterator( e, colNamesToRead, c.executeSuccessScope, c.executeFailScope, cqlIter, ), nil } // Delete deletes a record from DB using primary keys func (c *cassandraConnector) Delete( ctx context.Context, e *base.Definition, keyCols []base.Column, ) error { // split keyCols into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. keyColNames, keyColValues := splitColumnNameValue(keyCols) // Prepare delete statement stmt, err := DeleteStmt( Table(e.Name), Conditions(keyColNames), ) if err != nil { return err } q := c.Session.Query(stmt, keyColValues...).WithContext(ctx) if err := q.Exec(); err != nil { sendCounters(c.executeFailScope, e.Name, del, err) return err } sendLatency(c.scope, e.Name, del, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, del, nil) return nil } // Update updates an existing row in DB. func (c *cassandraConnector) Update( ctx context.Context, e *base.Definition, row []base.Column, keyCols []base.Column, ) error { // split keyCols into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. keyColNames, keyColValues := splitColumnNameValue(keyCols) // split row into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. 
colNames, colValues := splitColumnNameValue(row) // Prepare update statement stmt, err := UpdateStmt( Table(e.Name), Updates(colNames), Conditions(keyColNames), ) if err != nil { return err } // list of values to be supplied in the query updateVals := append(colValues, keyColValues...) q := c.Session.Query( stmt, updateVals...).WithContext(ctx) if err := q.Exec(); err != nil { sendCounters(c.executeFailScope, e.Name, update, err) return err } sendLatency(c.scope, e.Name, update, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, update, nil) return nil } // cassandraIterator implements interface Iterator for Cassandra type cassandraIterator struct { cqlIter *gocql.Iter tableDef *base.Definition colNamesToRead []string successScope tally.Scope failScope tally.Scope } // ensure that implementation (cassandraIterator) satisfies the interface var _ orm.Iterator = (*cassandraIterator)(nil) func newIterator( e *base.Definition, cols []string, successScope tally.Scope, failScope tally.Scope, cqlIter *gocql.Iter, ) *cassandraIterator { return &cassandraIterator{ cqlIter: cqlIter, tableDef: e, successScope: successScope, failScope: failScope, colNamesToRead: cols, } } func (iter *cassandraIterator) Close() { iter.cqlIter.Close() } func (iter *cassandraIterator) Next() ([]base.Column, error) { result := buildResultRow(iter.tableDef, iter.colNamesToRead) if iter.cqlIter.Scan(result...) 
{ row := getRowFromResult(iter.tableDef, iter.colNamesToRead, result) return row, nil } // Either end-of-results or error if errors := iter.cqlIter.Close(); errors != nil { sendCounters(iter.failScope, iter.tableDef.Name, getIter, errors) return nil, errors } sendCounters(iter.successScope, iter.tableDef.Name, getIter, nil) return nil, nil } // helper function to record call latency metric func sendLatency( scope tally.Scope, table, operation string, d time.Duration, ) { s := scope.Tagged(map[string]string{ "table": table, "operation": operation, }) s.Timer("execute_latency").Record(d) } // helper function to record cql query success/failure metrics func sendCounters( scope tally.Scope, table, operation string, err error, ) { errMsg := "none" if err != nil { errMsg = getGocqlErrorTag(err) } s := scope.Tagged(map[string]string{ "table": table, "operation": operation, "error": errMsg, }) s.Counter("execute").Inc(1) }
{ // split row into a list of names and values to compose query stmt using // names and use values in the session query call, so the order needs to be // maintained. colNames, colValues := splitColumnNameValue(row) // Prepare insert statement stmt, err := InsertStmt( Table(e.Name), Columns(colNames), Values(colValues), IfNotExist(casWrite), ) if err != nil { return err } operation := create if casWrite { operation = cas } q := c.Session.Query(stmt, colValues...).WithContext(ctx) if casWrite { applied, err := q.MapScanCAS(map[string]interface{}{}) if err != nil { sendCounters(c.executeFailScope, e.Name, operation, err) return err } if !applied { return yarpcerrors.AlreadyExistsErrorf("item already exists") } } else { if err := q.Exec(); err != nil { sendCounters(c.executeFailScope, e.Name, operation, err) return err } } sendLatency(c.scope, e.Name, operation, time.Duration(q.Latency())) sendCounters(c.executeSuccessScope, e.Name, operation, nil) return nil }
identifier_body
SharedEnum.ts
/** * MADE BY GENERATOR AT 2018-12-10 20:14:50, * PLEASE DO NOT REWRITE. */ export enum EError { // 错误提示 NoError = 0, ServerError = 1, // 服务器错误 DataError = 2, // 数据错误 UnknownError = 3, // 不明错误 DataBusy = 4, // 数据繁忙 NoUsrId = 5, // 找不到用户id Frozen = 6, // 账号被冻结 Dropped = 7, // 账号被停用 } export enum EMoneyType { // 货币类型 Money = 1, // 代币 Diamond = 2, // 钻石 Gold = 3, // 元宝 Coin = 4, // 金币 } export enum EGoodsFlag { // 商品标志 None = 0, // 默认无 New = 1, // 新品 Hot = 2, // 热销 Discount = 3, // 打折 SoldOut = 4, // 下架 } export enum EAcceptCondition { // 接受关卡的条件类型 acceptcondition_front_id = 1, // 前置关卡ID acceptcondition_level = 2, // 等级 } export enum ECompleteCondition { // 完成关卡的条件类型 completecondition_score = 1, // 分数 completecondition_kill = 2, // 杀人数 } export enum ECompleteReward { // 完成关卡的奖励类型 completereward_exp = 1, // 经验 completereward_item = 2, // 物品 completereward_gold = 3, // 金币 } export enum EUsrState { uninit = 1, // 新账号 init_actor = 2, // 选择初始艺人 usual = 3, // 启用 frozen = 4, // 冻结 dropped = 5, // 停用 } export enum EItemNumber { // 玩家数值cid 作为一个物品 diamond = 1, // 钻石 gold = 2, // 金币 } export enum EPublishTypeCode { sdknotice = 1, // sdk公告 usrmail = 2, // 用户邮箱 gamemail = 9, // 游戏内邮箱 gamenotice = 10, // 游戏内公告 } export enum EUserSex { boy = 2001, girl = 2002, } export enum EItemCategory { // 道具类型 diamond = 1, // 钻石 gold = 2, // 金币 dollar = 3, // 美元 other = 4, // 周边产品 normal = 5, // 普通道具 assets = 6, // 资产 contract = 7, // 合同 giftbag = 8, // 礼包 licence = 10, // 拍摄许可证 cup = 14, // 奖杯 piecesgiftbag = 17, // 碎片选择礼包 story = 18, // 剧本 } export enum EItemId { diamond = 1, gold = 2, dollar = 3, licence = 4, // 拍摄许可证 fans = 5, praise = 6, // 点赞 drawing = 7, // 图纸 expect = 8, // 期待 study_card = 5004, // 进修卡 talent = 5007, // 星探卡 player_rename_card = 5008, // 改公司名道具 actor_stamina_water = 5010, // 技能药剂,增加体力 actor_loyalty_card = 5003, // 忠诚卡 brochure = 5005, // 宣传册 horn = 5006, // 喇叭 } export enum EGameDataRename { // gameData 模块相关枚举 renameLengthMin = 4, renameLengthMax = 18, 
rePerSignatureLengthMax = 100, } export enum EStoryStack { // storyStack 表的枚举 story_stack_daily_times = 2045, // 剧本搜罗每天搜罗次数 story_stack_search_cost = 2046, // 剧本搜罗每次消耗物品 story_stack_cd_stage_cost = 2047, // 剧本搜罗每阶段搜罗冷却加速消耗资源类型和数量 story_stack_cd_stage_length = 2048, // 剧本搜罗阶段时长 story_stack_cd = 2050, // 剧本搜罗冷却时间 story_stack_daily_update = 2051, // 每日刷新时间 story_stack_cd_block_level = 2052, // 剧本搜罗冷却功能开放所需街区等级 } export enum EStoryModule { // story 模块的枚举 noFinish = 0, finish = 1, hadGet = 2, randomLength = 3, shootTime = 5, score = 8, } export enum EMovieState { chooseScript = 2, // 选择剧本 renameFilm = 3, // 修改剧本名称 currentMarketInf = 4, // 当前市场反馈 chooseActor = 5, // 选择艺人 costView = 6, // 薪酬计算 yRShowView = 7, // 艺人展示 compatibility = 8, // 艺人对影片的契合度 filming = 9, // 正在拍摄中 chooseType = 10, // 选择类型 chooseTip = 11, // 选择后的提示 propaganda = 12, // 宣传 complete = 13, // 杀青 actorUpdate = 14, // 艺人熟练度 proficiency = 15, // 杀青对比情况 chooseTheater = 16, // 选择院线 noticeFeedBack = 17, // 媒体评价 SYPF = 18, // 首映票房 audienceReputation = 19, // 观众口碑 result = 20, // 上映结果 overMarket = 21, // 下映提示 Jiesuan = 22, // 结算分享界面 rewardTip = 23, // 奖励物品提示 contineTransceiver = 24, // 持续收益 } export enum EGlobalId { maxMovieNum = 9, // 最多可同时进行持续收益的电影数量 tempActor = 9999, // 临时演员id firstActor = 9997, // 首部电影演员巨石强森 loyaltyCardLimit = 1035, // 忠诚卡增加赠送物品上限 } export enum EMovieType { } export enum EBoxOfficeEvaluation { // 票房评价 normal = 1, // 一般 good = 2, // 良好 big_sell = 3, // 大卖 great_sell = 4, // 超卖 myth_sell = 5, // 神话 marvel_sell = 6, // 传奇 } export enum EProgressBoxId { manageOrder = 1, // 经营订单进度宝箱 dailyTask = 2, // 日常任务宝箱 cooperateTask = 3, // 合作任务宝箱 } export enum EManageProduce { // 经营生产 line = 1, // 队列 save = 2, // 存储 time = 3, // 时间减免 output_initial = 4, // 每次产出 dollarIntervalTime = 11, // 美元生产线时间 baseProduceId = 100, // 生产纪念品的生产线ID,该生产线只产生美元 intervalTime = 300, // 间隔时间 } export enum ERankListType { SelfList = 1, // 自己的排名 ServerList = 2, // 全服排行榜 GroupList = 3, // 分组总票房 GroupMovieList = 4, // 分组影片票房 } export 
enum EAchievementState { // 成就或者任务的状态
finished = 3, // 完成 rewarded = 4, // 已领奖的 } export enum EAchievementType { // 成就或者任务的类型 daily = 1, // 日常 achievement = 2, // 成就 story = 3, // 剧本 mainTask = 4, // 主线任务 } export enum EMallItemLimitType { // 商店限购类型 daily = 1, // 每日 weekly = 2, // 每周 } export enum EManageBusiness { // 经营事务 baseLine = 10, // 基础代办事务上限 interval = 180, // 间隔时长 } export enum EManageVisit { // 经营探班 baseLine = 3, // 基础探班队列 overdueTime = 180, // 过期时长 baseIntervalTime = 300, // 基本间隔时间 } export enum EManageOrder { // 经营订单 baseLength = 9, // 最多显示9个订单 baseTime = 1200, // 订单存活时间 delTime = 20, // 删除订单缓存时间 } export enum EMailId { // 邮件ID枚举 cooperateApplyFail = 3, // 合作邀请失败 orderId = 4, // 订单过期邮件ID partnerGive = 7, //伙伴赠送 firstCooperate = 8, // 首次建立合作奖励 firstAccountCooperate = 9, // 首次账号查找建立合作 delOrderId = 10, // 删除的订单邮件ID rewardOrder = 11, //订单奖励过期 dailyTask = 12, // 日程任务未领取 cupPackage = 13, // 奖杯礼包 } export enum ESecretaryType { // 秘书拥有类型 noHave = 0, // 未获取 had = 1, // 终生拥有 temporaryHad = 2, // 暂时拥有 } export enum ESecretary { // 秘书表枚举 skillExp = 2, // 艺人技能训练经验增加N% trainTime = 3, // 艺人培养时间减少N% searchTime = 4, // 星探等待时间减少N% starSearch = 5, // 每天额外星探次数增加N cityTimes = 6, // 城市自动宣传次数 workTimes = 7, // 自动处理公务次数 visitTimes = 8, // 自动接待次数 secretaryExpStart = 9, // 秘书体验活动开始时间 secretaryExpEnd = 10, // 秘书体验活动结束时间 applicationDuration = 11, //许可证申请时长 applicationLicense = 12, //申请许可证物品类型 applicationLicenseNum = 13, // 申请许可证物品数量 freeGiftId = 1001, // 每日免费领取礼包ID moneyGiftId = 2001, // 每日需要花费领取的礼包ID } export enum EManageMeet { // 经营会议枚举 baseLine = 3, // 基础探班队列 baseIntervalTime = 1200, // 基本间隔时间 } export enum EOscarType { man = 1, woman = 2, perform = 3, art = 4, publicPraise = 5, bestBoxOffice = 6, totalBoxOffice = 7, movies = 8, bigSell = 9, } export enum EGMRefreshType { // GM的刷新类型 dailySign = 0, // 日常签到 dailyTask = 1, // 日常任务 dailyMall = 2, // 商城每日限购道具 weeklyMall = 3, // 商城每周限购道具 manageProduce = 4, // 管理生产线cd的清除 sevenAddup = 5, // 七日累计登陆 findStory = 6, // 剧本搜罗清除 } export enum EItemUseType { // 物品使用类型 noReward = 
0, // 无奖励 allReward = 1, // 获得reward中所有奖励 randomReward = 2, // 随机获得一项奖励 } export enum EItemType { // 物品类型 randomActor = 16, // 随机获得一个艺人 } export enum EGuideType { guide = 1, // 引导 story = 2, // 剧情 } export enum ENoticeType { init = 1, add = 2, update = 4, remove = 8, } export enum ECooperate { taskLength = 6, } export enum EChatChannel { private = 1, // 私人聊天 world = 2, // 世界聊天 company = 3, // 公司聊天 } export enum ESystemMessageId { getActor1 = 1001, getActor2 = 1002, movie = 1003, updateName = 1004, createCompany = 1005, joinCompany = 1006, } export enum ESystemType { userName = 1, // 玩家/公司 名称 oldName = 2, // 旧名称 actorStar = 3, // 艺人星级 actorName = 4, // 艺人名称 moiveName = 5, // 电影名称 blockLevel = 6, // 街道等级 boxOfficeEvaluation = 7, // 电影等级 groupName = 8, // 集团名称 } export class ECommon { // 通用枚举 static MaxNameLenth = 20; // 名字最大长度 static DefaultName = "Brad Pitt"; // 默认名字 static MoveSpeed = 1.3; // 移动速度 static CanJump = false; // 开启跳跃 } export class EChannel { // 渠道枚举 static facebook = "1"; static google = "2"; } export class ProtoVersion { //工具自动生成的枚举,记录协议版本号 static versionCode = 10; //协议版本号数字表示 static versionName = "0.1.0"; //协议版本号字符表示 } export class DBOType { //工具自动生成的枚举,枚举所有的存库对象 static UsrData = "UsrData"; static GameInfo = "GameInfo"; static GameInfoExt = "GameInfoExt"; static GMMsgLog = "GMMsgLog"; static OrderData = "OrderData"; static ItemData = "ItemData"; static StorySuitData = "StorySuitData"; static StoryData = "StoryData"; static ActorData = "ActorData"; static CityData = "CityData"; static ActorSkillEffect = "ActorSkillEffect"; static TempData = "TempData"; static MovieData = "MovieData"; static ManageOrderData = "ManageOrderData"; // 经营订单 static ProgressBoxData = "ProgressBoxData"; static ManageProduceData = "ManageProduceData"; // 经营生产 static MovieLicenceResetTimeData = "MovieLicenceResetTimeData"; static ManageBusinessData = "ManageBusinessData"; // 经营公务 static ManageVisitData = "ManageVisitData"; // 经营探班 static StoryMessage = "StoryMessage"; static 
RankListData = "RankListData"; static TaskData = "TaskData"; // 任务结构 static MallItemLimitData = "MallItemLimitData"; // 单个商品限购数据 static MailData = "MailData"; static ManageMeetingData = "ManageMeetingData"; // 经营会议 static DanmuData = "DanmuData"; static DailySign = "DailySign"; // 日常签到数据 static SecretaryData = "SecretaryData"; // 小秘书功能 static SevenAddUpData = "SevenAddUpData"; // 活动期间累计达到某种要求领取奖励 static ActortExerciseData = "ActortExerciseData"; // 艺人训练队列 static DailyEvent = "DailyEvent"; // 日程事件 static GuideData = "GuideData"; // 引导数据 static PlayerCooperateData = "PlayerCooperateData"; // 合作 static AccountCooperateData = "AccountCooperateData"; // 账号邀请合作数据 static AutoCooperateData = "AutoCooperateData"; // 公共邀请合作数据 static CooperateTaskData = "CooperateTaskData"; // 合作任务 static CooperateOrderData = "CooperateOrderData"; // 合作订单 static LevelGiftData = "LevelGiftData"; static GetGuideReward = "GetGuideReward"; // 完成所有引导任务领取奖励 static CupPackage = "CupPackage"; // 奖杯礼包 static StoryRefreshData = "StoryRefreshData"; // 剧本刷新时间信息 } export class DBOShared { //工具自动生成的枚举,枚举所有的存库对象的元信息 static UsrData = { dbname: 'game',tbname: 'UsrData',shared: true }; static GameInfo = { dbname: 'game',tbname: 'GameInfo',shared: true }; static GameInfoExt = { dbname: 'game',tbname: 'GameInfoExt',shared: true }; static GMMsgLog = { dbname: 'game',tbname: 'GMMsgLog',shared: true }; static OrderData = { dbname: 'game_global',tbname: 'OrderData',shared: false }; static ItemData = { dbname: 'game',tbname: 'ItemData',shared: true }; static StorySuitData = { dbname: 'game',tbname: 'StorySuitData',shared: true }; static StoryData = { dbname: 'game',tbname: 'StoryData',shared: true }; static ActorData = { dbname: 'game',tbname: 'ActorData',shared: true }; static CityData = { dbname: 'game',tbname: 'CityData',shared: true }; static ActorSkillEffect = { dbname: 'game',tbname: 'ActorSkillEffect',shared: true }; static TempData = { dbname: 'game',tbname: 'TempData',shared: true }; static MovieData = { 
dbname: 'game',tbname: 'MovieData',shared: true }; static ManageOrderData = { dbname: 'game',tbname: 'ManageOrderData',shared: true }; // 经营订单 static ProgressBoxData = { dbname: 'game',tbname: 'ProgressBoxData',shared: true }; static ManageProduceData = { dbname: 'game',tbname: 'ManageProduceData',shared: true }; // 经营生产 static MovieLicenceResetTimeData = { dbname: 'game',tbname: 'MovieLicenceResetTimeData',shared: true }; static ManageBusinessData = { dbname: 'game',tbname: 'ManageBusinessData',shared: true }; // 经营公务 static ManageVisitData = { dbname: 'game',tbname: 'ManageVisitData',shared: true }; // 经营探班 static StoryMessage = { dbname: 'game_global',tbname: 'StoryMessage',shared: false }; static RankListData = { dbname: 'game_global',tbname: 'RankListData',shared: false }; static TaskData = { dbname: 'game',tbname: 'TaskData',shared: true }; // 任务结构 static MallItemLimitData = { dbname: 'game',tbname: 'MallItemLimitData',shared: true }; // 单个商品限购数据 static MailData = { dbname: 'game',tbname: 'MailData',shared: true }; static ManageMeetingData = { dbname: 'game',tbname: 'ManageMeetingData',shared: true }; // 经营会议 static DanmuData = { dbname: 'game_global',tbname: 'DanmuData',shared: false }; static DailySign = { dbname: 'game',tbname: 'DailySign',shared: true }; // 日常签到数据 static SecretaryData = { dbname: 'game',tbname: 'SecretaryData',shared: true }; // 小秘书功能 static SevenAddUpData = { dbname: 'game',tbname: 'SevenAddUpData',shared: true }; // 活动期间累计达到某种要求领取奖励 static ActortExerciseData = { dbname: 'game',tbname: 'ActortExerciseData',shared: true }; // 艺人训练队列 static DailyEvent = { dbname: 'game',tbname: 'DailyEvent',shared: true }; // 日程事件 static GuideData = { dbname: 'game',tbname: 'GuideData',shared: true }; // 引导数据 static PlayerCooperateData = { dbname: 'game',tbname: 'PlayerCooperateData',shared: true }; // 合作 static AccountCooperateData = { dbname: 'game_global',tbname: 'AccountCooperateData',shared: false }; // 账号邀请合作数据 static AutoCooperateData = { dbname: 
'game_global',tbname: 'AutoCooperateData',shared: false }; // 公共邀请合作数据 static CooperateTaskData = { dbname: 'game_global',tbname: 'CooperateTaskData',shared: false }; // 合作任务 static CooperateOrderData = { dbname: 'game_global',tbname: 'CooperateOrderData',shared: false }; // 合作订单 static LevelGiftData = { dbname: 'game',tbname: 'LevelGiftData',shared: true }; static GetGuideReward = { dbname: 'game',tbname: 'GetGuideReward',shared: true }; // 完成所有引导任务领取奖励 static CupPackage = { dbname: 'game',tbname: 'CupPackage',shared: true }; // 奖杯礼包 static StoryRefreshData = { dbname: 'game',tbname: 'StoryRefreshData',shared: true }; // 剧本刷新时间信息 }
none = 0, // 初始化 receivable = 1, // 可接 received = 2, // 已接
random_line_split
SharedEnum.ts
/** * MADE BY GENERATOR AT 2018-12-10 20:14:50, * PLEASE DO NOT REWRITE. */ export enum EError { // 错误提示 NoError = 0, ServerError = 1, // 服务器错误 DataError = 2, // 数据错误 UnknownError = 3, // 不明错误 DataBusy = 4, // 数据繁忙 NoUsrId = 5, // 找不到用户id Frozen = 6, // 账号被冻结 Dropped = 7, // 账号被停用 } export enum EMoneyType { // 货币类型 Money = 1, // 代币 Diamond = 2, // 钻石 Gold = 3, // 元宝 Coin = 4, // 金币 } export enum EGoodsFlag { // 商品标志 None = 0, // 默认无 New = 1, // 新品 Hot = 2, // 热销 Discount = 3, // 打折 SoldOut = 4, // 下架 } export enum EAcceptCondition { // 接受关卡的条件类型 acceptcondition_front_id = 1, // 前置关卡ID acceptcondition_level = 2, // 等级 } export enum ECompleteCondition { // 完成关卡的条件类型 completecondition_score = 1, // 分数 completecondition_kill = 2, // 杀人数 } export enum ECompleteReward { // 完成关卡的奖励类型 completereward_exp = 1, // 经验 completereward_item = 2, // 物品 completereward_gold = 3, // 金币 } export enum EUsrState { uninit = 1, // 新账号 init_actor = 2, // 选择初始艺人 usual = 3, // 启用 frozen = 4, // 冻结 dropped = 5, // 停用 } export enum EItemNumber { // 玩家数值cid 作为一个物品 diamond = 1, // 钻石 gold = 2, // 金币 } export enum EPublishTypeCode { sdknotice = 1, // sdk公告 usrmail = 2, // 用户邮箱 gamemail = 9, // 游戏内邮箱 gamenotice = 10, // 游戏内公告 } export enum EUserSex { boy = 2001, girl = 2002, } export enum EItemCategory { // 道具类型 diamond = 1, // 钻石 gold = 2, // 金币 dollar = 3, // 美元 other = 4, // 周边产品 normal = 5, // 普通道具 assets = 6, // 资产 contract = 7, // 合同 giftbag = 8, // 礼包 licence = 10, // 拍摄许可证 cup = 14, // 奖杯 piecesgiftbag = 17, // 碎片选择礼包 story = 18, // 剧本 } export enum EItemId { diamond = 1, gold = 2, dollar = 3, licence = 4, // 拍摄许可证 fans = 5, praise = 6, // 点赞 drawing = 7, // 图纸 expect = 8, // 期待 study_card = 5004, // 进修卡 talent = 5007, // 星探卡 player_rename_card = 5008, // 改公司名道具 actor_stamina_water = 5010, // 技能药剂,增加体力 actor_loyalty_card = 5003, // 忠诚卡 brochure = 5005, // 宣传册 horn = 5006, // 喇叭 } export enum EGameDataRename { // gameData 模块相关枚举 renameLengthMin = 4, renameLengthMax = 18, 
rePerSignatureLengthMax = 100, } export enum EStoryStack { // storyStack 表的枚举 story_stack_daily_times = 2045, // 剧本搜罗每天搜罗次数 story_stack_search_cost = 2046, // 剧本搜罗每次消耗物品 story_stack_cd_stage_cost = 2047, // 剧本搜罗每阶段搜罗冷却加速消耗资源类型和数量 story_stack_cd_stage_length = 2048, // 剧本搜罗阶段时长 story_stack_cd = 2050, // 剧本搜罗冷却时间 story_stack_daily_update = 2051, // 每日刷新时间 story_stack_cd_block_level = 2052, // 剧本搜罗冷却功能开放所需街区等级 } export enum EStoryModule { // story 模块的枚举 noFinish = 0, finish = 1, hadGet = 2, randomLength = 3, shootTime = 5, score = 8, } export enum EMovieState { chooseScript = 2, // 选择剧本 renameFilm = 3, // 修改剧本名称 currentMarketInf = 4, // 当前市场反馈 chooseActor = 5, // 选择艺人 costView = 6, // 薪酬计算 yRShowView = 7, // 艺人展示 compatibility = 8, // 艺人对影片的契合度 filming = 9, // 正在拍摄中 chooseType = 10, // 选择类型 chooseTip = 11, // 选择后的提示 propaganda = 12, // 宣传 complete = 13, // 杀青 actorUpdate = 14, // 艺人熟练度 proficiency = 15, // 杀青对比情况 chooseTheater = 16, // 选择院线 noticeFeedBack = 17, // 媒体评价 SYPF = 18, // 首映票房 audienceReputation = 19, // 观众口碑 result = 20, // 上映结果 overMarket = 21, // 下映提示 Jiesuan = 22, // 结算分享界面 rewardTip = 23, // 奖励物品提示 contineTransceiver = 24, // 持续收益 } export enum EGlobalId { maxMovieNum = 9, // 最多可同时进行持续收益的电影数量 tempActor = 9999, // 临时演员id firstActor = 9997, // 首部电影演员巨石强森 loyaltyCardLimit = 1035, // 忠诚卡增加赠送物品上限 } export enum EMovieType { } export enum EBoxOfficeEvaluation { // 票房评价 normal = 1, // 一般 good = 2, // 良好 big_sell = 3, // 大卖 great_sell = 4, // 超卖 myth_sell = 5, // 神话 marvel_sell = 6, // 传奇 } export enum EProgressBoxId { manageOrder = 1, // 经营订单进度宝箱 dailyTask = 2, // 日常任务宝箱 cooperateTask = 3, // 合作任务宝箱 } export enum EManageProduce { // 经营生产 line = 1, // 队列 save = 2, // 存储 time = 3, // 时间减免 output_initial = 4, // 每次产出 dollarIntervalTime = 11, // 美元生产线时间 baseProduceId = 100, // 生产纪念品的生产线ID,该生产线只产生美元 intervalTime = 300, // 间隔时间 } export enum ERankListType { SelfList = 1, // 自己的排名 ServerList = 2, // 全服排行榜 GroupList = 3, // 分组总票房 GroupMovieList = 4, // 分组影片票房 } export 
enum EAchievementState { // 成就或者任务的状态 none = 0, // 初始化 receivable = 1, // 可接 received = 2, // 已接 finished = 3, // 完成 rewarded = 4, // 已领奖的 } export enum EAchievementType { // 成就或者任务的类型 daily = 1, // 日常 achievement = 2, // 成就 story = 3, // 剧本 mainTask = 4, // 主线任务 } export enum EMallItemLimitType { // 商店限购类型 daily = 1, // 每日 weekly = 2, // 每周 } export enum EManageBusiness { // 经营事务 baseLine = 10, // 基础代办事务上限 interval = 180, // 间隔时长 } export enum EManageVisit { // 经营探班 baseLine = 3, // 基础探班队列 overdueTime = 180, // 过期时长 baseIntervalTime = 300, // 基本间隔时间 } export enum EManageOrder { // 经营订单 baseLength = 9, // 最多显示9个订单 baseTime = 1200, // 订单存活时间 delTime = 20, // 删除订单缓存时间 } export enum EMailId { // 邮件ID枚举 cooperateApplyFail = 3, // 合作邀请失败 orderId = 4, // 订单过期邮件ID partnerGive = 7, //伙伴赠送 firstCooperate = 8, // 首次建立合作奖励 firstAccountCooperate = 9, // 首次账号查找建立合作 delOrderId = 10, // 删除的订单邮件ID rewardOrder = 11, //订单奖励过期 dailyTask = 12, // 日程任务未领取 cupPackage = 13, // 奖杯礼包 } export enum ESecretaryType { // 秘书拥有类型 noHave = 0, // 未获取 had = 1, // 终生拥有 temporaryHad = 2, // 暂时拥有 } export enum ESecretary { // 秘书表枚举 skillExp = 2, // 艺人技能训练经验增加N% trainTime = 3, // 艺人培养时间减少N% searchTime = 4, // 星探等待时间减少N% starSearch = 5, // 每天额外星探次数增加N cityTimes = 6, // 城市自动宣传次数 workTimes = 7, // 自动处理公务次数 visitTimes = 8, // 自动接待次数 secretaryExpStart = 9, // 秘书体验活动开始时间 secretaryExpEnd = 10, // 秘书体验活动结束时间 applicationDuration = 11, //许可证申请时长 applicationLicense = 12, //申请许可证物品类型 applicationLicenseNum = 13, // 申请许可证物品数量 freeGiftId = 1001, // 每日免费领取礼包ID moneyGiftId = 2001, // 每日需要花费领取的礼包ID } export enum EManageMeet { // 经营会议枚举 baseLine = 3, // 基础探班队列 baseIntervalTime = 1200, // 基本间隔时间 } export enum EOscarType { man = 1, woman = 2, perform = 3, art = 4, publicPraise = 5, bestBoxOffice = 6, totalBoxOffice = 7, movies = 8, bigSell = 9, } export enum EGMRefreshType { // GM的刷新类型 dailySign = 0, // 日常签到 dailyTask = 1, // 日常任务 dailyMall = 2, // 商城每日限购道具 weeklyMall = 3, // 商城每周限购道具 manageProduce = 4, // 管理生产线cd的清除 
sevenAddup = 5, // 七日累计登陆 findStory = 6, // 剧本搜罗清除 } export enum EItemUseType { // 物品使用类型 noReward = 0, // 无奖励 allReward = 1, // 获得reward中所有奖励 randomReward = 2, // 随机获得一项奖励 } export enum EItemType { // 物品类型 randomActor = 16, // 随机获得一个艺人 } export enum EGuideType { guide = 1, // 引导 story = 2, // 剧情 } export enum ENoticeType { init = 1, add = 2, update = 4, remove = 8, } export enum ECooperate { taskLength = 6, } export enum EChatChannel { private = 1, // 私人聊天 world = 2, // 世界聊天 company = 3, // 公司聊天 } export enum ESystemMessageId { getActor1 = 1001, getActor2 = 1002, movie = 1003, updateName = 1004, createCompany = 1005, joinCompany = 1006, } export enum ESystemType { userName = 1, // 玩家/公司 名称 oldName = 2, // 旧名称 actorStar = 3, // 艺人星级 actorName = 4, // 艺人名称 moiveName = 5, // 电影名称 blockLevel = 6, // 街道等级 boxOfficeEvaluation = 7, // 电影等级 groupName = 8, // 集团名称 } export class ECommon { // 通用枚举 static MaxNameLenth = 20; // 名字最大长度 static DefaultName = "Brad Pitt"; // 默认名字 static MoveSpeed = 1.3; // 移动速度 static CanJump = false; // 开启跳跃 } export class EChannel { // 渠道枚举 static facebook = "1"; static google = "2"; } export class ProtoVersion { //工具自动生成的枚举,记录协议版本号 static versionCode = 10; //协议版本号数字表示 static versionName = "0.1.0"; //协议版本号字符表示 } export class DBOType { //工具自动生成的枚举,枚举所有的存库对象 static UsrData = "UsrData"; static GameInfo = "GameInfo"; static GameInfoExt = "GameInfoExt"; static GMMsgLog = "GMMsgLog"; static OrderData = "OrderData"; static ItemData = "ItemData"; static StorySuitData = "StorySuitData"; static StoryData = "StoryData"; static ActorData = "ActorData"; static CityData = "CityData"; static ActorSkillEffect = "ActorSkillEffect"; static TempData = "TempData"; static MovieData = "MovieData"; static ManageOrderData = "ManageOrderData"; // 经营订单 static ProgressBoxData = "ProgressBoxData"; static ManageProduceData = "ManageProduceData"; // 经营生产 static MovieLicenceResetTimeData = "MovieLicenceResetTimeData"; static ManageBusinessData = "ManageBusinessData"; // 经营公务 
static ManageVisitData = "ManageVisitData"; // 经营探班 static StoryMessage = "StoryMessage"; static RankListData = "RankListData"; static TaskData = "TaskData"; // 任务结构 static MallItemLimitData = "MallItemLimitData"; // 单个商品限购数据 static MailData = "MailData"; static ManageMeetingData = "ManageMeetingData"; // 经营会议 static DanmuData = "DanmuData"; static DailySign = "DailySign"; // 日常签到数据 static SecretaryData = "SecretaryData"; // 小秘书功能 static SevenAddUpData = "SevenAddUpData"; // 活动期间累计达到某种要求领取奖励 static ActortExerciseData = "ActortExerciseData"; // 艺人训练队列 static DailyEvent = "DailyEvent"; // 日程事件 static GuideData = "GuideData"; // 引导数据 static PlayerCooperateData = "PlayerCooperateData"; // 合作 static AccountCooperateData = "AccountCooperateData"; // 账号邀请合作数据 static AutoCooperateData = "AutoCooperateData"; // 公共邀请合作数据 static CooperateTaskData = "CooperateTaskData"; // 合作任务 static CooperateOrderData = "CooperateOrderData"; // 合作订单 static LevelGiftData = "LevelGiftData"; static GetGuideReward =
eReward"; // 完成所有引导任务领取奖励 static CupPackage = "CupPackage"; // 奖杯礼包 static StoryRefreshData = "StoryRefreshData"; // 剧本刷新时间信息 } export class DBOShared { //工具自动生成的枚举,枚举所有的存库对象的元信息 static UsrData = { dbname: 'game',tbname: 'UsrData',shared: true }; static GameInfo = { dbname: 'game',tbname: 'GameInfo',shared: true }; static GameInfoExt = { dbname: 'game',tbname: 'GameInfoExt',shared: true }; static GMMsgLog = { dbname: 'game',tbname: 'GMMsgLog',shared: true }; static OrderData = { dbname: 'game_global',tbname: 'OrderData',shared: false }; static ItemData = { dbname: 'game',tbname: 'ItemData',shared: true }; static StorySuitData = { dbname: 'game',tbname: 'StorySuitData',shared: true }; static StoryData = { dbname: 'game',tbname: 'StoryData',shared: true }; static ActorData = { dbname: 'game',tbname: 'ActorData',shared: true }; static CityData = { dbname: 'game',tbname: 'CityData',shared: true }; static ActorSkillEffect = { dbname: 'game',tbname: 'ActorSkillEffect',shared: true }; static TempData = { dbname: 'game',tbname: 'TempData',shared: true }; static MovieData = { dbname: 'game',tbname: 'MovieData',shared: true }; static ManageOrderData = { dbname: 'game',tbname: 'ManageOrderData',shared: true }; // 经营订单 static ProgressBoxData = { dbname: 'game',tbname: 'ProgressBoxData',shared: true }; static ManageProduceData = { dbname: 'game',tbname: 'ManageProduceData',shared: true }; // 经营生产 static MovieLicenceResetTimeData = { dbname: 'game',tbname: 'MovieLicenceResetTimeData',shared: true }; static ManageBusinessData = { dbname: 'game',tbname: 'ManageBusinessData',shared: true }; // 经营公务 static ManageVisitData = { dbname: 'game',tbname: 'ManageVisitData',shared: true }; // 经营探班 static StoryMessage = { dbname: 'game_global',tbname: 'StoryMessage',shared: false }; static RankListData = { dbname: 'game_global',tbname: 'RankListData',shared: false }; static TaskData = { dbname: 'game',tbname: 'TaskData',shared: true }; // 任务结构 static MallItemLimitData = { dbname: 
'game',tbname: 'MallItemLimitData',shared: true }; // 单个商品限购数据 static MailData = { dbname: 'game',tbname: 'MailData',shared: true }; static ManageMeetingData = { dbname: 'game',tbname: 'ManageMeetingData',shared: true }; // 经营会议 static DanmuData = { dbname: 'game_global',tbname: 'DanmuData',shared: false }; static DailySign = { dbname: 'game',tbname: 'DailySign',shared: true }; // 日常签到数据 static SecretaryData = { dbname: 'game',tbname: 'SecretaryData',shared: true }; // 小秘书功能 static SevenAddUpData = { dbname: 'game',tbname: 'SevenAddUpData',shared: true }; // 活动期间累计达到某种要求领取奖励 static ActortExerciseData = { dbname: 'game',tbname: 'ActortExerciseData',shared: true }; // 艺人训练队列 static DailyEvent = { dbname: 'game',tbname: 'DailyEvent',shared: true }; // 日程事件 static GuideData = { dbname: 'game',tbname: 'GuideData',shared: true }; // 引导数据 static PlayerCooperateData = { dbname: 'game',tbname: 'PlayerCooperateData',shared: true }; // 合作 static AccountCooperateData = { dbname: 'game_global',tbname: 'AccountCooperateData',shared: false }; // 账号邀请合作数据 static AutoCooperateData = { dbname: 'game_global',tbname: 'AutoCooperateData',shared: false }; // 公共邀请合作数据 static CooperateTaskData = { dbname: 'game_global',tbname: 'CooperateTaskData',shared: false }; // 合作任务 static CooperateOrderData = { dbname: 'game_global',tbname: 'CooperateOrderData',shared: false }; // 合作订单 static LevelGiftData = { dbname: 'game',tbname: 'LevelGiftData',shared: true }; static GetGuideReward = { dbname: 'game',tbname: 'GetGuideReward',shared: true }; // 完成所有引导任务领取奖励 static CupPackage = { dbname: 'game',tbname: 'CupPackage',shared: true }; // 奖杯礼包 static StoryRefreshData = { dbname: 'game',tbname: 'StoryRefreshData',shared: true }; // 剧本刷新时间信息 }
"GetGuid
identifier_name
main.rs
#![feature(proc_macro_hygiene, decl_macro)] #![feature(custom_inner_attributes)] #[macro_use] extern crate log; pub mod net; mod runtime; mod cache; mod perms; use cache::PathManager; use perms::*; pub mod protos { tonic::include_proto!("request"); } use protos::karl_host_server::KarlHostServer; use protos::*; use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::time::{Duration, Instant}; use std::sync::{Arc, Mutex}; use tokio; use tokio::sync::mpsc; use karl_common::*; use reqwest::{self, Method, header::HeaderName}; use tonic::{Request, Response, Status, Code}; use tonic::transport::Server; use clap::{Arg, App}; struct WarmProcess { process_token: ProcessToken, tx: mpsc::Sender<()>, } #[derive(Clone)] pub struct Host { /// Host ID (unique among hosts) id: u32, /// Host API to controller. api: crate::net::KarlHostAPI, /// Active process tokens. process_tokens: Arc<Mutex<HashMap<ProcessToken, ProcessPerms>>>, warm_processes: Arc<Mutex<HashMap<ModuleID, Vec<WarmProcess>>>>, warm_cache_tx: Option<mpsc::Sender<ComputeRequest>>, /// Path manager. path_manager: Arc<PathManager>, /// Only one compute at a time. 
compute_lock: Arc<Mutex<()>>, /// Whether caching is enabled cold_cache_enabled: bool, warm_cache_enabled: bool, /// Whether to read triggered data locally or forward to the data sink pubsub_enabled: bool, /// Whether to mock network access mock_network: bool, } #[tonic::async_trait] impl karl_host_server::KarlHost for Host { async fn start_compute( &self, req: Request<ComputeRequest>, ) -> Result<Response<NotifyStart>, Status> { let mut req = req.into_inner(); trace!("HANDLE_COMPUTE START {}", req.module_id); if let Some(process_token) = self.attach_warm_process(&mut req).await { Ok(Response::new(NotifyStart { process_token })) } else { let triggered_tag = req.triggered_tag.drain(..).collect(); let triggered_timestamp = req.triggered_timestamp.drain(..).collect(); let is_warm = false; let process_token = Host::spawn_new_process( self.clone(), req, is_warm, triggered_tag, triggered_timestamp, ).await; Ok(Response::new(NotifyStart { process_token })) } } async fn network( &self, req: Request<NetworkAccess>, ) -> Result<Response<NetworkAccessResult>, Status> { debug!("network"); // Validate the process is valid and has permissions to access the network. // No serializability guarantees from other requests from the same process. // Sanitizes the path. 
let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if !perms.can_access_domain(&req.domain) { return Err(Status::new(Code::Unauthenticated, "invalid network access")); } } // Build the network request let method = match req.method.as_str() { "GET" => Method::GET, "POST" => Method::POST, "PUT" => Method::PUT, "DELETE" => Method::DELETE, "HEAD" => Method::HEAD, "OPTIONS" => Method::OPTIONS, "CONNECT" => Method::CONNECT, "PATCH" => Method::PATCH, "TRACE" => Method::TRACE, _ => { return Err(Status::new(Code::InvalidArgument, "invalid http method")) }, }; let mut builder = reqwest::Client::new() .request(method, req.domain.clone()) .timeout(Duration::from_secs(1)) .body(req.body.clone()); for header in &req.headers { let key = HeaderName::from_bytes(&header.key[..]) .map_err(|e| { error!("{}", e); Status::new(Code::InvalidArgument, "invalid header name") })?; builder = builder.header(key, &header.value[..]); } if self.mock_network { // Forward the network access to the controller. 
warn!("mock network request"); warn!("finish diff_priv_pipeline (statistics sent): {:?}", Instant::now()); self.api.forward_network(req).await?; Ok(Response::new(NetworkAccessResult::default())) } else { // Make the actual network access let handle = tokio::spawn(async move { let res = builder.send().await.map_err(|e| { error!("{}", e); Status::new(Code::Aborted, "http request failed") })?; let status_code = res.status().as_u16() as u32; let headers = res .headers() .iter() .map(|(key, value)| KeyValuePair { key: key.as_str().as_bytes().to_vec(), value: value.as_bytes().to_vec(), }) .collect::<Vec<_>>(); let data = res.bytes().await.map_err(|e| { error!("{}", e); Status::new(Code::Unavailable, "error streaming response bytes") })?; Ok(Response::new(NetworkAccessResult { status_code, headers, data: data.to_vec(), })) }); // Forward the network access to the controller. self.api.forward_network(req).await?; // Return the result of the HTTP request. handle.await.map_err(|e| Status::new(Code::Internal, format!("{}", e)))? } } /// Validates the process is an existing process, and checks its /// permissions to see that the tag corresponds to a valid param. /// If the tag is valid and this is a stateless edge, only respond /// succesfully if the module is trying to get the triggered data. /// If the tag is valid and this is a stateful edge, endorse the data /// with the host token and forward to the controller. async fn get( &self, req: Request<GetData>, ) -> Result<Response<GetDataResult>, Status> { debug!("get"); // Validate the process is valid and has permissions to read the tag. // No serializability guarantees from other requests from the same process. 
let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { warn!("get: invalid token {}", req.process_token); return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if perms.is_triggered(&req.tag) { // cached the triggered file if req.lower != req.upper { debug!("get: {} invalid triggered timestamps", req.process_token); return Ok(Response::new(GetDataResult::default())) } else if !self.pubsub_enabled { debug!("get: {} pubsub disabled, fallthrough to read from data sink", req.process_token); // fallthrough below } else if let Some(data) = perms.read_triggered(&req.lower) { debug!("get: {} reading triggered data", req.process_token); return Ok(Response::new(GetDataResult { timestamps: vec![req.lower], data: vec![data], })) } else { debug!("get: {} process was not triggered", req.process_token); return Ok(Response::new(GetDataResult::default())) } } else if !perms.can_read(&req.tag) { warn!("get: {} cannot read {}", req.process_token, req.tag); return Err(Status::new(Code::Unauthenticated, "cannot read")); } } // Forward the file access to the controller and return the result debug!("get: {} forwarding tag={}", req.process_token, req.tag); self.api.forward_get(req).await } /// Validates the process is an existing process, and checks its /// permissions to see that the process is writing to a valid tag. /// If the tag is valid, endorse the data with the host token and
/// If the tag corresponds to sensor state (say maybe it starts with # /// which is reserved for state tags), forward the request as a state /// change instead. async fn push( &self, req: Request<PushData>, ) -> Result<Response<()>, Status> { debug!("push"); // Validate the process is valid and has permissions to write the file. // No serializability guarantees from other requests from the same process. // Sanitizes the path. let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } let sensor_key = if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if !perms.can_write(&req.tag) { debug!("push: {} cannot write tag={}, silently failing", req.process_token, req.tag); return Ok(Response::new(())); } if state_tags::is_state_tag(&req.tag) { Some(state_tags::parse_state_tag(&req.tag)) } else { None } } else { unreachable!() }; if let Some((sensor, key)) = sensor_key { // Forward as state change if the tag changes state. debug!("push: {} forwarding state change tag={}", req.process_token, req.tag); let req = StateChange { host_token: String::new(), process_token: req.process_token, sensor_id: sensor, key, value: req.data, }; self.api.forward_state(req).await } else { // Forward the file access to the controller and return the result debug!("push: {} forwarding push tag={}", req.process_token, req.tag); self.api.forward_push(req).await } } } impl Host { /// Generate a new host with a random ID. 
pub fn new( base_path: PathBuf, controller: &str, cold_cache_enabled: bool, warm_cache_enabled: bool, pubsub_enabled: bool, mock_network: bool, ) -> Self { use rand::Rng; let id: u32 = rand::thread_rng().gen(); assert!(cold_cache_enabled || !warm_cache_enabled); // TODO: buffer size Self { id, api: crate::net::KarlHostAPI::new(controller), process_tokens: Arc::new(Mutex::new(HashMap::new())), warm_processes: Arc::new(Mutex::new(HashMap::new())), warm_cache_tx: None, // wish this didn't have to be wrapped path_manager: Arc::new(PathManager::new(base_path, id)), compute_lock: Arc::new(Mutex::new(())), cold_cache_enabled, warm_cache_enabled, pubsub_enabled, mock_network, } } /// Spawns a background process that sends heartbeats to the controller /// at the HEARTBEAT_INTERVAL. /// /// The constructor creates a directory at the <KARL_PATH> if it does /// not already exist. The working directory for any computation is at /// <KARL_PATH>/<LISTENER_ID>. When not doing computation, the working /// directory must be at <KARL_PATH>. /// /// Parameters: /// - port - The port to listen on. /// - password - The password to register with the controller. pub async fn start(&mut self, port: u16, password: &str) -> Result<(), Status> { self.api.register(self.id, port, password).await?; let api = self.api.clone(); tokio::spawn(async move { // Every HEARTBEAT_INTERVAL seconds, this process wakes up // sends a heartbeat message to the controller. 
loop { tokio::time::sleep(Duration::from_secs(HEARTBEAT_INTERVAL)).await; trace!("heartbeat"); let res = api.heartbeat().await; if let Err(e) = res { warn!("error sending heartbeat: {}", e); }; } }); // listener for spawning warm processes let (tx, mut rx) = mpsc::channel::<ComputeRequest>(100); self.warm_cache_tx = Some(tx); let host = self.clone(); tokio::spawn(async move { loop { let req: ComputeRequest = rx.recv().await.unwrap(); let is_warm = true; Host::spawn_new_process( host.clone(), req, is_warm, TRIGGERED_KEY.to_string(), // special value TRIGGERED_KEY.to_string(), // special value ).await; } }); Ok(()) } async fn attach_warm_process( &self, req: &mut ComputeRequest, ) -> Option<ProcessToken> { let warm_process = { let mut warm_processes = self.warm_processes.lock().unwrap(); let mut process_tokens = self.process_tokens.lock().unwrap(); let mut process: Option<WarmProcess> = None; if let Some(processes) = warm_processes.get_mut(&req.module_id) { // reserve the process token process = processes.pop(); } if let Some(process) = process { process_tokens .get_mut(&process.process_token).unwrap() .set_compute_request(req); process } else { return None; } }; // permissions are set and warm process can continue info!("attaching: {} ({})", req.module_id, warm_process.process_token); warm_process.tx.send(()).await.unwrap(); Some(warm_process.process_token) } async fn spawn_new_process( host: Host, mut req: ComputeRequest, is_warm: bool, triggered_tag: String, triggered_timestamp: String, ) -> ProcessToken { let process_token = Token::gen(); let (perms, tx) = if !is_warm { info!("spawning cold process: {} ({})", req.module_id, process_token); (ProcessPerms::new(&mut req), None) } else { info!("spawning warm process: {} ({})", req.module_id, process_token); let (perms, tx) = ProcessPerms::new_warm_cache(); (perms, Some(tx)) }; // Mark an active process { let mut process_tokens = host.process_tokens.lock().unwrap(); 
assert!(!process_tokens.contains_key(&process_token)); process_tokens.insert(process_token.clone(), perms); } // If it's warm insert a sending channel to eventually notify // this process it is ready to continue if let Some(tx) = tx { host.warm_processes.lock().unwrap() .entry(req.module_id.clone()) .or_insert(vec![]) .push(WarmProcess { process_token: process_token.clone(), tx, }); } // Handle the process asynchronously #[cfg(target_os = "linux")] { let binary_path = Path::new(&req.binary_path).to_path_buf(); let process_token = process_token.clone(); tokio::spawn(async move { let original_req = req.clone(); if !triggered_tag.is_empty() { req.envs.push(format!("TRIGGERED_TAG={}", &triggered_tag)); } if !triggered_timestamp.is_empty() { req.envs.push(format!("TRIGGERED_TIMESTAMP={}", &triggered_timestamp)); } req.envs.push(format!("PROCESS_TOKEN={}", &process_token)); if !req.params.is_empty() { req.envs.push(format!("KARL_PARAMS={}", &req.params)); } if !req.returns.is_empty() { req.envs.push(format!("KARL_RETURNS={}", &req.returns)); } let execution_time = Host::handle_compute( host.compute_lock.clone(), host.path_manager.clone(), req.module_id, req.cached, host.cold_cache_enabled, req.package, binary_path, req.args, req.envs, ).unwrap(); host.process_tokens.lock().unwrap().remove(&process_token); host.api.notify_end(process_token).await.unwrap(); // Now that the compute request is finished, evaluate its // initialization time. If the initialization time was high, // recursively call this function but as a warm cache module. // We assume initialization time is high if the warm cache // is enabled. let _long_init_time = execution_time > Duration::from_secs(5); if host.warm_cache_enabled { debug!("execution_time was {:?}, spawning warm modules anyway", execution_time); host.warm_cache_tx.as_ref().unwrap().send(original_req).await.unwrap(); } }); } #[cfg(not(target_os = "linux"))] { unimplemented!() } process_token } /// Handle a compute request. 
/// /// Returns the execution time. #[cfg(target_os = "linux")] fn handle_compute( lock: Arc<Mutex<()>>, path_manager: Arc<PathManager>, module_id: ModuleID, cached: bool, cold_cache_enabled: bool, package: Vec<u8>, binary_path: PathBuf, args: Vec<String>, envs: Vec<String>, ) -> Result<Duration, Error> { let now = Instant::now(); if cached && !cold_cache_enabled { return Err(Error::CacheError("caching is disabled".to_string())); } // TODO: lock on finer granularity, just the specific module // But gets a lock around the filesystem so multiple people // aren't handling compute requests that could be cached. // And so that each request can create a directory for its process. let (mount, paths) = { let lock = lock.lock().unwrap(); debug!("cached={} cold_cache_enabled={}", cached, cold_cache_enabled); if cached && !path_manager.is_cached(&module_id) { // TODO: controller needs to handle this error // what if a second request gets here before the first // request caches the module? race condition return Err(Error::CacheError(format!("module {} is not cached", module_id))); } if !cached { path_manager.cache_module(&module_id, package)?; } debug!("unpacked request => {} s", now.elapsed().as_secs_f32()); let now = Instant::now(); let (mount, paths) = path_manager.new_request(&module_id)?; // info!("=> preprocessing: {} s", now.elapsed().as_secs_f32()); debug!("mounting overlayfs => {} s", now.elapsed().as_secs_f32()); drop(lock); (mount, paths) }; let start = Instant::now(); let _res = runtime::run( &paths.root_path, binary_path, args, envs, )?; let execution_time = start.elapsed(); debug!("invoked binary => {} s", execution_time.as_secs_f32()); trace!("HANDLE_COMPUTE FINISH {}", module_id); // Reset the root for the next computation. 
path_manager.unmount(mount)?; if let Err(e) = std::fs::remove_dir_all(&paths.request_path) { error!("error resetting request path: {:?}", e); } let now = Instant::now(); trace!( "reset directory at {:?} => {} s", &paths.request_path, now.elapsed().as_secs_f32(), ); Ok(execution_time) } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { env_logger::builder().filter_level(log::LevelFilter::Info).init(); let pwd = std::fs::canonicalize(".")?; let path = format!("{}/.host", std::env::var("KARL_PATH") .unwrap_or(pwd.as_os_str().to_str().unwrap().to_string())); let matches = App::new("Karl Host") .arg(Arg::with_name("path") .help("Absolute path to the host's base directory. \ Caches modules at `<path>/cache/<module_id>`. \ If there are multiple hosts on the same computer, they share \ the same cache. But each host has an automatically generated \ directory `<path>/host-<id>` for processing requests that is \ removed when the host is killed. Each request has a root \ directory at `<path>/host-<id>/<request>`.") .long("path") .takes_value(true) .default_value(&path)) .arg(Arg::with_name("port") .help("Port.") .short("p") .long("port") .takes_value(true) .default_value("59583")) .arg(Arg::with_name("password") .help("Controller password to register host.") .long("password") .takes_value(true) .default_value("password")) .arg(Arg::with_name("controller-ip") .help("IP address of the controller") .long("controller-ip") .takes_value(true) .default_value("127.0.0.1")) .arg(Arg::with_name("controller-port") .help("Port of the controller") .long("controller-port") .takes_value(true) .default_value("59582")) .arg(Arg::with_name("cold-cache") .help("Whether the cold cache is enabled (0 or 1)") .long("cold-cache") .takes_value(true) .default_value("1")) .arg(Arg::with_name("warm-cache") .help("Whether the warm cache is enabled (0 or 1)") .long("warm-cache") .takes_value(true) .default_value("1")) .arg(Arg::with_name("pubsub") .help("Whether pubsub 
optimization is enabled (0 or 1)") .long("pubsub") .takes_value(true) .default_value("1")) .arg(Arg::with_name("no-mock-network") .help("If the flag is included, uses the real network.") .long("no-mock-network")) .get_matches(); let base_path = Path::new(matches.value_of("path").unwrap()).to_path_buf(); let port: u16 = matches.value_of("port").unwrap().parse().unwrap(); let controller = format!( "http://{}:{}", matches.value_of("controller-ip").unwrap(), matches.value_of("controller-port").unwrap(), ); let password = matches.value_of("password").unwrap(); let cold_cache_enabled = matches.value_of("cold-cache").unwrap() == "1"; let warm_cache_enabled = matches.value_of("warm-cache").unwrap() == "1"; let pubsub_enabled = matches.value_of("pubsub").unwrap() == "1"; let mock_network = !matches.is_present("no-mock-network"); let mut host = Host::new( base_path, &controller, cold_cache_enabled, warm_cache_enabled, pubsub_enabled, mock_network, ); host.start(port, password).await.unwrap(); Server::builder() .add_service(KarlHostServer::new(host)) .serve(format!("0.0.0.0:{}", port).parse()?) .await .unwrap(); Ok(()) }
/// forward to the controller. ///
random_line_split
main.rs
#![feature(proc_macro_hygiene, decl_macro)] #![feature(custom_inner_attributes)] #[macro_use] extern crate log; pub mod net; mod runtime; mod cache; mod perms; use cache::PathManager; use perms::*; pub mod protos { tonic::include_proto!("request"); } use protos::karl_host_server::KarlHostServer; use protos::*; use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::time::{Duration, Instant}; use std::sync::{Arc, Mutex}; use tokio; use tokio::sync::mpsc; use karl_common::*; use reqwest::{self, Method, header::HeaderName}; use tonic::{Request, Response, Status, Code}; use tonic::transport::Server; use clap::{Arg, App}; struct WarmProcess { process_token: ProcessToken, tx: mpsc::Sender<()>, } #[derive(Clone)] pub struct Host { /// Host ID (unique among hosts) id: u32, /// Host API to controller. api: crate::net::KarlHostAPI, /// Active process tokens. process_tokens: Arc<Mutex<HashMap<ProcessToken, ProcessPerms>>>, warm_processes: Arc<Mutex<HashMap<ModuleID, Vec<WarmProcess>>>>, warm_cache_tx: Option<mpsc::Sender<ComputeRequest>>, /// Path manager. path_manager: Arc<PathManager>, /// Only one compute at a time. 
compute_lock: Arc<Mutex<()>>, /// Whether caching is enabled cold_cache_enabled: bool, warm_cache_enabled: bool, /// Whether to read triggered data locally or forward to the data sink pubsub_enabled: bool, /// Whether to mock network access mock_network: bool, } #[tonic::async_trait] impl karl_host_server::KarlHost for Host { async fn start_compute( &self, req: Request<ComputeRequest>, ) -> Result<Response<NotifyStart>, Status> { let mut req = req.into_inner(); trace!("HANDLE_COMPUTE START {}", req.module_id); if let Some(process_token) = self.attach_warm_process(&mut req).await { Ok(Response::new(NotifyStart { process_token })) } else { let triggered_tag = req.triggered_tag.drain(..).collect(); let triggered_timestamp = req.triggered_timestamp.drain(..).collect(); let is_warm = false; let process_token = Host::spawn_new_process( self.clone(), req, is_warm, triggered_tag, triggered_timestamp, ).await; Ok(Response::new(NotifyStart { process_token })) } } async fn network( &self, req: Request<NetworkAccess>, ) -> Result<Response<NetworkAccessResult>, Status> { debug!("network"); // Validate the process is valid and has permissions to access the network. // No serializability guarantees from other requests from the same process. // Sanitizes the path. 
let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if !perms.can_access_domain(&req.domain) { return Err(Status::new(Code::Unauthenticated, "invalid network access")); } } // Build the network request let method = match req.method.as_str() { "GET" => Method::GET, "POST" => Method::POST, "PUT" => Method::PUT, "DELETE" => Method::DELETE, "HEAD" => Method::HEAD, "OPTIONS" => Method::OPTIONS, "CONNECT" => Method::CONNECT, "PATCH" => Method::PATCH, "TRACE" => Method::TRACE, _ => { return Err(Status::new(Code::InvalidArgument, "invalid http method")) }, }; let mut builder = reqwest::Client::new() .request(method, req.domain.clone()) .timeout(Duration::from_secs(1)) .body(req.body.clone()); for header in &req.headers { let key = HeaderName::from_bytes(&header.key[..]) .map_err(|e| { error!("{}", e); Status::new(Code::InvalidArgument, "invalid header name") })?; builder = builder.header(key, &header.value[..]); } if self.mock_network { // Forward the network access to the controller. 
warn!("mock network request"); warn!("finish diff_priv_pipeline (statistics sent): {:?}", Instant::now()); self.api.forward_network(req).await?; Ok(Response::new(NetworkAccessResult::default())) } else { // Make the actual network access let handle = tokio::spawn(async move { let res = builder.send().await.map_err(|e| { error!("{}", e); Status::new(Code::Aborted, "http request failed") })?; let status_code = res.status().as_u16() as u32; let headers = res .headers() .iter() .map(|(key, value)| KeyValuePair { key: key.as_str().as_bytes().to_vec(), value: value.as_bytes().to_vec(), }) .collect::<Vec<_>>(); let data = res.bytes().await.map_err(|e| { error!("{}", e); Status::new(Code::Unavailable, "error streaming response bytes") })?; Ok(Response::new(NetworkAccessResult { status_code, headers, data: data.to_vec(), })) }); // Forward the network access to the controller. self.api.forward_network(req).await?; // Return the result of the HTTP request. handle.await.map_err(|e| Status::new(Code::Internal, format!("{}", e)))? } } /// Validates the process is an existing process, and checks its /// permissions to see that the tag corresponds to a valid param. /// If the tag is valid and this is a stateless edge, only respond /// succesfully if the module is trying to get the triggered data. /// If the tag is valid and this is a stateful edge, endorse the data /// with the host token and forward to the controller. async fn get( &self, req: Request<GetData>, ) -> Result<Response<GetDataResult>, Status> { debug!("get"); // Validate the process is valid and has permissions to read the tag. // No serializability guarantees from other requests from the same process. 
let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { warn!("get: invalid token {}", req.process_token); return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if perms.is_triggered(&req.tag) { // cached the triggered file if req.lower != req.upper { debug!("get: {} invalid triggered timestamps", req.process_token); return Ok(Response::new(GetDataResult::default())) } else if !self.pubsub_enabled { debug!("get: {} pubsub disabled, fallthrough to read from data sink", req.process_token); // fallthrough below } else if let Some(data) = perms.read_triggered(&req.lower) { debug!("get: {} reading triggered data", req.process_token); return Ok(Response::new(GetDataResult { timestamps: vec![req.lower], data: vec![data], })) } else { debug!("get: {} process was not triggered", req.process_token); return Ok(Response::new(GetDataResult::default())) } } else if !perms.can_read(&req.tag) { warn!("get: {} cannot read {}", req.process_token, req.tag); return Err(Status::new(Code::Unauthenticated, "cannot read")); } } // Forward the file access to the controller and return the result debug!("get: {} forwarding tag={}", req.process_token, req.tag); self.api.forward_get(req).await } /// Validates the process is an existing process, and checks its /// permissions to see that the process is writing to a valid tag. /// If the tag is valid, endorse the data with the host token and /// forward to the controller. /// /// If the tag corresponds to sensor state (say maybe it starts with # /// which is reserved for state tags), forward the request as a state /// change instead. async fn
( &self, req: Request<PushData>, ) -> Result<Response<()>, Status> { debug!("push"); // Validate the process is valid and has permissions to write the file. // No serializability guarantees from other requests from the same process. // Sanitizes the path. let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } let sensor_key = if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if !perms.can_write(&req.tag) { debug!("push: {} cannot write tag={}, silently failing", req.process_token, req.tag); return Ok(Response::new(())); } if state_tags::is_state_tag(&req.tag) { Some(state_tags::parse_state_tag(&req.tag)) } else { None } } else { unreachable!() }; if let Some((sensor, key)) = sensor_key { // Forward as state change if the tag changes state. debug!("push: {} forwarding state change tag={}", req.process_token, req.tag); let req = StateChange { host_token: String::new(), process_token: req.process_token, sensor_id: sensor, key, value: req.data, }; self.api.forward_state(req).await } else { // Forward the file access to the controller and return the result debug!("push: {} forwarding push tag={}", req.process_token, req.tag); self.api.forward_push(req).await } } } impl Host { /// Generate a new host with a random ID. 
    pub fn new(
        base_path: PathBuf,
        controller: &str,
        cold_cache_enabled: bool,
        warm_cache_enabled: bool,
        pubsub_enabled: bool,
        mock_network: bool,
    ) -> Self {
        use rand::Rng;
        // Random host ID; uniqueness among hosts is probabilistic.
        let id: u32 = rand::thread_rng().gen();
        // The warm cache reuses cached module packages, so enabling it
        // without the cold cache is an invalid configuration.
        assert!(cold_cache_enabled || !warm_cache_enabled);
        // TODO: buffer size
        Self {
            id,
            api: crate::net::KarlHostAPI::new(controller),
            process_tokens: Arc::new(Mutex::new(HashMap::new())),
            warm_processes: Arc::new(Mutex::new(HashMap::new())),
            // Set later in `start()`, once the warm-cache channel exists.
            warm_cache_tx: None,
            // wish this didn't have to be wrapped
            path_manager: Arc::new(PathManager::new(base_path, id)),
            compute_lock: Arc::new(Mutex::new(())),
            cold_cache_enabled,
            warm_cache_enabled,
            pubsub_enabled,
            mock_network,
        }
    }

    /// Spawns a background process that sends heartbeats to the controller
    /// at the HEARTBEAT_INTERVAL.
    ///
    /// The constructor creates a directory at the <KARL_PATH> if it does
    /// not already exist. The working directory for any computation is at
    /// <KARL_PATH>/<LISTENER_ID>. When not doing computation, the working
    /// directory must be at <KARL_PATH>.
    ///
    /// Parameters:
    /// - port - The port to listen on.
    /// - password - The password to register with the controller.
    pub async fn start(&mut self, port: u16, password: &str) -> Result<(), Status> {
        // Register with the controller before serving any requests.
        self.api.register(self.id, port, password).await?;
        let api = self.api.clone();
        tokio::spawn(async move {
            // Every HEARTBEAT_INTERVAL seconds, this process wakes up
            // sends a heartbeat message to the controller.
loop { tokio::time::sleep(Duration::from_secs(HEARTBEAT_INTERVAL)).await; trace!("heartbeat"); let res = api.heartbeat().await; if let Err(e) = res { warn!("error sending heartbeat: {}", e); }; } }); // listener for spawning warm processes let (tx, mut rx) = mpsc::channel::<ComputeRequest>(100); self.warm_cache_tx = Some(tx); let host = self.clone(); tokio::spawn(async move { loop { let req: ComputeRequest = rx.recv().await.unwrap(); let is_warm = true; Host::spawn_new_process( host.clone(), req, is_warm, TRIGGERED_KEY.to_string(), // special value TRIGGERED_KEY.to_string(), // special value ).await; } }); Ok(()) } async fn attach_warm_process( &self, req: &mut ComputeRequest, ) -> Option<ProcessToken> { let warm_process = { let mut warm_processes = self.warm_processes.lock().unwrap(); let mut process_tokens = self.process_tokens.lock().unwrap(); let mut process: Option<WarmProcess> = None; if let Some(processes) = warm_processes.get_mut(&req.module_id) { // reserve the process token process = processes.pop(); } if let Some(process) = process { process_tokens .get_mut(&process.process_token).unwrap() .set_compute_request(req); process } else { return None; } }; // permissions are set and warm process can continue info!("attaching: {} ({})", req.module_id, warm_process.process_token); warm_process.tx.send(()).await.unwrap(); Some(warm_process.process_token) } async fn spawn_new_process( host: Host, mut req: ComputeRequest, is_warm: bool, triggered_tag: String, triggered_timestamp: String, ) -> ProcessToken { let process_token = Token::gen(); let (perms, tx) = if !is_warm { info!("spawning cold process: {} ({})", req.module_id, process_token); (ProcessPerms::new(&mut req), None) } else { info!("spawning warm process: {} ({})", req.module_id, process_token); let (perms, tx) = ProcessPerms::new_warm_cache(); (perms, Some(tx)) }; // Mark an active process { let mut process_tokens = host.process_tokens.lock().unwrap(); 
assert!(!process_tokens.contains_key(&process_token)); process_tokens.insert(process_token.clone(), perms); } // If it's warm insert a sending channel to eventually notify // this process it is ready to continue if let Some(tx) = tx { host.warm_processes.lock().unwrap() .entry(req.module_id.clone()) .or_insert(vec![]) .push(WarmProcess { process_token: process_token.clone(), tx, }); } // Handle the process asynchronously #[cfg(target_os = "linux")] { let binary_path = Path::new(&req.binary_path).to_path_buf(); let process_token = process_token.clone(); tokio::spawn(async move { let original_req = req.clone(); if !triggered_tag.is_empty() { req.envs.push(format!("TRIGGERED_TAG={}", &triggered_tag)); } if !triggered_timestamp.is_empty() { req.envs.push(format!("TRIGGERED_TIMESTAMP={}", &triggered_timestamp)); } req.envs.push(format!("PROCESS_TOKEN={}", &process_token)); if !req.params.is_empty() { req.envs.push(format!("KARL_PARAMS={}", &req.params)); } if !req.returns.is_empty() { req.envs.push(format!("KARL_RETURNS={}", &req.returns)); } let execution_time = Host::handle_compute( host.compute_lock.clone(), host.path_manager.clone(), req.module_id, req.cached, host.cold_cache_enabled, req.package, binary_path, req.args, req.envs, ).unwrap(); host.process_tokens.lock().unwrap().remove(&process_token); host.api.notify_end(process_token).await.unwrap(); // Now that the compute request is finished, evaluate its // initialization time. If the initialization time was high, // recursively call this function but as a warm cache module. // We assume initialization time is high if the warm cache // is enabled. let _long_init_time = execution_time > Duration::from_secs(5); if host.warm_cache_enabled { debug!("execution_time was {:?}, spawning warm modules anyway", execution_time); host.warm_cache_tx.as_ref().unwrap().send(original_req).await.unwrap(); } }); } #[cfg(not(target_os = "linux"))] { unimplemented!() } process_token } /// Handle a compute request. 
/// /// Returns the execution time. #[cfg(target_os = "linux")] fn handle_compute( lock: Arc<Mutex<()>>, path_manager: Arc<PathManager>, module_id: ModuleID, cached: bool, cold_cache_enabled: bool, package: Vec<u8>, binary_path: PathBuf, args: Vec<String>, envs: Vec<String>, ) -> Result<Duration, Error> { let now = Instant::now(); if cached && !cold_cache_enabled { return Err(Error::CacheError("caching is disabled".to_string())); } // TODO: lock on finer granularity, just the specific module // But gets a lock around the filesystem so multiple people // aren't handling compute requests that could be cached. // And so that each request can create a directory for its process. let (mount, paths) = { let lock = lock.lock().unwrap(); debug!("cached={} cold_cache_enabled={}", cached, cold_cache_enabled); if cached && !path_manager.is_cached(&module_id) { // TODO: controller needs to handle this error // what if a second request gets here before the first // request caches the module? race condition return Err(Error::CacheError(format!("module {} is not cached", module_id))); } if !cached { path_manager.cache_module(&module_id, package)?; } debug!("unpacked request => {} s", now.elapsed().as_secs_f32()); let now = Instant::now(); let (mount, paths) = path_manager.new_request(&module_id)?; // info!("=> preprocessing: {} s", now.elapsed().as_secs_f32()); debug!("mounting overlayfs => {} s", now.elapsed().as_secs_f32()); drop(lock); (mount, paths) }; let start = Instant::now(); let _res = runtime::run( &paths.root_path, binary_path, args, envs, )?; let execution_time = start.elapsed(); debug!("invoked binary => {} s", execution_time.as_secs_f32()); trace!("HANDLE_COMPUTE FINISH {}", module_id); // Reset the root for the next computation. 
path_manager.unmount(mount)?; if let Err(e) = std::fs::remove_dir_all(&paths.request_path) { error!("error resetting request path: {:?}", e); } let now = Instant::now(); trace!( "reset directory at {:?} => {} s", &paths.request_path, now.elapsed().as_secs_f32(), ); Ok(execution_time) } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { env_logger::builder().filter_level(log::LevelFilter::Info).init(); let pwd = std::fs::canonicalize(".")?; let path = format!("{}/.host", std::env::var("KARL_PATH") .unwrap_or(pwd.as_os_str().to_str().unwrap().to_string())); let matches = App::new("Karl Host") .arg(Arg::with_name("path") .help("Absolute path to the host's base directory. \ Caches modules at `<path>/cache/<module_id>`. \ If there are multiple hosts on the same computer, they share \ the same cache. But each host has an automatically generated \ directory `<path>/host-<id>` for processing requests that is \ removed when the host is killed. Each request has a root \ directory at `<path>/host-<id>/<request>`.") .long("path") .takes_value(true) .default_value(&path)) .arg(Arg::with_name("port") .help("Port.") .short("p") .long("port") .takes_value(true) .default_value("59583")) .arg(Arg::with_name("password") .help("Controller password to register host.") .long("password") .takes_value(true) .default_value("password")) .arg(Arg::with_name("controller-ip") .help("IP address of the controller") .long("controller-ip") .takes_value(true) .default_value("127.0.0.1")) .arg(Arg::with_name("controller-port") .help("Port of the controller") .long("controller-port") .takes_value(true) .default_value("59582")) .arg(Arg::with_name("cold-cache") .help("Whether the cold cache is enabled (0 or 1)") .long("cold-cache") .takes_value(true) .default_value("1")) .arg(Arg::with_name("warm-cache") .help("Whether the warm cache is enabled (0 or 1)") .long("warm-cache") .takes_value(true) .default_value("1")) .arg(Arg::with_name("pubsub") .help("Whether pubsub 
optimization is enabled (0 or 1)") .long("pubsub") .takes_value(true) .default_value("1")) .arg(Arg::with_name("no-mock-network") .help("If the flag is included, uses the real network.") .long("no-mock-network")) .get_matches(); let base_path = Path::new(matches.value_of("path").unwrap()).to_path_buf(); let port: u16 = matches.value_of("port").unwrap().parse().unwrap(); let controller = format!( "http://{}:{}", matches.value_of("controller-ip").unwrap(), matches.value_of("controller-port").unwrap(), ); let password = matches.value_of("password").unwrap(); let cold_cache_enabled = matches.value_of("cold-cache").unwrap() == "1"; let warm_cache_enabled = matches.value_of("warm-cache").unwrap() == "1"; let pubsub_enabled = matches.value_of("pubsub").unwrap() == "1"; let mock_network = !matches.is_present("no-mock-network"); let mut host = Host::new( base_path, &controller, cold_cache_enabled, warm_cache_enabled, pubsub_enabled, mock_network, ); host.start(port, password).await.unwrap(); Server::builder() .add_service(KarlHostServer::new(host)) .serve(format!("0.0.0.0:{}", port).parse()?) .await .unwrap(); Ok(()) }
// Dataset artifact (fill-in-the-middle metadata), commented out so it does
// not sit as bare identifiers in a Rust source file:
// middle = "push", fim_type = "identifier_name", file_name = "main.rs"
#![feature(proc_macro_hygiene, decl_macro)] #![feature(custom_inner_attributes)] #[macro_use] extern crate log; pub mod net; mod runtime; mod cache; mod perms; use cache::PathManager; use perms::*; pub mod protos { tonic::include_proto!("request"); } use protos::karl_host_server::KarlHostServer; use protos::*; use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::time::{Duration, Instant}; use std::sync::{Arc, Mutex}; use tokio; use tokio::sync::mpsc; use karl_common::*; use reqwest::{self, Method, header::HeaderName}; use tonic::{Request, Response, Status, Code}; use tonic::transport::Server; use clap::{Arg, App}; struct WarmProcess { process_token: ProcessToken, tx: mpsc::Sender<()>, } #[derive(Clone)] pub struct Host { /// Host ID (unique among hosts) id: u32, /// Host API to controller. api: crate::net::KarlHostAPI, /// Active process tokens. process_tokens: Arc<Mutex<HashMap<ProcessToken, ProcessPerms>>>, warm_processes: Arc<Mutex<HashMap<ModuleID, Vec<WarmProcess>>>>, warm_cache_tx: Option<mpsc::Sender<ComputeRequest>>, /// Path manager. path_manager: Arc<PathManager>, /// Only one compute at a time. 
compute_lock: Arc<Mutex<()>>, /// Whether caching is enabled cold_cache_enabled: bool, warm_cache_enabled: bool, /// Whether to read triggered data locally or forward to the data sink pubsub_enabled: bool, /// Whether to mock network access mock_network: bool, } #[tonic::async_trait] impl karl_host_server::KarlHost for Host { async fn start_compute( &self, req: Request<ComputeRequest>, ) -> Result<Response<NotifyStart>, Status> { let mut req = req.into_inner(); trace!("HANDLE_COMPUTE START {}", req.module_id); if let Some(process_token) = self.attach_warm_process(&mut req).await { Ok(Response::new(NotifyStart { process_token })) } else { let triggered_tag = req.triggered_tag.drain(..).collect(); let triggered_timestamp = req.triggered_timestamp.drain(..).collect(); let is_warm = false; let process_token = Host::spawn_new_process( self.clone(), req, is_warm, triggered_tag, triggered_timestamp, ).await; Ok(Response::new(NotifyStart { process_token })) } } async fn network( &self, req: Request<NetworkAccess>, ) -> Result<Response<NetworkAccessResult>, Status> { debug!("network"); // Validate the process is valid and has permissions to access the network. // No serializability guarantees from other requests from the same process. // Sanitizes the path. 
let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if !perms.can_access_domain(&req.domain) { return Err(Status::new(Code::Unauthenticated, "invalid network access")); } } // Build the network request let method = match req.method.as_str() { "GET" => Method::GET, "POST" => Method::POST, "PUT" => Method::PUT, "DELETE" => Method::DELETE, "HEAD" => Method::HEAD, "OPTIONS" => Method::OPTIONS, "CONNECT" => Method::CONNECT, "PATCH" => Method::PATCH, "TRACE" => Method::TRACE, _ => { return Err(Status::new(Code::InvalidArgument, "invalid http method")) }, }; let mut builder = reqwest::Client::new() .request(method, req.domain.clone()) .timeout(Duration::from_secs(1)) .body(req.body.clone()); for header in &req.headers { let key = HeaderName::from_bytes(&header.key[..]) .map_err(|e| { error!("{}", e); Status::new(Code::InvalidArgument, "invalid header name") })?; builder = builder.header(key, &header.value[..]); } if self.mock_network { // Forward the network access to the controller. 
warn!("mock network request"); warn!("finish diff_priv_pipeline (statistics sent): {:?}", Instant::now()); self.api.forward_network(req).await?; Ok(Response::new(NetworkAccessResult::default())) } else { // Make the actual network access let handle = tokio::spawn(async move { let res = builder.send().await.map_err(|e| { error!("{}", e); Status::new(Code::Aborted, "http request failed") })?; let status_code = res.status().as_u16() as u32; let headers = res .headers() .iter() .map(|(key, value)| KeyValuePair { key: key.as_str().as_bytes().to_vec(), value: value.as_bytes().to_vec(), }) .collect::<Vec<_>>(); let data = res.bytes().await.map_err(|e| { error!("{}", e); Status::new(Code::Unavailable, "error streaming response bytes") })?; Ok(Response::new(NetworkAccessResult { status_code, headers, data: data.to_vec(), })) }); // Forward the network access to the controller. self.api.forward_network(req).await?; // Return the result of the HTTP request. handle.await.map_err(|e| Status::new(Code::Internal, format!("{}", e)))? } } /// Validates the process is an existing process, and checks its /// permissions to see that the tag corresponds to a valid param. /// If the tag is valid and this is a stateless edge, only respond /// succesfully if the module is trying to get the triggered data. /// If the tag is valid and this is a stateful edge, endorse the data /// with the host token and forward to the controller. async fn get( &self, req: Request<GetData>, ) -> Result<Response<GetDataResult>, Status> { debug!("get"); // Validate the process is valid and has permissions to read the tag. // No serializability guarantees from other requests from the same process. 
let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { warn!("get: invalid token {}", req.process_token); return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if perms.is_triggered(&req.tag) { // cached the triggered file if req.lower != req.upper { debug!("get: {} invalid triggered timestamps", req.process_token); return Ok(Response::new(GetDataResult::default())) } else if !self.pubsub_enabled { debug!("get: {} pubsub disabled, fallthrough to read from data sink", req.process_token); // fallthrough below } else if let Some(data) = perms.read_triggered(&req.lower) { debug!("get: {} reading triggered data", req.process_token); return Ok(Response::new(GetDataResult { timestamps: vec![req.lower], data: vec![data], })) } else { debug!("get: {} process was not triggered", req.process_token); return Ok(Response::new(GetDataResult::default())) } } else if !perms.can_read(&req.tag) { warn!("get: {} cannot read {}", req.process_token, req.tag); return Err(Status::new(Code::Unauthenticated, "cannot read")); } } // Forward the file access to the controller and return the result debug!("get: {} forwarding tag={}", req.process_token, req.tag); self.api.forward_get(req).await } /// Validates the process is an existing process, and checks its /// permissions to see that the process is writing to a valid tag. /// If the tag is valid, endorse the data with the host token and /// forward to the controller. /// /// If the tag corresponds to sensor state (say maybe it starts with # /// which is reserved for state tags), forward the request as a state /// change instead. async fn push( &self, req: Request<PushData>, ) -> Result<Response<()>, Status>
} impl Host { /// Generate a new host with a random ID. pub fn new( base_path: PathBuf, controller: &str, cold_cache_enabled: bool, warm_cache_enabled: bool, pubsub_enabled: bool, mock_network: bool, ) -> Self { use rand::Rng; let id: u32 = rand::thread_rng().gen(); assert!(cold_cache_enabled || !warm_cache_enabled); // TODO: buffer size Self { id, api: crate::net::KarlHostAPI::new(controller), process_tokens: Arc::new(Mutex::new(HashMap::new())), warm_processes: Arc::new(Mutex::new(HashMap::new())), warm_cache_tx: None, // wish this didn't have to be wrapped path_manager: Arc::new(PathManager::new(base_path, id)), compute_lock: Arc::new(Mutex::new(())), cold_cache_enabled, warm_cache_enabled, pubsub_enabled, mock_network, } } /// Spawns a background process that sends heartbeats to the controller /// at the HEARTBEAT_INTERVAL. /// /// The constructor creates a directory at the <KARL_PATH> if it does /// not already exist. The working directory for any computation is at /// <KARL_PATH>/<LISTENER_ID>. When not doing computation, the working /// directory must be at <KARL_PATH>. /// /// Parameters: /// - port - The port to listen on. /// - password - The password to register with the controller. pub async fn start(&mut self, port: u16, password: &str) -> Result<(), Status> { self.api.register(self.id, port, password).await?; let api = self.api.clone(); tokio::spawn(async move { // Every HEARTBEAT_INTERVAL seconds, this process wakes up // sends a heartbeat message to the controller. 
loop { tokio::time::sleep(Duration::from_secs(HEARTBEAT_INTERVAL)).await; trace!("heartbeat"); let res = api.heartbeat().await; if let Err(e) = res { warn!("error sending heartbeat: {}", e); }; } }); // listener for spawning warm processes let (tx, mut rx) = mpsc::channel::<ComputeRequest>(100); self.warm_cache_tx = Some(tx); let host = self.clone(); tokio::spawn(async move { loop { let req: ComputeRequest = rx.recv().await.unwrap(); let is_warm = true; Host::spawn_new_process( host.clone(), req, is_warm, TRIGGERED_KEY.to_string(), // special value TRIGGERED_KEY.to_string(), // special value ).await; } }); Ok(()) } async fn attach_warm_process( &self, req: &mut ComputeRequest, ) -> Option<ProcessToken> { let warm_process = { let mut warm_processes = self.warm_processes.lock().unwrap(); let mut process_tokens = self.process_tokens.lock().unwrap(); let mut process: Option<WarmProcess> = None; if let Some(processes) = warm_processes.get_mut(&req.module_id) { // reserve the process token process = processes.pop(); } if let Some(process) = process { process_tokens .get_mut(&process.process_token).unwrap() .set_compute_request(req); process } else { return None; } }; // permissions are set and warm process can continue info!("attaching: {} ({})", req.module_id, warm_process.process_token); warm_process.tx.send(()).await.unwrap(); Some(warm_process.process_token) } async fn spawn_new_process( host: Host, mut req: ComputeRequest, is_warm: bool, triggered_tag: String, triggered_timestamp: String, ) -> ProcessToken { let process_token = Token::gen(); let (perms, tx) = if !is_warm { info!("spawning cold process: {} ({})", req.module_id, process_token); (ProcessPerms::new(&mut req), None) } else { info!("spawning warm process: {} ({})", req.module_id, process_token); let (perms, tx) = ProcessPerms::new_warm_cache(); (perms, Some(tx)) }; // Mark an active process { let mut process_tokens = host.process_tokens.lock().unwrap(); 
assert!(!process_tokens.contains_key(&process_token)); process_tokens.insert(process_token.clone(), perms); } // If it's warm insert a sending channel to eventually notify // this process it is ready to continue if let Some(tx) = tx { host.warm_processes.lock().unwrap() .entry(req.module_id.clone()) .or_insert(vec![]) .push(WarmProcess { process_token: process_token.clone(), tx, }); } // Handle the process asynchronously #[cfg(target_os = "linux")] { let binary_path = Path::new(&req.binary_path).to_path_buf(); let process_token = process_token.clone(); tokio::spawn(async move { let original_req = req.clone(); if !triggered_tag.is_empty() { req.envs.push(format!("TRIGGERED_TAG={}", &triggered_tag)); } if !triggered_timestamp.is_empty() { req.envs.push(format!("TRIGGERED_TIMESTAMP={}", &triggered_timestamp)); } req.envs.push(format!("PROCESS_TOKEN={}", &process_token)); if !req.params.is_empty() { req.envs.push(format!("KARL_PARAMS={}", &req.params)); } if !req.returns.is_empty() { req.envs.push(format!("KARL_RETURNS={}", &req.returns)); } let execution_time = Host::handle_compute( host.compute_lock.clone(), host.path_manager.clone(), req.module_id, req.cached, host.cold_cache_enabled, req.package, binary_path, req.args, req.envs, ).unwrap(); host.process_tokens.lock().unwrap().remove(&process_token); host.api.notify_end(process_token).await.unwrap(); // Now that the compute request is finished, evaluate its // initialization time. If the initialization time was high, // recursively call this function but as a warm cache module. // We assume initialization time is high if the warm cache // is enabled. let _long_init_time = execution_time > Duration::from_secs(5); if host.warm_cache_enabled { debug!("execution_time was {:?}, spawning warm modules anyway", execution_time); host.warm_cache_tx.as_ref().unwrap().send(original_req).await.unwrap(); } }); } #[cfg(not(target_os = "linux"))] { unimplemented!() } process_token } /// Handle a compute request. 
/// /// Returns the execution time. #[cfg(target_os = "linux")] fn handle_compute( lock: Arc<Mutex<()>>, path_manager: Arc<PathManager>, module_id: ModuleID, cached: bool, cold_cache_enabled: bool, package: Vec<u8>, binary_path: PathBuf, args: Vec<String>, envs: Vec<String>, ) -> Result<Duration, Error> { let now = Instant::now(); if cached && !cold_cache_enabled { return Err(Error::CacheError("caching is disabled".to_string())); } // TODO: lock on finer granularity, just the specific module // But gets a lock around the filesystem so multiple people // aren't handling compute requests that could be cached. // And so that each request can create a directory for its process. let (mount, paths) = { let lock = lock.lock().unwrap(); debug!("cached={} cold_cache_enabled={}", cached, cold_cache_enabled); if cached && !path_manager.is_cached(&module_id) { // TODO: controller needs to handle this error // what if a second request gets here before the first // request caches the module? race condition return Err(Error::CacheError(format!("module {} is not cached", module_id))); } if !cached { path_manager.cache_module(&module_id, package)?; } debug!("unpacked request => {} s", now.elapsed().as_secs_f32()); let now = Instant::now(); let (mount, paths) = path_manager.new_request(&module_id)?; // info!("=> preprocessing: {} s", now.elapsed().as_secs_f32()); debug!("mounting overlayfs => {} s", now.elapsed().as_secs_f32()); drop(lock); (mount, paths) }; let start = Instant::now(); let _res = runtime::run( &paths.root_path, binary_path, args, envs, )?; let execution_time = start.elapsed(); debug!("invoked binary => {} s", execution_time.as_secs_f32()); trace!("HANDLE_COMPUTE FINISH {}", module_id); // Reset the root for the next computation. 
path_manager.unmount(mount)?; if let Err(e) = std::fs::remove_dir_all(&paths.request_path) { error!("error resetting request path: {:?}", e); } let now = Instant::now(); trace!( "reset directory at {:?} => {} s", &paths.request_path, now.elapsed().as_secs_f32(), ); Ok(execution_time) } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { env_logger::builder().filter_level(log::LevelFilter::Info).init(); let pwd = std::fs::canonicalize(".")?; let path = format!("{}/.host", std::env::var("KARL_PATH") .unwrap_or(pwd.as_os_str().to_str().unwrap().to_string())); let matches = App::new("Karl Host") .arg(Arg::with_name("path") .help("Absolute path to the host's base directory. \ Caches modules at `<path>/cache/<module_id>`. \ If there are multiple hosts on the same computer, they share \ the same cache. But each host has an automatically generated \ directory `<path>/host-<id>` for processing requests that is \ removed when the host is killed. Each request has a root \ directory at `<path>/host-<id>/<request>`.") .long("path") .takes_value(true) .default_value(&path)) .arg(Arg::with_name("port") .help("Port.") .short("p") .long("port") .takes_value(true) .default_value("59583")) .arg(Arg::with_name("password") .help("Controller password to register host.") .long("password") .takes_value(true) .default_value("password")) .arg(Arg::with_name("controller-ip") .help("IP address of the controller") .long("controller-ip") .takes_value(true) .default_value("127.0.0.1")) .arg(Arg::with_name("controller-port") .help("Port of the controller") .long("controller-port") .takes_value(true) .default_value("59582")) .arg(Arg::with_name("cold-cache") .help("Whether the cold cache is enabled (0 or 1)") .long("cold-cache") .takes_value(true) .default_value("1")) .arg(Arg::with_name("warm-cache") .help("Whether the warm cache is enabled (0 or 1)") .long("warm-cache") .takes_value(true) .default_value("1")) .arg(Arg::with_name("pubsub") .help("Whether pubsub 
optimization is enabled (0 or 1)") .long("pubsub") .takes_value(true) .default_value("1")) .arg(Arg::with_name("no-mock-network") .help("If the flag is included, uses the real network.") .long("no-mock-network")) .get_matches(); let base_path = Path::new(matches.value_of("path").unwrap()).to_path_buf(); let port: u16 = matches.value_of("port").unwrap().parse().unwrap(); let controller = format!( "http://{}:{}", matches.value_of("controller-ip").unwrap(), matches.value_of("controller-port").unwrap(), ); let password = matches.value_of("password").unwrap(); let cold_cache_enabled = matches.value_of("cold-cache").unwrap() == "1"; let warm_cache_enabled = matches.value_of("warm-cache").unwrap() == "1"; let pubsub_enabled = matches.value_of("pubsub").unwrap() == "1"; let mock_network = !matches.is_present("no-mock-network"); let mut host = Host::new( base_path, &controller, cold_cache_enabled, warm_cache_enabled, pubsub_enabled, mock_network, ); host.start(port, password).await.unwrap(); Server::builder() .add_service(KarlHostServer::new(host)) .serve(format!("0.0.0.0:{}", port).parse()?) .await .unwrap(); Ok(()) }
{ debug!("push"); // Validate the process is valid and has permissions to write the file. // No serializability guarantees from other requests from the same process. // Sanitizes the path. let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } let sensor_key = if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if !perms.can_write(&req.tag) { debug!("push: {} cannot write tag={}, silently failing", req.process_token, req.tag); return Ok(Response::new(())); } if state_tags::is_state_tag(&req.tag) { Some(state_tags::parse_state_tag(&req.tag)) } else { None } } else { unreachable!() }; if let Some((sensor, key)) = sensor_key { // Forward as state change if the tag changes state. debug!("push: {} forwarding state change tag={}", req.process_token, req.tag); let req = StateChange { host_token: String::new(), process_token: req.process_token, sensor_id: sensor, key, value: req.data, }; self.api.forward_state(req).await } else { // Forward the file access to the controller and return the result debug!("push: {} forwarding push tag={}", req.process_token, req.tag); self.api.forward_push(req).await } }
identifier_body
main.rs
#![feature(proc_macro_hygiene, decl_macro)] #![feature(custom_inner_attributes)] #[macro_use] extern crate log; pub mod net; mod runtime; mod cache; mod perms; use cache::PathManager; use perms::*; pub mod protos { tonic::include_proto!("request"); } use protos::karl_host_server::KarlHostServer; use protos::*; use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::time::{Duration, Instant}; use std::sync::{Arc, Mutex}; use tokio; use tokio::sync::mpsc; use karl_common::*; use reqwest::{self, Method, header::HeaderName}; use tonic::{Request, Response, Status, Code}; use tonic::transport::Server; use clap::{Arg, App}; struct WarmProcess { process_token: ProcessToken, tx: mpsc::Sender<()>, } #[derive(Clone)] pub struct Host { /// Host ID (unique among hosts) id: u32, /// Host API to controller. api: crate::net::KarlHostAPI, /// Active process tokens. process_tokens: Arc<Mutex<HashMap<ProcessToken, ProcessPerms>>>, warm_processes: Arc<Mutex<HashMap<ModuleID, Vec<WarmProcess>>>>, warm_cache_tx: Option<mpsc::Sender<ComputeRequest>>, /// Path manager. path_manager: Arc<PathManager>, /// Only one compute at a time. 
compute_lock: Arc<Mutex<()>>, /// Whether caching is enabled cold_cache_enabled: bool, warm_cache_enabled: bool, /// Whether to read triggered data locally or forward to the data sink pubsub_enabled: bool, /// Whether to mock network access mock_network: bool, } #[tonic::async_trait] impl karl_host_server::KarlHost for Host { async fn start_compute( &self, req: Request<ComputeRequest>, ) -> Result<Response<NotifyStart>, Status> { let mut req = req.into_inner(); trace!("HANDLE_COMPUTE START {}", req.module_id); if let Some(process_token) = self.attach_warm_process(&mut req).await { Ok(Response::new(NotifyStart { process_token })) } else { let triggered_tag = req.triggered_tag.drain(..).collect(); let triggered_timestamp = req.triggered_timestamp.drain(..).collect(); let is_warm = false; let process_token = Host::spawn_new_process( self.clone(), req, is_warm, triggered_tag, triggered_timestamp, ).await; Ok(Response::new(NotifyStart { process_token })) } } async fn network( &self, req: Request<NetworkAccess>, ) -> Result<Response<NetworkAccessResult>, Status> { debug!("network"); // Validate the process is valid and has permissions to access the network. // No serializability guarantees from other requests from the same process. // Sanitizes the path. 
let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if !perms.can_access_domain(&req.domain) { return Err(Status::new(Code::Unauthenticated, "invalid network access")); } } // Build the network request let method = match req.method.as_str() { "GET" => Method::GET, "POST" => Method::POST, "PUT" => Method::PUT, "DELETE" => Method::DELETE, "HEAD" => Method::HEAD, "OPTIONS" => Method::OPTIONS, "CONNECT" => Method::CONNECT, "PATCH" => Method::PATCH, "TRACE" => Method::TRACE, _ => { return Err(Status::new(Code::InvalidArgument, "invalid http method")) }, }; let mut builder = reqwest::Client::new() .request(method, req.domain.clone()) .timeout(Duration::from_secs(1)) .body(req.body.clone()); for header in &req.headers { let key = HeaderName::from_bytes(&header.key[..]) .map_err(|e| { error!("{}", e); Status::new(Code::InvalidArgument, "invalid header name") })?; builder = builder.header(key, &header.value[..]); } if self.mock_network { // Forward the network access to the controller. 
warn!("mock network request"); warn!("finish diff_priv_pipeline (statistics sent): {:?}", Instant::now()); self.api.forward_network(req).await?; Ok(Response::new(NetworkAccessResult::default())) } else { // Make the actual network access let handle = tokio::spawn(async move { let res = builder.send().await.map_err(|e| { error!("{}", e); Status::new(Code::Aborted, "http request failed") })?; let status_code = res.status().as_u16() as u32; let headers = res .headers() .iter() .map(|(key, value)| KeyValuePair { key: key.as_str().as_bytes().to_vec(), value: value.as_bytes().to_vec(), }) .collect::<Vec<_>>(); let data = res.bytes().await.map_err(|e| { error!("{}", e); Status::new(Code::Unavailable, "error streaming response bytes") })?; Ok(Response::new(NetworkAccessResult { status_code, headers, data: data.to_vec(), })) }); // Forward the network access to the controller. self.api.forward_network(req).await?; // Return the result of the HTTP request. handle.await.map_err(|e| Status::new(Code::Internal, format!("{}", e)))? } } /// Validates the process is an existing process, and checks its /// permissions to see that the tag corresponds to a valid param. /// If the tag is valid and this is a stateless edge, only respond /// succesfully if the module is trying to get the triggered data. /// If the tag is valid and this is a stateful edge, endorse the data /// with the host token and forward to the controller. async fn get( &self, req: Request<GetData>, ) -> Result<Response<GetDataResult>, Status> { debug!("get"); // Validate the process is valid and has permissions to read the tag. // No serializability guarantees from other requests from the same process. 
let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { warn!("get: invalid token {}", req.process_token); return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if perms.is_triggered(&req.tag) { // cached the triggered file if req.lower != req.upper { debug!("get: {} invalid triggered timestamps", req.process_token); return Ok(Response::new(GetDataResult::default())) } else if !self.pubsub_enabled { debug!("get: {} pubsub disabled, fallthrough to read from data sink", req.process_token); // fallthrough below } else if let Some(data) = perms.read_triggered(&req.lower) { debug!("get: {} reading triggered data", req.process_token); return Ok(Response::new(GetDataResult { timestamps: vec![req.lower], data: vec![data], })) } else { debug!("get: {} process was not triggered", req.process_token); return Ok(Response::new(GetDataResult::default())) } } else if !perms.can_read(&req.tag) { warn!("get: {} cannot read {}", req.process_token, req.tag); return Err(Status::new(Code::Unauthenticated, "cannot read")); } } // Forward the file access to the controller and return the result debug!("get: {} forwarding tag={}", req.process_token, req.tag); self.api.forward_get(req).await } /// Validates the process is an existing process, and checks its /// permissions to see that the process is writing to a valid tag. /// If the tag is valid, endorse the data with the host token and /// forward to the controller. /// /// If the tag corresponds to sensor state (say maybe it starts with # /// which is reserved for state tags), forward the request as a state /// change instead. 
async fn push( &self, req: Request<PushData>, ) -> Result<Response<()>, Status> { debug!("push"); // Validate the process is valid and has permissions to write the file. // No serializability guarantees from other requests from the same process. // Sanitizes the path. let req = req.into_inner(); let rx = { if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { perms.touch() } else { return Err(Status::new(Code::Unauthenticated, "invalid process token")); } }; if let Some(mut rx) = rx { debug!("warm process awaiting..."); rx.recv().await; } let sensor_key = if let Some(perms) = self.process_tokens.lock().unwrap().get_mut(&req.process_token) { if !perms.can_write(&req.tag) { debug!("push: {} cannot write tag={}, silently failing", req.process_token, req.tag); return Ok(Response::new(())); } if state_tags::is_state_tag(&req.tag) { Some(state_tags::parse_state_tag(&req.tag)) } else { None } } else { unreachable!() }; if let Some((sensor, key)) = sensor_key { // Forward as state change if the tag changes state. debug!("push: {} forwarding state change tag={}", req.process_token, req.tag); let req = StateChange { host_token: String::new(), process_token: req.process_token, sensor_id: sensor, key, value: req.data, }; self.api.forward_state(req).await } else
} } impl Host { /// Generate a new host with a random ID. pub fn new( base_path: PathBuf, controller: &str, cold_cache_enabled: bool, warm_cache_enabled: bool, pubsub_enabled: bool, mock_network: bool, ) -> Self { use rand::Rng; let id: u32 = rand::thread_rng().gen(); assert!(cold_cache_enabled || !warm_cache_enabled); // TODO: buffer size Self { id, api: crate::net::KarlHostAPI::new(controller), process_tokens: Arc::new(Mutex::new(HashMap::new())), warm_processes: Arc::new(Mutex::new(HashMap::new())), warm_cache_tx: None, // wish this didn't have to be wrapped path_manager: Arc::new(PathManager::new(base_path, id)), compute_lock: Arc::new(Mutex::new(())), cold_cache_enabled, warm_cache_enabled, pubsub_enabled, mock_network, } } /// Spawns a background process that sends heartbeats to the controller /// at the HEARTBEAT_INTERVAL. /// /// The constructor creates a directory at the <KARL_PATH> if it does /// not already exist. The working directory for any computation is at /// <KARL_PATH>/<LISTENER_ID>. When not doing computation, the working /// directory must be at <KARL_PATH>. /// /// Parameters: /// - port - The port to listen on. /// - password - The password to register with the controller. pub async fn start(&mut self, port: u16, password: &str) -> Result<(), Status> { self.api.register(self.id, port, password).await?; let api = self.api.clone(); tokio::spawn(async move { // Every HEARTBEAT_INTERVAL seconds, this process wakes up // sends a heartbeat message to the controller. 
loop { tokio::time::sleep(Duration::from_secs(HEARTBEAT_INTERVAL)).await; trace!("heartbeat"); let res = api.heartbeat().await; if let Err(e) = res { warn!("error sending heartbeat: {}", e); }; } }); // listener for spawning warm processes let (tx, mut rx) = mpsc::channel::<ComputeRequest>(100); self.warm_cache_tx = Some(tx); let host = self.clone(); tokio::spawn(async move { loop { let req: ComputeRequest = rx.recv().await.unwrap(); let is_warm = true; Host::spawn_new_process( host.clone(), req, is_warm, TRIGGERED_KEY.to_string(), // special value TRIGGERED_KEY.to_string(), // special value ).await; } }); Ok(()) } async fn attach_warm_process( &self, req: &mut ComputeRequest, ) -> Option<ProcessToken> { let warm_process = { let mut warm_processes = self.warm_processes.lock().unwrap(); let mut process_tokens = self.process_tokens.lock().unwrap(); let mut process: Option<WarmProcess> = None; if let Some(processes) = warm_processes.get_mut(&req.module_id) { // reserve the process token process = processes.pop(); } if let Some(process) = process { process_tokens .get_mut(&process.process_token).unwrap() .set_compute_request(req); process } else { return None; } }; // permissions are set and warm process can continue info!("attaching: {} ({})", req.module_id, warm_process.process_token); warm_process.tx.send(()).await.unwrap(); Some(warm_process.process_token) } async fn spawn_new_process( host: Host, mut req: ComputeRequest, is_warm: bool, triggered_tag: String, triggered_timestamp: String, ) -> ProcessToken { let process_token = Token::gen(); let (perms, tx) = if !is_warm { info!("spawning cold process: {} ({})", req.module_id, process_token); (ProcessPerms::new(&mut req), None) } else { info!("spawning warm process: {} ({})", req.module_id, process_token); let (perms, tx) = ProcessPerms::new_warm_cache(); (perms, Some(tx)) }; // Mark an active process { let mut process_tokens = host.process_tokens.lock().unwrap(); 
assert!(!process_tokens.contains_key(&process_token)); process_tokens.insert(process_token.clone(), perms); } // If it's warm insert a sending channel to eventually notify // this process it is ready to continue if let Some(tx) = tx { host.warm_processes.lock().unwrap() .entry(req.module_id.clone()) .or_insert(vec![]) .push(WarmProcess { process_token: process_token.clone(), tx, }); } // Handle the process asynchronously #[cfg(target_os = "linux")] { let binary_path = Path::new(&req.binary_path).to_path_buf(); let process_token = process_token.clone(); tokio::spawn(async move { let original_req = req.clone(); if !triggered_tag.is_empty() { req.envs.push(format!("TRIGGERED_TAG={}", &triggered_tag)); } if !triggered_timestamp.is_empty() { req.envs.push(format!("TRIGGERED_TIMESTAMP={}", &triggered_timestamp)); } req.envs.push(format!("PROCESS_TOKEN={}", &process_token)); if !req.params.is_empty() { req.envs.push(format!("KARL_PARAMS={}", &req.params)); } if !req.returns.is_empty() { req.envs.push(format!("KARL_RETURNS={}", &req.returns)); } let execution_time = Host::handle_compute( host.compute_lock.clone(), host.path_manager.clone(), req.module_id, req.cached, host.cold_cache_enabled, req.package, binary_path, req.args, req.envs, ).unwrap(); host.process_tokens.lock().unwrap().remove(&process_token); host.api.notify_end(process_token).await.unwrap(); // Now that the compute request is finished, evaluate its // initialization time. If the initialization time was high, // recursively call this function but as a warm cache module. // We assume initialization time is high if the warm cache // is enabled. let _long_init_time = execution_time > Duration::from_secs(5); if host.warm_cache_enabled { debug!("execution_time was {:?}, spawning warm modules anyway", execution_time); host.warm_cache_tx.as_ref().unwrap().send(original_req).await.unwrap(); } }); } #[cfg(not(target_os = "linux"))] { unimplemented!() } process_token } /// Handle a compute request. 
/// /// Returns the execution time. #[cfg(target_os = "linux")] fn handle_compute( lock: Arc<Mutex<()>>, path_manager: Arc<PathManager>, module_id: ModuleID, cached: bool, cold_cache_enabled: bool, package: Vec<u8>, binary_path: PathBuf, args: Vec<String>, envs: Vec<String>, ) -> Result<Duration, Error> { let now = Instant::now(); if cached && !cold_cache_enabled { return Err(Error::CacheError("caching is disabled".to_string())); } // TODO: lock on finer granularity, just the specific module // But gets a lock around the filesystem so multiple people // aren't handling compute requests that could be cached. // And so that each request can create a directory for its process. let (mount, paths) = { let lock = lock.lock().unwrap(); debug!("cached={} cold_cache_enabled={}", cached, cold_cache_enabled); if cached && !path_manager.is_cached(&module_id) { // TODO: controller needs to handle this error // what if a second request gets here before the first // request caches the module? race condition return Err(Error::CacheError(format!("module {} is not cached", module_id))); } if !cached { path_manager.cache_module(&module_id, package)?; } debug!("unpacked request => {} s", now.elapsed().as_secs_f32()); let now = Instant::now(); let (mount, paths) = path_manager.new_request(&module_id)?; // info!("=> preprocessing: {} s", now.elapsed().as_secs_f32()); debug!("mounting overlayfs => {} s", now.elapsed().as_secs_f32()); drop(lock); (mount, paths) }; let start = Instant::now(); let _res = runtime::run( &paths.root_path, binary_path, args, envs, )?; let execution_time = start.elapsed(); debug!("invoked binary => {} s", execution_time.as_secs_f32()); trace!("HANDLE_COMPUTE FINISH {}", module_id); // Reset the root for the next computation. 
path_manager.unmount(mount)?; if let Err(e) = std::fs::remove_dir_all(&paths.request_path) { error!("error resetting request path: {:?}", e); } let now = Instant::now(); trace!( "reset directory at {:?} => {} s", &paths.request_path, now.elapsed().as_secs_f32(), ); Ok(execution_time) } } #[tokio::main] async fn main() -> Result<(), Box<dyn std::error::Error>> { env_logger::builder().filter_level(log::LevelFilter::Info).init(); let pwd = std::fs::canonicalize(".")?; let path = format!("{}/.host", std::env::var("KARL_PATH") .unwrap_or(pwd.as_os_str().to_str().unwrap().to_string())); let matches = App::new("Karl Host") .arg(Arg::with_name("path") .help("Absolute path to the host's base directory. \ Caches modules at `<path>/cache/<module_id>`. \ If there are multiple hosts on the same computer, they share \ the same cache. But each host has an automatically generated \ directory `<path>/host-<id>` for processing requests that is \ removed when the host is killed. Each request has a root \ directory at `<path>/host-<id>/<request>`.") .long("path") .takes_value(true) .default_value(&path)) .arg(Arg::with_name("port") .help("Port.") .short("p") .long("port") .takes_value(true) .default_value("59583")) .arg(Arg::with_name("password") .help("Controller password to register host.") .long("password") .takes_value(true) .default_value("password")) .arg(Arg::with_name("controller-ip") .help("IP address of the controller") .long("controller-ip") .takes_value(true) .default_value("127.0.0.1")) .arg(Arg::with_name("controller-port") .help("Port of the controller") .long("controller-port") .takes_value(true) .default_value("59582")) .arg(Arg::with_name("cold-cache") .help("Whether the cold cache is enabled (0 or 1)") .long("cold-cache") .takes_value(true) .default_value("1")) .arg(Arg::with_name("warm-cache") .help("Whether the warm cache is enabled (0 or 1)") .long("warm-cache") .takes_value(true) .default_value("1")) .arg(Arg::with_name("pubsub") .help("Whether pubsub 
optimization is enabled (0 or 1)") .long("pubsub") .takes_value(true) .default_value("1")) .arg(Arg::with_name("no-mock-network") .help("If the flag is included, uses the real network.") .long("no-mock-network")) .get_matches(); let base_path = Path::new(matches.value_of("path").unwrap()).to_path_buf(); let port: u16 = matches.value_of("port").unwrap().parse().unwrap(); let controller = format!( "http://{}:{}", matches.value_of("controller-ip").unwrap(), matches.value_of("controller-port").unwrap(), ); let password = matches.value_of("password").unwrap(); let cold_cache_enabled = matches.value_of("cold-cache").unwrap() == "1"; let warm_cache_enabled = matches.value_of("warm-cache").unwrap() == "1"; let pubsub_enabled = matches.value_of("pubsub").unwrap() == "1"; let mock_network = !matches.is_present("no-mock-network"); let mut host = Host::new( base_path, &controller, cold_cache_enabled, warm_cache_enabled, pubsub_enabled, mock_network, ); host.start(port, password).await.unwrap(); Server::builder() .add_service(KarlHostServer::new(host)) .serve(format!("0.0.0.0:{}", port).parse()?) .await .unwrap(); Ok(()) }
{ // Forward the file access to the controller and return the result debug!("push: {} forwarding push tag={}", req.process_token, req.tag); self.api.forward_push(req).await }
conditional_block
parser.rs
// Copyright lowRISC contributors. // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 //! Parsing of Verilog vmem files into the [`Vmem`] representation. //! //! See the [srec_vmem] documentation for a description of the file format. //! //! To summarise: //! * Files specify hexadecimal data for sequential addresses. //! * Start addresses for a run can be specified in hex with '@____'. //! * Address and data values are separated by whitespace or comments. //! * C-style '//' and '/* */' comments are supported. //! //! [srec_vmem]: https://srecord.sourceforge.net/man/man5/srec_vmem.5.html use std::num::ParseIntError; use thiserror::Error; use super::{Section, Vmem}; pub type ParseResult<T> = Result<T, ParseError>; /// Errors that can occur when parsing vmem files. #[derive(Clone, Debug, Error, PartialEq, Eq)] pub enum ParseError { /// Failure to parse an integer from hexadecimal. #[error("failed to parse as hexadecimal integer")] ParseInt(#[from] ParseIntError), /// An opened comment was not closed. #[error("unclosed comment")] UnclosedComment, /// An address was started with an '@' character, but no address value followed. #[error("address is missing a value")] AddrMissingValue, /// Catch-all for any characters that don't belong in vmem files. #[error("unknown character '{0}'")] UnknownChar(char), } /// Representation of the possible tokens found in vmem files. #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum Token { /// End of file. Eof, /// Address directive, e.g. `@123abc`. Addr(u32), /// Data value, e.g. `abc123`. Value(u32), /// Comments, e.g. `/* comment */` or `// comment`. Comment, /// Whitespace, including newlines. Whitespace, } /// Some span of the input text representing a token. #[derive(Clone, Copy, Debug, PartialEq, Eq)] struct Span { token: Token, len: usize, } /// Parser for vmem files. pub struct VmemParser; impl VmemParser { /// Parse a complete vmem file from a string. 
pub fn parse(mut s: &str) -> ParseResult<Vmem> { // Build up the vmem file as sections. let mut vmem = Vmem::default(); vmem.sections.push(Section::default()); loop { // Parse a token from the input string, and move along by its span. let Span { len, token } = Self::token(s)?; s = &s[len..]; match token { Token::Eof => break, Token::Addr(addr) => { // Add a new section to the `Vmem` at this address. // Here we translate between a "word index" to a byte address. vmem.sections.push(Section { addr: addr * 4, data: Vec::new(), }); } Token::Value(value) => { // Add the value to the current (last added) section's data. let section = vmem.sections.last_mut().unwrap(); section.data.push(value) } // Whitespace and comments are ignored. Token::Whitespace => continue, Token::Comment => continue, } } Ok(vmem) } /// Parse a single token from the beginning of a string. fn token(s: &str) -> ParseResult<Span> { let parsers = [ Self::parse_eof, Self::parse_addr, Self::parse_value, Self::parse_comment, Self::parse_whitespace, ]; // Run each parser in order, stopping when one gets a matching parse. let span = parsers.iter().find_map(|p| p(s).transpose()); // If no parsers succeeded, return an error. match span { Some(span) => span, None => Err(ParseError::UnknownChar(s.chars().next().unwrap())), } } /// Try to parse an EOF from the beginning of a string. fn parse_eof(s: &str) -> ParseResult<Option<Span>> { // Empty strings give a 0-length `Token::Eof` span. match s.is_empty() { true => Ok(Some(Span { len: 0, token: Token::Eof, })), false => Ok(None), } } /// Try to parse an address from the beginning of a string. fn parse_addr(s: &str) -> ParseResult<Option<Span>> { // Check for the beginning '@' symbol. let Some(addr) = s.strip_prefix('@') else { return Ok(None); }; // Find the length of the actual address string. 
let addr_len = match addr.find(|c: char| !c.is_ascii_hexdigit()) { Some(0) => return Err(ParseError::AddrMissingValue), Some(len) => len, None => addr.len(), }; // Ensure the '@' is included in the span's length! let len = '@'.len_utf8() + addr_len; // Parse from hexadecimal. let val = u32::from_str_radix(&addr[..addr_len], 16)?; let token = Token::Addr(val); let span = Span { token, len }; Ok(Some(span)) } /// Try parse a value from the beginning of a string. fn parse_value(s: &str) -> ParseResult<Option<Span>> { // Check for hexadecimal characters in the input. let len = match s.find(|c: char| !c.is_ascii_hexdigit()) { Some(0) => return Ok(None), Some(len) => len, None => s.len(), }; let val = u32::from_str_radix(&s[..len], 16)?; let token = Token::Value(val); let span = Span { token, len }; Ok(Some(span)) } /// Try parse a comment from the beginning of a string. fn parse_comment(s: &str) -> ParseResult<Option<Span>> { // Look for commend identifiers and their closers. let len = match s { s if s.starts_with("//") => s.find('\n').unwrap_or(s.len()), s if s.starts_with("/*") => { // `find` gives us the _start_ of the `*/`, so include its length as well. s.find("*/").ok_or(ParseError::UnclosedComment)? + "*/".len() } _ => return Ok(None), }; let token = Token::Comment; let span = Span { token, len }; Ok(Some(span)) } /// Try to parse whitespace from the beginning of a string. fn parse_whitespace(s: &str) -> ParseResult<Option<Span>> { // Check for whitespace at the beginning of the input. 
let len = match s.find(|c: char| !c.is_whitespace()) { Some(0) => return Ok(None), Some(len) => len, None => s.len(), }; let token = Token::Whitespace; let span = Span { len, token }; Ok(Some(span)) } } #[cfg(test)] mod test { use super::*; #[test] fn parse() { let input = r#" AB // comment CD EF @42 12 /* comment */ 34 "#; let expected = Vmem { sections: vec![ Section { addr: 0x00, data: vec![0xAB, 0xCD, 0xEF], }, Section { addr: 0x108, data: vec![0x12, 0x34], }, ], }; assert_eq!(VmemParser::parse(input).unwrap(), expected); } #[test] fn
() { // Check we can pick out the correct token from a string: let expected = [ ("", Token::Eof, 0), ("@ff", Token::Addr(0xff), 3), ("ff", Token::Value(0xff), 2), ("// X", Token::Comment, 4), ("/* X */", Token::Comment, 7), (" ", Token::Whitespace, 2), ]; for (s, token, len) in expected { let span = Span { token, len }; assert_eq!(VmemParser::token(s), Ok(span)); } // Unknown non-token: assert_eq!(VmemParser::token("X"), Err(ParseError::UnknownChar('X'))); } #[test] fn eof() { // Not EOF: assert_eq!(VmemParser::parse_eof(" ").unwrap(), None); // EOF: let expected = Some(Span { len: 0, token: Token::Eof, }); assert_eq!(VmemParser::parse_eof("").unwrap(), expected); } #[test] fn addr() { // No address: assert_eq!(VmemParser::parse_addr("/* X */").unwrap(), None); let expected = Some(Span { len: 9, token: Token::Addr(0x0123abcd), }); // Partially an address: assert_eq!(VmemParser::parse_addr("@0123ABCD FF").unwrap(), expected); // Entirely an address: assert_eq!(VmemParser::parse_addr("@0123ABCD").unwrap(), expected); // Lower-case hex characters: assert_eq!(VmemParser::parse_addr("@0123abcd").unwrap(), expected); // u32 overflow: assert!(VmemParser::parse_addr("@123456789").is_err()); // Missing address after '@': assert!(VmemParser::parse_addr("@").is_err()); assert!(VmemParser::parse_addr("@ FF").is_err()); } #[test] fn value() { // No value: assert_eq!(VmemParser::parse_value("/* X */").unwrap(), None); let expected = Some(Span { len: 8, token: Token::Value(0x0123abcd), }); // Partially a value: assert_eq!(VmemParser::parse_value("0123ABCD FF").unwrap(), expected); // Entirely a value: assert_eq!(VmemParser::parse_value("0123ABCD").unwrap(), expected); // Lower-case hex characters: assert_eq!(VmemParser::parse_value("0123abcd").unwrap(), expected); // u32 overflow: assert!(VmemParser::parse_value("123456789").is_err()); } #[test] fn comment() { // No whitespace: assert_eq!(VmemParser::parse_comment("FF").unwrap(), None); let expected = Some(Span { len: 7, token: 
Token::Comment, }); // Partial block comment: assert_eq!(VmemParser::parse_comment("/* X */ FF").unwrap(), expected); // Entirely a block comment: assert_eq!(VmemParser::parse_comment("/* X */").unwrap(), expected); // Unclosed block comment: assert!(VmemParser::parse_comment("/* X").is_err()); // Line comment ending in newline: assert_eq!( VmemParser::parse_comment(concat!("// XXXX", '\n', "FF")).unwrap(), expected ); // Line comment ending at EOF: assert_eq!(VmemParser::parse_comment("// XXXX").unwrap(), expected); } #[test] fn whitespace() { // No whitespace: assert_eq!(VmemParser::parse_whitespace("FF").unwrap(), None); let expected = Some(Span { len: 2, token: Token::Whitespace, }); // Partial whitespace: assert_eq!(VmemParser::parse_whitespace(" FF").unwrap(), expected); // Entirely whitespace: assert_eq!(VmemParser::parse_whitespace(" ").unwrap(), expected); } }
token
identifier_name
parser.rs
// Copyright lowRISC contributors. // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 //! Parsing of Verilog vmem files into the [`Vmem`] representation. //! //! See the [srec_vmem] documentation for a description of the file format. //! //! To summarise: //! * Files specify hexadecimal data for sequential addresses. //! * Start addresses for a run can be specified in hex with '@____'. //! * Address and data values are separated by whitespace or comments. //! * C-style '//' and '/* */' comments are supported. //! //! [srec_vmem]: https://srecord.sourceforge.net/man/man5/srec_vmem.5.html use std::num::ParseIntError; use thiserror::Error; use super::{Section, Vmem}; pub type ParseResult<T> = Result<T, ParseError>; /// Errors that can occur when parsing vmem files. #[derive(Clone, Debug, Error, PartialEq, Eq)] pub enum ParseError { /// Failure to parse an integer from hexadecimal. #[error("failed to parse as hexadecimal integer")] ParseInt(#[from] ParseIntError), /// An opened comment was not closed. #[error("unclosed comment")] UnclosedComment, /// An address was started with an '@' character, but no address value followed. #[error("address is missing a value")]
/// Catch-all for any characters that don't belong in vmem files. #[error("unknown character '{0}'")] UnknownChar(char), } /// Representation of the possible tokens found in vmem files. #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum Token { /// End of file. Eof, /// Address directive, e.g. `@123abc`. Addr(u32), /// Data value, e.g. `abc123`. Value(u32), /// Comments, e.g. `/* comment */` or `// comment`. Comment, /// Whitespace, including newlines. Whitespace, } /// Some span of the input text representing a token. #[derive(Clone, Copy, Debug, PartialEq, Eq)] struct Span { token: Token, len: usize, } /// Parser for vmem files. pub struct VmemParser; impl VmemParser { /// Parse a complete vmem file from a string. pub fn parse(mut s: &str) -> ParseResult<Vmem> { // Build up the vmem file as sections. let mut vmem = Vmem::default(); vmem.sections.push(Section::default()); loop { // Parse a token from the input string, and move along by its span. let Span { len, token } = Self::token(s)?; s = &s[len..]; match token { Token::Eof => break, Token::Addr(addr) => { // Add a new section to the `Vmem` at this address. // Here we translate between a "word index" to a byte address. vmem.sections.push(Section { addr: addr * 4, data: Vec::new(), }); } Token::Value(value) => { // Add the value to the current (last added) section's data. let section = vmem.sections.last_mut().unwrap(); section.data.push(value) } // Whitespace and comments are ignored. Token::Whitespace => continue, Token::Comment => continue, } } Ok(vmem) } /// Parse a single token from the beginning of a string. fn token(s: &str) -> ParseResult<Span> { let parsers = [ Self::parse_eof, Self::parse_addr, Self::parse_value, Self::parse_comment, Self::parse_whitespace, ]; // Run each parser in order, stopping when one gets a matching parse. let span = parsers.iter().find_map(|p| p(s).transpose()); // If no parsers succeeded, return an error. 
match span { Some(span) => span, None => Err(ParseError::UnknownChar(s.chars().next().unwrap())), } } /// Try to parse an EOF from the beginning of a string. fn parse_eof(s: &str) -> ParseResult<Option<Span>> { // Empty strings give a 0-length `Token::Eof` span. match s.is_empty() { true => Ok(Some(Span { len: 0, token: Token::Eof, })), false => Ok(None), } } /// Try to parse an address from the beginning of a string. fn parse_addr(s: &str) -> ParseResult<Option<Span>> { // Check for the beginning '@' symbol. let Some(addr) = s.strip_prefix('@') else { return Ok(None); }; // Find the length of the actual address string. let addr_len = match addr.find(|c: char| !c.is_ascii_hexdigit()) { Some(0) => return Err(ParseError::AddrMissingValue), Some(len) => len, None => addr.len(), }; // Ensure the '@' is included in the span's length! let len = '@'.len_utf8() + addr_len; // Parse from hexadecimal. let val = u32::from_str_radix(&addr[..addr_len], 16)?; let token = Token::Addr(val); let span = Span { token, len }; Ok(Some(span)) } /// Try parse a value from the beginning of a string. fn parse_value(s: &str) -> ParseResult<Option<Span>> { // Check for hexadecimal characters in the input. let len = match s.find(|c: char| !c.is_ascii_hexdigit()) { Some(0) => return Ok(None), Some(len) => len, None => s.len(), }; let val = u32::from_str_radix(&s[..len], 16)?; let token = Token::Value(val); let span = Span { token, len }; Ok(Some(span)) } /// Try parse a comment from the beginning of a string. fn parse_comment(s: &str) -> ParseResult<Option<Span>> { // Look for commend identifiers and their closers. let len = match s { s if s.starts_with("//") => s.find('\n').unwrap_or(s.len()), s if s.starts_with("/*") => { // `find` gives us the _start_ of the `*/`, so include its length as well. s.find("*/").ok_or(ParseError::UnclosedComment)? 
+ "*/".len() } _ => return Ok(None), }; let token = Token::Comment; let span = Span { token, len }; Ok(Some(span)) } /// Try to parse whitespace from the beginning of a string. fn parse_whitespace(s: &str) -> ParseResult<Option<Span>> { // Check for whitespace at the beginning of the input. let len = match s.find(|c: char| !c.is_whitespace()) { Some(0) => return Ok(None), Some(len) => len, None => s.len(), }; let token = Token::Whitespace; let span = Span { len, token }; Ok(Some(span)) } } #[cfg(test)] mod test { use super::*; #[test] fn parse() { let input = r#" AB // comment CD EF @42 12 /* comment */ 34 "#; let expected = Vmem { sections: vec![ Section { addr: 0x00, data: vec![0xAB, 0xCD, 0xEF], }, Section { addr: 0x108, data: vec![0x12, 0x34], }, ], }; assert_eq!(VmemParser::parse(input).unwrap(), expected); } #[test] fn token() { // Check we can pick out the correct token from a string: let expected = [ ("", Token::Eof, 0), ("@ff", Token::Addr(0xff), 3), ("ff", Token::Value(0xff), 2), ("// X", Token::Comment, 4), ("/* X */", Token::Comment, 7), (" ", Token::Whitespace, 2), ]; for (s, token, len) in expected { let span = Span { token, len }; assert_eq!(VmemParser::token(s), Ok(span)); } // Unknown non-token: assert_eq!(VmemParser::token("X"), Err(ParseError::UnknownChar('X'))); } #[test] fn eof() { // Not EOF: assert_eq!(VmemParser::parse_eof(" ").unwrap(), None); // EOF: let expected = Some(Span { len: 0, token: Token::Eof, }); assert_eq!(VmemParser::parse_eof("").unwrap(), expected); } #[test] fn addr() { // No address: assert_eq!(VmemParser::parse_addr("/* X */").unwrap(), None); let expected = Some(Span { len: 9, token: Token::Addr(0x0123abcd), }); // Partially an address: assert_eq!(VmemParser::parse_addr("@0123ABCD FF").unwrap(), expected); // Entirely an address: assert_eq!(VmemParser::parse_addr("@0123ABCD").unwrap(), expected); // Lower-case hex characters: assert_eq!(VmemParser::parse_addr("@0123abcd").unwrap(), expected); // u32 overflow: 
assert!(VmemParser::parse_addr("@123456789").is_err()); // Missing address after '@': assert!(VmemParser::parse_addr("@").is_err()); assert!(VmemParser::parse_addr("@ FF").is_err()); } #[test] fn value() { // No value: assert_eq!(VmemParser::parse_value("/* X */").unwrap(), None); let expected = Some(Span { len: 8, token: Token::Value(0x0123abcd), }); // Partially a value: assert_eq!(VmemParser::parse_value("0123ABCD FF").unwrap(), expected); // Entirely a value: assert_eq!(VmemParser::parse_value("0123ABCD").unwrap(), expected); // Lower-case hex characters: assert_eq!(VmemParser::parse_value("0123abcd").unwrap(), expected); // u32 overflow: assert!(VmemParser::parse_value("123456789").is_err()); } #[test] fn comment() { // No whitespace: assert_eq!(VmemParser::parse_comment("FF").unwrap(), None); let expected = Some(Span { len: 7, token: Token::Comment, }); // Partial block comment: assert_eq!(VmemParser::parse_comment("/* X */ FF").unwrap(), expected); // Entirely a block comment: assert_eq!(VmemParser::parse_comment("/* X */").unwrap(), expected); // Unclosed block comment: assert!(VmemParser::parse_comment("/* X").is_err()); // Line comment ending in newline: assert_eq!( VmemParser::parse_comment(concat!("// XXXX", '\n', "FF")).unwrap(), expected ); // Line comment ending at EOF: assert_eq!(VmemParser::parse_comment("// XXXX").unwrap(), expected); } #[test] fn whitespace() { // No whitespace: assert_eq!(VmemParser::parse_whitespace("FF").unwrap(), None); let expected = Some(Span { len: 2, token: Token::Whitespace, }); // Partial whitespace: assert_eq!(VmemParser::parse_whitespace(" FF").unwrap(), expected); // Entirely whitespace: assert_eq!(VmemParser::parse_whitespace(" ").unwrap(), expected); } }
AddrMissingValue,
random_line_split
parser.rs
// Copyright lowRISC contributors. // Licensed under the Apache License, Version 2.0, see LICENSE for details. // SPDX-License-Identifier: Apache-2.0 //! Parsing of Verilog vmem files into the [`Vmem`] representation. //! //! See the [srec_vmem] documentation for a description of the file format. //! //! To summarise: //! * Files specify hexadecimal data for sequential addresses. //! * Start addresses for a run can be specified in hex with '@____'. //! * Address and data values are separated by whitespace or comments. //! * C-style '//' and '/* */' comments are supported. //! //! [srec_vmem]: https://srecord.sourceforge.net/man/man5/srec_vmem.5.html use std::num::ParseIntError; use thiserror::Error; use super::{Section, Vmem}; pub type ParseResult<T> = Result<T, ParseError>; /// Errors that can occur when parsing vmem files. #[derive(Clone, Debug, Error, PartialEq, Eq)] pub enum ParseError { /// Failure to parse an integer from hexadecimal. #[error("failed to parse as hexadecimal integer")] ParseInt(#[from] ParseIntError), /// An opened comment was not closed. #[error("unclosed comment")] UnclosedComment, /// An address was started with an '@' character, but no address value followed. #[error("address is missing a value")] AddrMissingValue, /// Catch-all for any characters that don't belong in vmem files. #[error("unknown character '{0}'")] UnknownChar(char), } /// Representation of the possible tokens found in vmem files. #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum Token { /// End of file. Eof, /// Address directive, e.g. `@123abc`. Addr(u32), /// Data value, e.g. `abc123`. Value(u32), /// Comments, e.g. `/* comment */` or `// comment`. Comment, /// Whitespace, including newlines. Whitespace, } /// Some span of the input text representing a token. #[derive(Clone, Copy, Debug, PartialEq, Eq)] struct Span { token: Token, len: usize, } /// Parser for vmem files. pub struct VmemParser; impl VmemParser { /// Parse a complete vmem file from a string. 
pub fn parse(mut s: &str) -> ParseResult<Vmem> { // Build up the vmem file as sections. let mut vmem = Vmem::default(); vmem.sections.push(Section::default()); loop { // Parse a token from the input string, and move along by its span. let Span { len, token } = Self::token(s)?; s = &s[len..]; match token { Token::Eof => break, Token::Addr(addr) => { // Add a new section to the `Vmem` at this address. // Here we translate between a "word index" to a byte address. vmem.sections.push(Section { addr: addr * 4, data: Vec::new(), }); } Token::Value(value) => { // Add the value to the current (last added) section's data. let section = vmem.sections.last_mut().unwrap(); section.data.push(value) } // Whitespace and comments are ignored. Token::Whitespace => continue, Token::Comment => continue, } } Ok(vmem) } /// Parse a single token from the beginning of a string. fn token(s: &str) -> ParseResult<Span> { let parsers = [ Self::parse_eof, Self::parse_addr, Self::parse_value, Self::parse_comment, Self::parse_whitespace, ]; // Run each parser in order, stopping when one gets a matching parse. let span = parsers.iter().find_map(|p| p(s).transpose()); // If no parsers succeeded, return an error. match span { Some(span) => span, None => Err(ParseError::UnknownChar(s.chars().next().unwrap())), } } /// Try to parse an EOF from the beginning of a string. fn parse_eof(s: &str) -> ParseResult<Option<Span>> { // Empty strings give a 0-length `Token::Eof` span. match s.is_empty() { true => Ok(Some(Span { len: 0, token: Token::Eof, })), false => Ok(None), } } /// Try to parse an address from the beginning of a string. fn parse_addr(s: &str) -> ParseResult<Option<Span>> { // Check for the beginning '@' symbol. let Some(addr) = s.strip_prefix('@') else { return Ok(None); }; // Find the length of the actual address string. 
let addr_len = match addr.find(|c: char| !c.is_ascii_hexdigit()) { Some(0) => return Err(ParseError::AddrMissingValue), Some(len) => len, None => addr.len(), }; // Ensure the '@' is included in the span's length! let len = '@'.len_utf8() + addr_len; // Parse from hexadecimal. let val = u32::from_str_radix(&addr[..addr_len], 16)?; let token = Token::Addr(val); let span = Span { token, len }; Ok(Some(span)) } /// Try parse a value from the beginning of a string. fn parse_value(s: &str) -> ParseResult<Option<Span>> { // Check for hexadecimal characters in the input. let len = match s.find(|c: char| !c.is_ascii_hexdigit()) { Some(0) => return Ok(None), Some(len) => len, None => s.len(), }; let val = u32::from_str_radix(&s[..len], 16)?; let token = Token::Value(val); let span = Span { token, len }; Ok(Some(span)) } /// Try parse a comment from the beginning of a string. fn parse_comment(s: &str) -> ParseResult<Option<Span>> { // Look for commend identifiers and their closers. let len = match s { s if s.starts_with("//") => s.find('\n').unwrap_or(s.len()), s if s.starts_with("/*") => { // `find` gives us the _start_ of the `*/`, so include its length as well. s.find("*/").ok_or(ParseError::UnclosedComment)? + "*/".len() } _ => return Ok(None), }; let token = Token::Comment; let span = Span { token, len }; Ok(Some(span)) } /// Try to parse whitespace from the beginning of a string. fn parse_whitespace(s: &str) -> ParseResult<Option<Span>>
} #[cfg(test)] mod test { use super::*; #[test] fn parse() { let input = r#" AB // comment CD EF @42 12 /* comment */ 34 "#; let expected = Vmem { sections: vec![ Section { addr: 0x00, data: vec![0xAB, 0xCD, 0xEF], }, Section { addr: 0x108, data: vec![0x12, 0x34], }, ], }; assert_eq!(VmemParser::parse(input).unwrap(), expected); } #[test] fn token() { // Check we can pick out the correct token from a string: let expected = [ ("", Token::Eof, 0), ("@ff", Token::Addr(0xff), 3), ("ff", Token::Value(0xff), 2), ("// X", Token::Comment, 4), ("/* X */", Token::Comment, 7), (" ", Token::Whitespace, 2), ]; for (s, token, len) in expected { let span = Span { token, len }; assert_eq!(VmemParser::token(s), Ok(span)); } // Unknown non-token: assert_eq!(VmemParser::token("X"), Err(ParseError::UnknownChar('X'))); } #[test] fn eof() { // Not EOF: assert_eq!(VmemParser::parse_eof(" ").unwrap(), None); // EOF: let expected = Some(Span { len: 0, token: Token::Eof, }); assert_eq!(VmemParser::parse_eof("").unwrap(), expected); } #[test] fn addr() { // No address: assert_eq!(VmemParser::parse_addr("/* X */").unwrap(), None); let expected = Some(Span { len: 9, token: Token::Addr(0x0123abcd), }); // Partially an address: assert_eq!(VmemParser::parse_addr("@0123ABCD FF").unwrap(), expected); // Entirely an address: assert_eq!(VmemParser::parse_addr("@0123ABCD").unwrap(), expected); // Lower-case hex characters: assert_eq!(VmemParser::parse_addr("@0123abcd").unwrap(), expected); // u32 overflow: assert!(VmemParser::parse_addr("@123456789").is_err()); // Missing address after '@': assert!(VmemParser::parse_addr("@").is_err()); assert!(VmemParser::parse_addr("@ FF").is_err()); } #[test] fn value() { // No value: assert_eq!(VmemParser::parse_value("/* X */").unwrap(), None); let expected = Some(Span { len: 8, token: Token::Value(0x0123abcd), }); // Partially a value: assert_eq!(VmemParser::parse_value("0123ABCD FF").unwrap(), expected); // Entirely a value: 
assert_eq!(VmemParser::parse_value("0123ABCD").unwrap(), expected); // Lower-case hex characters: assert_eq!(VmemParser::parse_value("0123abcd").unwrap(), expected); // u32 overflow: assert!(VmemParser::parse_value("123456789").is_err()); } #[test] fn comment() { // No whitespace: assert_eq!(VmemParser::parse_comment("FF").unwrap(), None); let expected = Some(Span { len: 7, token: Token::Comment, }); // Partial block comment: assert_eq!(VmemParser::parse_comment("/* X */ FF").unwrap(), expected); // Entirely a block comment: assert_eq!(VmemParser::parse_comment("/* X */").unwrap(), expected); // Unclosed block comment: assert!(VmemParser::parse_comment("/* X").is_err()); // Line comment ending in newline: assert_eq!( VmemParser::parse_comment(concat!("// XXXX", '\n', "FF")).unwrap(), expected ); // Line comment ending at EOF: assert_eq!(VmemParser::parse_comment("// XXXX").unwrap(), expected); } #[test] fn whitespace() { // No whitespace: assert_eq!(VmemParser::parse_whitespace("FF").unwrap(), None); let expected = Some(Span { len: 2, token: Token::Whitespace, }); // Partial whitespace: assert_eq!(VmemParser::parse_whitespace(" FF").unwrap(), expected); // Entirely whitespace: assert_eq!(VmemParser::parse_whitespace(" ").unwrap(), expected); } }
{ // Check for whitespace at the beginning of the input. let len = match s.find(|c: char| !c.is_whitespace()) { Some(0) => return Ok(None), Some(len) => len, None => s.len(), }; let token = Token::Whitespace; let span = Span { len, token }; Ok(Some(span)) }
identifier_body
hCassandra_test.py
#!/usr/bin/env python __author__ = 'annyz' from sys import path # Append 'hydra' directory to Python path path.append("hydra/src/main/python") import sys import logging import math import ast import copy import json import threading import time import random import paramiko from datetime import datetime, timedelta from optparse import OptionParser from pprint import pformat # NOQA from hydra.lib import util from hydra.lib.h_analyser import HAnalyser from hydra.lib.hydrabase import HydraBase from cassandra.cluster import Cluster try: # Python 2.x from ConfigParser import ConfigParser except ImportError: # Python 3.x from configparser import ConfigParser l = util.createlogger('hCassandra', logging.DEBUG) class RunTestCassandra(HydraBase): def __init__(self, options, runtest=True, mock=False): self.options = options self.config = ConfigParser() HydraBase.__init__(self, 'CassandraStressTest', self.options, self.config, startappserver=runtest, mock=mock, app_dirs=['src', 'hydra']) self.stress_client = '/stress-client' self.add_appid(self.stress_client) if runtest: self.run_test() self.stop_appserver() def rerun_test(self, options): self.options = options self.reset_all_app_stats(self.stress_client) # Signal message sending l.info("Sending signal to Cassandra Stress client to start sending all messages..") # Force start-time for ALL clients +60 seconds from current time start_time = datetime.now() + timedelta(seconds=60) l.debug("Current Time: %s, Start Time: %s" % (datetime.now(), start_time)) task_list = self.all_task_ids[self.stress_client] ha_list = [] for task_id in task_list: info = self.apps[self.stress_client]['ip_port_map'][task_id] port = info[0] ip = info[1] ha_stress = HAnalyser(ip, port, task_id) # Signal ALL clients to start sending data, blocks until clients respond with "DONE" after sending all data ha_stress.start_test(start_time=start_time) ha_list.append(ha_stress) l.info('Waiting for test(s) to end...') if self.options.sim_failure: l.debug("Simulate 
Cassandra Node Failure. Init.") # Thread Event to indicate tests have been completed tests_completed = threading.Event() # Launch parallel Thread to simulate cassandra node failure. l.debug("Launch separate thread to simulate node failure and rejoin.") failure_thread = threading.Thread(target=simulate_node_failure, args=(self.options.cluster_ips.split(','), self.options.test_duration, tests_completed)) failure_thread.start() for idx, ha_stress in enumerate(ha_list): l.debug('Waiting for task [%s] in [%s:%s] test to END. Iteration: %s' % (ha_stress.task_id, ha_stress.server_ip, ha_stress.port, idx)) ha_stress.wait_for_testend() if self.options.sim_failure: l.debug("ALL tests are COMPLETED.") tests_completed.set() l.info('Fetch App Stats') self.fetch_app_stats(self.stress_client) return self.result_parser() def run_test(self, first_run=True): # Get Mesos/Marathon Clients self.start_init() # Reset (drop) Cassandra DB for cassandra-stress tool default 'keyspace' self.reset_db() # Create Table(s) & Triggers for stress Test # self.create_triggers() # Launch Cassandra Stress-Client(s) self.launch_stress_client() # Rerun the test res = self.rerun_test(self.options) # Return Test Results return res def create_triggers(self): try: cluster_ips = self.options.cluster_ips.split(',') cluster = Cluster(cluster_ips) l.debug("Connecting to Cassandra Cluster: [%s]" % (cluster_ips)) session = cluster.connect() l.info("Create keyspace [keyspace1]...") # Create Keyspace session.execute("CREATE KEYSPACE keyspace1 WITH replication = {'class': 'SimpleStrategy', " "'replication_factor': '1'} AND durable_writes = true;") l.info("Create tables [standard1] & [counter1]...") table_create = "CREATE TABLE keyspace1.standard1 ( " \ "key blob PRIMARY KEY," \ "\"C0\" blob," \ "\"C1\" blob," \ "\"C2\" blob," \ "\"C3\" blob," \ "\"C3\" blob" \ ") WITH COMPACT STORAGE" \ "AND bloom_filter_fp_chance = 0.01" \ "AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}" \ "AND comment = ''" \ "AND 
compaction = {'class': " \ "'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'," \ " 'min_threshold': '4'}" \ "AND compression = {'enabled': 'false'}" \ "AND crc_check_chance = 1.0" \ "AND dclocal_read_repair_chance = 0.1" \ "AND default_time_to_live = 0" \ "AND gc_grace_seconds = 864000" \ "AND max_index_interval = 2048" \ "AND memtable_flush_period_in_ms = 0" \ "AND min_index_interval = 128" \ "AND read_repair_chance = 0.0" \ "AND speculative_retry = '99PERCENTILE';" l.info("Create standard1 Table") # Create 'standard1' & 'counter1' default Tables session.execute(table_create) l.info('Succeeded to create keyspace1 and standard1 Table.') # Create Trigger trigger_jar = 'org.apache.cassandra.triggers.AuditTrigger' trigger_cql = "CREATE TRIGGER pushTrigger ON keyspace1.standard1 USING " + trigger_jar session.execute(trigger_cql) except Exception as e: l.error('FAILED to create trigger. Error: %s' % str(e)) def reset_db(self): try: ips = self.options.cluster_ips.split(',') cluster = Cluster(ips) l.debug("Connecting to Cassandra Cluster: [%s]" % (ips)) session = cluster.connect() l.info("dropping [keyspace1] (default) keyspace...") session.execute("DROP KEYSPACE keyspace1") l.info('Succeeded to delete DB.') except Exception as e: l.error('Failed to reset Cassandra DB. Error: %s' % str(e)) def stop_and_delete_all_apps(self): self.delete_all_launched_apps() def result_parser(self): result = { 'total ops': [], # Running total number of operations during the run. 'op/s': [], # Number of operations per second performed during the run. 'pk/s': [], # Number of partition operations per second performed during the run. 'row/s': 0, # Number of row operations per second performed during the run. 'mean': 0, # Average latency in milisecond for each operation during that run. 'med': [], # Median latency in miliseconds for each operation during that run. '.95': [], # 95% of the time the latency was less than this number. 
'.99': [], # 99% of the time the latency was less than this number. 'max': [], # Maximum latency in miliseconds. 'gc_num': 0, # Number of garbage collections. 'max_ms': [], # Longest garbage collection in miliseconds. 'sum_ms': 0, # Total of garbage collection in miliseconds. 'sdv_ms': [], # Standard deviation in miliseconds. 'mb': 0, # Size of the garbage collection in megabytes. 'op_time': [] # Total Operation Time per client } cassandra_results = { 'write': copy.deepcopy(result), 'read': copy.deepcopy(result) } # Get stats for Cassandra Stress Client stats = self.get_app_stats(self.stress_client) # num_clients = self.options.total_client_count db_ops = ['write', 'read'] for client in stats.keys(): info = stats[client] for db_op in db_ops: if db_op in info: try: info[db_op] = ast.literal_eval(info[db_op]) cassandra_results[db_op]['total ops'].append(int(info[db_op]['Total partitions'])) cassandra_results[db_op]['op/s'].append(int(info[db_op]['op rate'])) cassandra_results[db_op]['pk/s'].append(int(info[db_op]['partition rate'])) cassandra_results[db_op]['.95'].append(float(info[db_op]['latency 95th percentile'])) cassandra_results[db_op]['.99'].append(float(info[db_op]['latency 99th percentile'])) cassandra_results[db_op]['gc_num'] += int(info[db_op]['total gc count']) cassandra_results[db_op]['sdv_ms'].append(float(info[db_op]['stdev gc time(ms)'])) cassandra_results[db_op]['max'].append(float(info[db_op]['latency max'])) cassandra_results[db_op]['med'].append(float(info[db_op]['latency median'])) cassandra_results[db_op]['op_time'].append((info[db_op]['Total operation time']).replace(' ', '')) except Exception as e: l.error("Failed to parse stats from Client: " + pformat(client) + " DATA = " + pformat(info[db_op])) l.error("ERROR: %s" % str(e)) return cassandra_results def launch_stress_client(self): max_threads_per_client = 20 l.info("Launching the Cassandra Stress Client(s). 
Total clients = %s" % (self.options.total_client_count)) # Determine number of threads per Cassandra Stress Client if self.options.total_client_count > max_threads_per_client: # Calculating the number of apps we need to scale to client_count = math.ceil(self.options.total_client_count / float(max_threads_per_client)) # Calculating the suitable number of threads we need to run in an app threads_per_client = int(math.ceil(self.options.total_client_count / client_count)) else: threads_per_client = self.options.total_client_count l.debug("Number of Threads per Cassandra-Stress Client, set to: %s" % (threads_per_client)) self.create_binary_app(name=self.stress_client, app_script='./src/stress_client.py %s %s %s %s %s %s' % (self.options.total_ops_count, threads_per_client, self.options.cluster_ips, self.options.test_duration, self.options.cl, self.options.profile), cpus=0.2, mem=600, ports=[0]) if self.options.total_client_count > max_threads_per_client: l.info("Number of Cassandra-Stress Clients to launch = %s" % (client_count)) self.scale_and_verify_app(self.stress_client, client_count) def delete_all_launched_apps(self): l.info("Deleting Stress Clients") self.delete_app(self.stress_client) def simulate_node_failure(node_ips, max_duration, tests_completed): """ Simulate random cassandra node failure and 'rejoin' into cluster """ run = True l.info("START Cassandra Node Failure Simulation. 
Entering.") while run: # If stress-tests are still running continue with node failure simulation if not tests_completed.isSet(): # Select 'random' node from Cassandra Cluster node_ip = select_random_node(node_ips) # Determine delay before stopping cassandra node (to simulate failure / node down) duration_secs = max_duration*60 time_next_stop = random.randint(1, duration_secs/4) l.debug("STOP programmed in %s seconds" % time_next_stop) # Wait time.sleep(time_next_stop) ssh_fail = False # Stop Cassandra Node (simulate failure / stop the service) stop_cmd = "sudo service cassandra stop" l.debug("STOP Cassandra Node: %s"%node_ip) try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(str(node_ip)) l.debug("[Simulate Cassandra Node Failure] Connected to host: %s" % node_ip) except paramiko.AuthenticationException as e: l.error("Authentication failed when connecting to %s. ERROR: %s" % (node_ip, e)) ssh_fail = True except: l.error("Could not SSH to %s, waiting for it to start" % node_ip) ssh_fail = True if not ssh_fail: # Send the command to STOP cassandra node ssh.exec_command(stop_cmd) # Determine delay before starting cassandra node (to simulate rejoin to the cluster) time_next_rejoin = random.randint(1, duration_secs/4) l.debug("START programmed in %s seconds" % time_next_rejoin) time.sleep(time_next_rejoin) # Start Cassandra Node (simulate rejoin / start the service) start_cmd = "sudo service cassandra start" l.debug("START Cassandra Node: %s"%node_ip) # Send the command (non-blocking) ssh.exec_command(start_cmd) # Disconnect from the host l.debug("Closing SSH connection to host: %s" % node_ip) ssh.close() run=False else: # Tests Complete has been signaled run=False l.info("END node failure simulation. Exiting.") def select_random_node(cluster_ips):
class RunTest(object): def __init__(self, argv): usage = ('python %prog --test_duration=<time to run test> --total_ops_count=<Total Operations>' '--total_client_count=<Total clients to launch> --cluster_ips=<cassandra node list ips>' '--consistency_level=<cassandra consistency level> --profile=<yaml profile>' '--config_file=<path_to_config_file> --sim_failure=<simulate node failure>') parser = OptionParser(description='cassandra scale test master', version="0.1", usage=usage) parser.add_option("--test_duration", dest='test_duration', type='int', default=5) parser.add_option("--total_ops_count", dest='total_ops_count', type='int', default=1000000) parser.add_option("--total_client_count", dest='total_client_count', type='int', default=20) parser.add_option("--cluster_ips", dest='cluster_ips', type='string', default='127.0.0.1') parser.add_option("--consistency_level", dest='cl', type='string', default='LOCAL_ONE') parser.add_option("--profile", dest='profile', type='string', default='hydra_profile.yaml') parser.add_option("--config_file", dest='config_file', type='string', default='hydra.ini') parser.add_option("--sim_failure", dest='sim_failure', action="store_true", default=False) (options, args) = parser.parse_args() # Check NO list of positional arguments leftover after parsing options if ((len(args) != 0)): parser.print_help() sys.exit(1) # Run Cassandra Test r = RunTestCassandra(options, False) r.start_appserver() res = r.run_test() r.delete_all_launched_apps() # Cassandra-Stress Test Results result_json = json.dumps(res) print("Cassandra Stress Results: \n%s" % pformat(result_json)) r.stop_appserver() if __name__ == "__main__": RunTest(sys.argv)
""" Select a random cassandra node from a list of IPs """ return random.choice(cluster_ips)
identifier_body
hCassandra_test.py
#!/usr/bin/env python __author__ = 'annyz' from sys import path # Append 'hydra' directory to Python path path.append("hydra/src/main/python") import sys import logging import math import ast import copy import json import threading import time import random import paramiko from datetime import datetime, timedelta from optparse import OptionParser from pprint import pformat # NOQA from hydra.lib import util from hydra.lib.h_analyser import HAnalyser from hydra.lib.hydrabase import HydraBase from cassandra.cluster import Cluster try: # Python 2.x from ConfigParser import ConfigParser except ImportError: # Python 3.x from configparser import ConfigParser l = util.createlogger('hCassandra', logging.DEBUG) class RunTestCassandra(HydraBase): def __init__(self, options, runtest=True, mock=False): self.options = options self.config = ConfigParser() HydraBase.__init__(self, 'CassandraStressTest', self.options, self.config, startappserver=runtest, mock=mock, app_dirs=['src', 'hydra']) self.stress_client = '/stress-client' self.add_appid(self.stress_client) if runtest: self.run_test() self.stop_appserver() def rerun_test(self, options): self.options = options self.reset_all_app_stats(self.stress_client) # Signal message sending l.info("Sending signal to Cassandra Stress client to start sending all messages..") # Force start-time for ALL clients +60 seconds from current time start_time = datetime.now() + timedelta(seconds=60) l.debug("Current Time: %s, Start Time: %s" % (datetime.now(), start_time)) task_list = self.all_task_ids[self.stress_client] ha_list = [] for task_id in task_list: info = self.apps[self.stress_client]['ip_port_map'][task_id] port = info[0] ip = info[1] ha_stress = HAnalyser(ip, port, task_id) # Signal ALL clients to start sending data, blocks until clients respond with "DONE" after sending all data ha_stress.start_test(start_time=start_time) ha_list.append(ha_stress) l.info('Waiting for test(s) to end...') if self.options.sim_failure: l.debug("Simulate 
Cassandra Node Failure. Init.") # Thread Event to indicate tests have been completed tests_completed = threading.Event() # Launch parallel Thread to simulate cassandra node failure. l.debug("Launch separate thread to simulate node failure and rejoin.") failure_thread = threading.Thread(target=simulate_node_failure, args=(self.options.cluster_ips.split(','), self.options.test_duration, tests_completed)) failure_thread.start() for idx, ha_stress in enumerate(ha_list): l.debug('Waiting for task [%s] in [%s:%s] test to END. Iteration: %s' % (ha_stress.task_id, ha_stress.server_ip, ha_stress.port, idx)) ha_stress.wait_for_testend() if self.options.sim_failure: l.debug("ALL tests are COMPLETED.") tests_completed.set() l.info('Fetch App Stats') self.fetch_app_stats(self.stress_client) return self.result_parser() def run_test(self, first_run=True): # Get Mesos/Marathon Clients self.start_init() # Reset (drop) Cassandra DB for cassandra-stress tool default 'keyspace' self.reset_db() # Create Table(s) & Triggers for stress Test # self.create_triggers() # Launch Cassandra Stress-Client(s) self.launch_stress_client() # Rerun the test res = self.rerun_test(self.options) # Return Test Results return res def create_triggers(self): try: cluster_ips = self.options.cluster_ips.split(',') cluster = Cluster(cluster_ips) l.debug("Connecting to Cassandra Cluster: [%s]" % (cluster_ips)) session = cluster.connect() l.info("Create keyspace [keyspace1]...") # Create Keyspace session.execute("CREATE KEYSPACE keyspace1 WITH replication = {'class': 'SimpleStrategy', " "'replication_factor': '1'} AND durable_writes = true;") l.info("Create tables [standard1] & [counter1]...") table_create = "CREATE TABLE keyspace1.standard1 ( " \ "key blob PRIMARY KEY," \ "\"C0\" blob," \ "\"C1\" blob," \ "\"C2\" blob," \ "\"C3\" blob," \ "\"C3\" blob" \ ") WITH COMPACT STORAGE" \ "AND bloom_filter_fp_chance = 0.01" \ "AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}" \ "AND comment = ''" \ "AND 
compaction = {'class': " \ "'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'," \ " 'min_threshold': '4'}" \ "AND compression = {'enabled': 'false'}" \ "AND crc_check_chance = 1.0" \ "AND dclocal_read_repair_chance = 0.1" \ "AND default_time_to_live = 0" \ "AND gc_grace_seconds = 864000" \ "AND max_index_interval = 2048" \ "AND memtable_flush_period_in_ms = 0" \ "AND min_index_interval = 128" \ "AND read_repair_chance = 0.0" \ "AND speculative_retry = '99PERCENTILE';" l.info("Create standard1 Table") # Create 'standard1' & 'counter1' default Tables session.execute(table_create) l.info('Succeeded to create keyspace1 and standard1 Table.') # Create Trigger trigger_jar = 'org.apache.cassandra.triggers.AuditTrigger' trigger_cql = "CREATE TRIGGER pushTrigger ON keyspace1.standard1 USING " + trigger_jar session.execute(trigger_cql) except Exception as e: l.error('FAILED to create trigger. Error: %s' % str(e)) def reset_db(self): try: ips = self.options.cluster_ips.split(',') cluster = Cluster(ips) l.debug("Connecting to Cassandra Cluster: [%s]" % (ips)) session = cluster.connect() l.info("dropping [keyspace1] (default) keyspace...") session.execute("DROP KEYSPACE keyspace1") l.info('Succeeded to delete DB.') except Exception as e: l.error('Failed to reset Cassandra DB. Error: %s' % str(e)) def stop_and_delete_all_apps(self): self.delete_all_launched_apps() def result_parser(self): result = { 'total ops': [], # Running total number of operations during the run. 'op/s': [], # Number of operations per second performed during the run. 'pk/s': [], # Number of partition operations per second performed during the run. 'row/s': 0, # Number of row operations per second performed during the run. 'mean': 0, # Average latency in milisecond for each operation during that run. 'med': [], # Median latency in miliseconds for each operation during that run. '.95': [], # 95% of the time the latency was less than this number. 
'.99': [], # 99% of the time the latency was less than this number. 'max': [], # Maximum latency in miliseconds. 'gc_num': 0, # Number of garbage collections. 'max_ms': [], # Longest garbage collection in miliseconds. 'sum_ms': 0, # Total of garbage collection in miliseconds. 'sdv_ms': [], # Standard deviation in miliseconds. 'mb': 0, # Size of the garbage collection in megabytes. 'op_time': [] # Total Operation Time per client } cassandra_results = { 'write': copy.deepcopy(result), 'read': copy.deepcopy(result) } # Get stats for Cassandra Stress Client stats = self.get_app_stats(self.stress_client) # num_clients = self.options.total_client_count db_ops = ['write', 'read'] for client in stats.keys(): info = stats[client] for db_op in db_ops: if db_op in info: try: info[db_op] = ast.literal_eval(info[db_op]) cassandra_results[db_op]['total ops'].append(int(info[db_op]['Total partitions'])) cassandra_results[db_op]['op/s'].append(int(info[db_op]['op rate'])) cassandra_results[db_op]['pk/s'].append(int(info[db_op]['partition rate'])) cassandra_results[db_op]['.95'].append(float(info[db_op]['latency 95th percentile'])) cassandra_results[db_op]['.99'].append(float(info[db_op]['latency 99th percentile'])) cassandra_results[db_op]['gc_num'] += int(info[db_op]['total gc count']) cassandra_results[db_op]['sdv_ms'].append(float(info[db_op]['stdev gc time(ms)'])) cassandra_results[db_op]['max'].append(float(info[db_op]['latency max'])) cassandra_results[db_op]['med'].append(float(info[db_op]['latency median'])) cassandra_results[db_op]['op_time'].append((info[db_op]['Total operation time']).replace(' ', '')) except Exception as e: l.error("Failed to parse stats from Client: " + pformat(client) + " DATA = " + pformat(info[db_op])) l.error("ERROR: %s" % str(e)) return cassandra_results def launch_stress_client(self): max_threads_per_client = 20 l.info("Launching the Cassandra Stress Client(s). 
Total clients = %s" % (self.options.total_client_count)) # Determine number of threads per Cassandra Stress Client if self.options.total_client_count > max_threads_per_client: # Calculating the number of apps we need to scale to client_count = math.ceil(self.options.total_client_count / float(max_threads_per_client)) # Calculating the suitable number of threads we need to run in an app threads_per_client = int(math.ceil(self.options.total_client_count / client_count)) else: threads_per_client = self.options.total_client_count l.debug("Number of Threads per Cassandra-Stress Client, set to: %s" % (threads_per_client)) self.create_binary_app(name=self.stress_client, app_script='./src/stress_client.py %s %s %s %s %s %s' % (self.options.total_ops_count, threads_per_client, self.options.cluster_ips, self.options.test_duration, self.options.cl, self.options.profile), cpus=0.2, mem=600, ports=[0]) if self.options.total_client_count > max_threads_per_client: l.info("Number of Cassandra-Stress Clients to launch = %s" % (client_count)) self.scale_and_verify_app(self.stress_client, client_count) def delete_all_launched_apps(self): l.info("Deleting Stress Clients") self.delete_app(self.stress_client) def simulate_node_failure(node_ips, max_duration, tests_completed): """ Simulate random cassandra node failure and 'rejoin' into cluster """ run = True l.info("START Cassandra Node Failure Simulation. Entering.") while run: # If stress-tests are still running continue with node failure simulation if not tests_completed.isSet(): # Select 'random' node from Cassandra Cluster
else: # Tests Complete has been signaled run=False l.info("END node failure simulation. Exiting.") def select_random_node(cluster_ips): """ Select a random cassandra node from a list of IPs """ return random.choice(cluster_ips) class RunTest(object): def __init__(self, argv): usage = ('python %prog --test_duration=<time to run test> --total_ops_count=<Total Operations>' '--total_client_count=<Total clients to launch> --cluster_ips=<cassandra node list ips>' '--consistency_level=<cassandra consistency level> --profile=<yaml profile>' '--config_file=<path_to_config_file> --sim_failure=<simulate node failure>') parser = OptionParser(description='cassandra scale test master', version="0.1", usage=usage) parser.add_option("--test_duration", dest='test_duration', type='int', default=5) parser.add_option("--total_ops_count", dest='total_ops_count', type='int', default=1000000) parser.add_option("--total_client_count", dest='total_client_count', type='int', default=20) parser.add_option("--cluster_ips", dest='cluster_ips', type='string', default='127.0.0.1') parser.add_option("--consistency_level", dest='cl', type='string', default='LOCAL_ONE') parser.add_option("--profile", dest='profile', type='string', default='hydra_profile.yaml') parser.add_option("--config_file", dest='config_file', type='string', default='hydra.ini') parser.add_option("--sim_failure", dest='sim_failure', action="store_true", default=False) (options, args) = parser.parse_args() # Check NO list of positional arguments leftover after parsing options if ((len(args) != 0)): parser.print_help() sys.exit(1) # Run Cassandra Test r = RunTestCassandra(options, False) r.start_appserver() res = r.run_test() r.delete_all_launched_apps() # Cassandra-Stress Test Results result_json = json.dumps(res) print("Cassandra Stress Results: \n%s" % pformat(result_json)) r.stop_appserver() if __name__ == "__main__": RunTest(sys.argv)
node_ip = select_random_node(node_ips) # Determine delay before stopping cassandra node (to simulate failure / node down) duration_secs = max_duration*60 time_next_stop = random.randint(1, duration_secs/4) l.debug("STOP programmed in %s seconds" % time_next_stop) # Wait time.sleep(time_next_stop) ssh_fail = False # Stop Cassandra Node (simulate failure / stop the service) stop_cmd = "sudo service cassandra stop" l.debug("STOP Cassandra Node: %s"%node_ip) try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(str(node_ip)) l.debug("[Simulate Cassandra Node Failure] Connected to host: %s" % node_ip) except paramiko.AuthenticationException as e: l.error("Authentication failed when connecting to %s. ERROR: %s" % (node_ip, e)) ssh_fail = True except: l.error("Could not SSH to %s, waiting for it to start" % node_ip) ssh_fail = True if not ssh_fail: # Send the command to STOP cassandra node ssh.exec_command(stop_cmd) # Determine delay before starting cassandra node (to simulate rejoin to the cluster) time_next_rejoin = random.randint(1, duration_secs/4) l.debug("START programmed in %s seconds" % time_next_rejoin) time.sleep(time_next_rejoin) # Start Cassandra Node (simulate rejoin / start the service) start_cmd = "sudo service cassandra start" l.debug("START Cassandra Node: %s"%node_ip) # Send the command (non-blocking) ssh.exec_command(start_cmd) # Disconnect from the host l.debug("Closing SSH connection to host: %s" % node_ip) ssh.close() run=False
conditional_block
hCassandra_test.py
#!/usr/bin/env python __author__ = 'annyz' from sys import path # Append 'hydra' directory to Python path path.append("hydra/src/main/python") import sys import logging import math import ast import copy import json import threading import time import random import paramiko from datetime import datetime, timedelta from optparse import OptionParser from pprint import pformat # NOQA from hydra.lib import util from hydra.lib.h_analyser import HAnalyser from hydra.lib.hydrabase import HydraBase from cassandra.cluster import Cluster try: # Python 2.x from ConfigParser import ConfigParser except ImportError: # Python 3.x from configparser import ConfigParser l = util.createlogger('hCassandra', logging.DEBUG) class
(HydraBase): def __init__(self, options, runtest=True, mock=False): self.options = options self.config = ConfigParser() HydraBase.__init__(self, 'CassandraStressTest', self.options, self.config, startappserver=runtest, mock=mock, app_dirs=['src', 'hydra']) self.stress_client = '/stress-client' self.add_appid(self.stress_client) if runtest: self.run_test() self.stop_appserver() def rerun_test(self, options): self.options = options self.reset_all_app_stats(self.stress_client) # Signal message sending l.info("Sending signal to Cassandra Stress client to start sending all messages..") # Force start-time for ALL clients +60 seconds from current time start_time = datetime.now() + timedelta(seconds=60) l.debug("Current Time: %s, Start Time: %s" % (datetime.now(), start_time)) task_list = self.all_task_ids[self.stress_client] ha_list = [] for task_id in task_list: info = self.apps[self.stress_client]['ip_port_map'][task_id] port = info[0] ip = info[1] ha_stress = HAnalyser(ip, port, task_id) # Signal ALL clients to start sending data, blocks until clients respond with "DONE" after sending all data ha_stress.start_test(start_time=start_time) ha_list.append(ha_stress) l.info('Waiting for test(s) to end...') if self.options.sim_failure: l.debug("Simulate Cassandra Node Failure. Init.") # Thread Event to indicate tests have been completed tests_completed = threading.Event() # Launch parallel Thread to simulate cassandra node failure. l.debug("Launch separate thread to simulate node failure and rejoin.") failure_thread = threading.Thread(target=simulate_node_failure, args=(self.options.cluster_ips.split(','), self.options.test_duration, tests_completed)) failure_thread.start() for idx, ha_stress in enumerate(ha_list): l.debug('Waiting for task [%s] in [%s:%s] test to END. 
Iteration: %s' % (ha_stress.task_id, ha_stress.server_ip, ha_stress.port, idx)) ha_stress.wait_for_testend() if self.options.sim_failure: l.debug("ALL tests are COMPLETED.") tests_completed.set() l.info('Fetch App Stats') self.fetch_app_stats(self.stress_client) return self.result_parser() def run_test(self, first_run=True): # Get Mesos/Marathon Clients self.start_init() # Reset (drop) Cassandra DB for cassandra-stress tool default 'keyspace' self.reset_db() # Create Table(s) & Triggers for stress Test # self.create_triggers() # Launch Cassandra Stress-Client(s) self.launch_stress_client() # Rerun the test res = self.rerun_test(self.options) # Return Test Results return res def create_triggers(self): try: cluster_ips = self.options.cluster_ips.split(',') cluster = Cluster(cluster_ips) l.debug("Connecting to Cassandra Cluster: [%s]" % (cluster_ips)) session = cluster.connect() l.info("Create keyspace [keyspace1]...") # Create Keyspace session.execute("CREATE KEYSPACE keyspace1 WITH replication = {'class': 'SimpleStrategy', " "'replication_factor': '1'} AND durable_writes = true;") l.info("Create tables [standard1] & [counter1]...") table_create = "CREATE TABLE keyspace1.standard1 ( " \ "key blob PRIMARY KEY," \ "\"C0\" blob," \ "\"C1\" blob," \ "\"C2\" blob," \ "\"C3\" blob," \ "\"C3\" blob" \ ") WITH COMPACT STORAGE" \ "AND bloom_filter_fp_chance = 0.01" \ "AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}" \ "AND comment = ''" \ "AND compaction = {'class': " \ "'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'," \ " 'min_threshold': '4'}" \ "AND compression = {'enabled': 'false'}" \ "AND crc_check_chance = 1.0" \ "AND dclocal_read_repair_chance = 0.1" \ "AND default_time_to_live = 0" \ "AND gc_grace_seconds = 864000" \ "AND max_index_interval = 2048" \ "AND memtable_flush_period_in_ms = 0" \ "AND min_index_interval = 128" \ "AND read_repair_chance = 0.0" \ "AND speculative_retry = '99PERCENTILE';" l.info("Create 
standard1 Table") # Create 'standard1' & 'counter1' default Tables session.execute(table_create) l.info('Succeeded to create keyspace1 and standard1 Table.') # Create Trigger trigger_jar = 'org.apache.cassandra.triggers.AuditTrigger' trigger_cql = "CREATE TRIGGER pushTrigger ON keyspace1.standard1 USING " + trigger_jar session.execute(trigger_cql) except Exception as e: l.error('FAILED to create trigger. Error: %s' % str(e)) def reset_db(self): try: ips = self.options.cluster_ips.split(',') cluster = Cluster(ips) l.debug("Connecting to Cassandra Cluster: [%s]" % (ips)) session = cluster.connect() l.info("dropping [keyspace1] (default) keyspace...") session.execute("DROP KEYSPACE keyspace1") l.info('Succeeded to delete DB.') except Exception as e: l.error('Failed to reset Cassandra DB. Error: %s' % str(e)) def stop_and_delete_all_apps(self): self.delete_all_launched_apps() def result_parser(self): result = { 'total ops': [], # Running total number of operations during the run. 'op/s': [], # Number of operations per second performed during the run. 'pk/s': [], # Number of partition operations per second performed during the run. 'row/s': 0, # Number of row operations per second performed during the run. 'mean': 0, # Average latency in milisecond for each operation during that run. 'med': [], # Median latency in miliseconds for each operation during that run. '.95': [], # 95% of the time the latency was less than this number. '.99': [], # 99% of the time the latency was less than this number. 'max': [], # Maximum latency in miliseconds. 'gc_num': 0, # Number of garbage collections. 'max_ms': [], # Longest garbage collection in miliseconds. 'sum_ms': 0, # Total of garbage collection in miliseconds. 'sdv_ms': [], # Standard deviation in miliseconds. 'mb': 0, # Size of the garbage collection in megabytes. 
'op_time': [] # Total Operation Time per client } cassandra_results = { 'write': copy.deepcopy(result), 'read': copy.deepcopy(result) } # Get stats for Cassandra Stress Client stats = self.get_app_stats(self.stress_client) # num_clients = self.options.total_client_count db_ops = ['write', 'read'] for client in stats.keys(): info = stats[client] for db_op in db_ops: if db_op in info: try: info[db_op] = ast.literal_eval(info[db_op]) cassandra_results[db_op]['total ops'].append(int(info[db_op]['Total partitions'])) cassandra_results[db_op]['op/s'].append(int(info[db_op]['op rate'])) cassandra_results[db_op]['pk/s'].append(int(info[db_op]['partition rate'])) cassandra_results[db_op]['.95'].append(float(info[db_op]['latency 95th percentile'])) cassandra_results[db_op]['.99'].append(float(info[db_op]['latency 99th percentile'])) cassandra_results[db_op]['gc_num'] += int(info[db_op]['total gc count']) cassandra_results[db_op]['sdv_ms'].append(float(info[db_op]['stdev gc time(ms)'])) cassandra_results[db_op]['max'].append(float(info[db_op]['latency max'])) cassandra_results[db_op]['med'].append(float(info[db_op]['latency median'])) cassandra_results[db_op]['op_time'].append((info[db_op]['Total operation time']).replace(' ', '')) except Exception as e: l.error("Failed to parse stats from Client: " + pformat(client) + " DATA = " + pformat(info[db_op])) l.error("ERROR: %s" % str(e)) return cassandra_results def launch_stress_client(self): max_threads_per_client = 20 l.info("Launching the Cassandra Stress Client(s). 
Total clients = %s" % (self.options.total_client_count)) # Determine number of threads per Cassandra Stress Client if self.options.total_client_count > max_threads_per_client: # Calculating the number of apps we need to scale to client_count = math.ceil(self.options.total_client_count / float(max_threads_per_client)) # Calculating the suitable number of threads we need to run in an app threads_per_client = int(math.ceil(self.options.total_client_count / client_count)) else: threads_per_client = self.options.total_client_count l.debug("Number of Threads per Cassandra-Stress Client, set to: %s" % (threads_per_client)) self.create_binary_app(name=self.stress_client, app_script='./src/stress_client.py %s %s %s %s %s %s' % (self.options.total_ops_count, threads_per_client, self.options.cluster_ips, self.options.test_duration, self.options.cl, self.options.profile), cpus=0.2, mem=600, ports=[0]) if self.options.total_client_count > max_threads_per_client: l.info("Number of Cassandra-Stress Clients to launch = %s" % (client_count)) self.scale_and_verify_app(self.stress_client, client_count) def delete_all_launched_apps(self): l.info("Deleting Stress Clients") self.delete_app(self.stress_client) def simulate_node_failure(node_ips, max_duration, tests_completed): """ Simulate random cassandra node failure and 'rejoin' into cluster """ run = True l.info("START Cassandra Node Failure Simulation. 
Entering.") while run: # If stress-tests are still running continue with node failure simulation if not tests_completed.isSet(): # Select 'random' node from Cassandra Cluster node_ip = select_random_node(node_ips) # Determine delay before stopping cassandra node (to simulate failure / node down) duration_secs = max_duration*60 time_next_stop = random.randint(1, duration_secs/4) l.debug("STOP programmed in %s seconds" % time_next_stop) # Wait time.sleep(time_next_stop) ssh_fail = False # Stop Cassandra Node (simulate failure / stop the service) stop_cmd = "sudo service cassandra stop" l.debug("STOP Cassandra Node: %s"%node_ip) try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(str(node_ip)) l.debug("[Simulate Cassandra Node Failure] Connected to host: %s" % node_ip) except paramiko.AuthenticationException as e: l.error("Authentication failed when connecting to %s. ERROR: %s" % (node_ip, e)) ssh_fail = True except: l.error("Could not SSH to %s, waiting for it to start" % node_ip) ssh_fail = True if not ssh_fail: # Send the command to STOP cassandra node ssh.exec_command(stop_cmd) # Determine delay before starting cassandra node (to simulate rejoin to the cluster) time_next_rejoin = random.randint(1, duration_secs/4) l.debug("START programmed in %s seconds" % time_next_rejoin) time.sleep(time_next_rejoin) # Start Cassandra Node (simulate rejoin / start the service) start_cmd = "sudo service cassandra start" l.debug("START Cassandra Node: %s"%node_ip) # Send the command (non-blocking) ssh.exec_command(start_cmd) # Disconnect from the host l.debug("Closing SSH connection to host: %s" % node_ip) ssh.close() run=False else: # Tests Complete has been signaled run=False l.info("END node failure simulation. 
Exiting.") def select_random_node(cluster_ips): """ Select a random cassandra node from a list of IPs """ return random.choice(cluster_ips) class RunTest(object): def __init__(self, argv): usage = ('python %prog --test_duration=<time to run test> --total_ops_count=<Total Operations>' '--total_client_count=<Total clients to launch> --cluster_ips=<cassandra node list ips>' '--consistency_level=<cassandra consistency level> --profile=<yaml profile>' '--config_file=<path_to_config_file> --sim_failure=<simulate node failure>') parser = OptionParser(description='cassandra scale test master', version="0.1", usage=usage) parser.add_option("--test_duration", dest='test_duration', type='int', default=5) parser.add_option("--total_ops_count", dest='total_ops_count', type='int', default=1000000) parser.add_option("--total_client_count", dest='total_client_count', type='int', default=20) parser.add_option("--cluster_ips", dest='cluster_ips', type='string', default='127.0.0.1') parser.add_option("--consistency_level", dest='cl', type='string', default='LOCAL_ONE') parser.add_option("--profile", dest='profile', type='string', default='hydra_profile.yaml') parser.add_option("--config_file", dest='config_file', type='string', default='hydra.ini') parser.add_option("--sim_failure", dest='sim_failure', action="store_true", default=False) (options, args) = parser.parse_args() # Check NO list of positional arguments leftover after parsing options if ((len(args) != 0)): parser.print_help() sys.exit(1) # Run Cassandra Test r = RunTestCassandra(options, False) r.start_appserver() res = r.run_test() r.delete_all_launched_apps() # Cassandra-Stress Test Results result_json = json.dumps(res) print("Cassandra Stress Results: \n%s" % pformat(result_json)) r.stop_appserver() if __name__ == "__main__": RunTest(sys.argv)
RunTestCassandra
identifier_name
hCassandra_test.py
#!/usr/bin/env python __author__ = 'annyz' from sys import path # Append 'hydra' directory to Python path path.append("hydra/src/main/python") import sys import logging import math import ast import copy import json import threading import time import random import paramiko from datetime import datetime, timedelta from optparse import OptionParser from pprint import pformat # NOQA from hydra.lib import util from hydra.lib.h_analyser import HAnalyser from hydra.lib.hydrabase import HydraBase from cassandra.cluster import Cluster try: # Python 2.x from ConfigParser import ConfigParser except ImportError: # Python 3.x from configparser import ConfigParser l = util.createlogger('hCassandra', logging.DEBUG) class RunTestCassandra(HydraBase): def __init__(self, options, runtest=True, mock=False): self.options = options self.config = ConfigParser() HydraBase.__init__(self, 'CassandraStressTest', self.options, self.config, startappserver=runtest, mock=mock, app_dirs=['src', 'hydra']) self.stress_client = '/stress-client' self.add_appid(self.stress_client) if runtest: self.run_test() self.stop_appserver() def rerun_test(self, options): self.options = options self.reset_all_app_stats(self.stress_client) # Signal message sending l.info("Sending signal to Cassandra Stress client to start sending all messages..") # Force start-time for ALL clients +60 seconds from current time start_time = datetime.now() + timedelta(seconds=60) l.debug("Current Time: %s, Start Time: %s" % (datetime.now(), start_time)) task_list = self.all_task_ids[self.stress_client] ha_list = [] for task_id in task_list: info = self.apps[self.stress_client]['ip_port_map'][task_id] port = info[0] ip = info[1] ha_stress = HAnalyser(ip, port, task_id) # Signal ALL clients to start sending data, blocks until clients respond with "DONE" after sending all data ha_stress.start_test(start_time=start_time) ha_list.append(ha_stress) l.info('Waiting for test(s) to end...') if self.options.sim_failure: l.debug("Simulate 
Cassandra Node Failure. Init.") # Thread Event to indicate tests have been completed tests_completed = threading.Event() # Launch parallel Thread to simulate cassandra node failure. l.debug("Launch separate thread to simulate node failure and rejoin.") failure_thread = threading.Thread(target=simulate_node_failure, args=(self.options.cluster_ips.split(','), self.options.test_duration, tests_completed)) failure_thread.start() for idx, ha_stress in enumerate(ha_list): l.debug('Waiting for task [%s] in [%s:%s] test to END. Iteration: %s' % (ha_stress.task_id, ha_stress.server_ip, ha_stress.port, idx)) ha_stress.wait_for_testend() if self.options.sim_failure: l.debug("ALL tests are COMPLETED.") tests_completed.set() l.info('Fetch App Stats') self.fetch_app_stats(self.stress_client) return self.result_parser() def run_test(self, first_run=True): # Get Mesos/Marathon Clients self.start_init() # Reset (drop) Cassandra DB for cassandra-stress tool default 'keyspace' self.reset_db() # Create Table(s) & Triggers for stress Test # self.create_triggers() # Launch Cassandra Stress-Client(s) self.launch_stress_client() # Rerun the test res = self.rerun_test(self.options) # Return Test Results return res def create_triggers(self): try: cluster_ips = self.options.cluster_ips.split(',') cluster = Cluster(cluster_ips) l.debug("Connecting to Cassandra Cluster: [%s]" % (cluster_ips)) session = cluster.connect() l.info("Create keyspace [keyspace1]...") # Create Keyspace session.execute("CREATE KEYSPACE keyspace1 WITH replication = {'class': 'SimpleStrategy', " "'replication_factor': '1'} AND durable_writes = true;") l.info("Create tables [standard1] & [counter1]...") table_create = "CREATE TABLE keyspace1.standard1 ( " \ "key blob PRIMARY KEY," \ "\"C0\" blob," \ "\"C1\" blob," \ "\"C2\" blob," \ "\"C3\" blob," \ "\"C3\" blob" \ ") WITH COMPACT STORAGE" \ "AND bloom_filter_fp_chance = 0.01" \ "AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}" \ "AND comment = ''" \ "AND 
compaction = {'class': " \ "'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'," \ " 'min_threshold': '4'}" \ "AND compression = {'enabled': 'false'}" \ "AND crc_check_chance = 1.0" \ "AND dclocal_read_repair_chance = 0.1" \ "AND default_time_to_live = 0" \ "AND gc_grace_seconds = 864000" \ "AND max_index_interval = 2048" \ "AND memtable_flush_period_in_ms = 0" \ "AND min_index_interval = 128" \ "AND read_repair_chance = 0.0" \ "AND speculative_retry = '99PERCENTILE';" l.info("Create standard1 Table") # Create 'standard1' & 'counter1' default Tables session.execute(table_create) l.info('Succeeded to create keyspace1 and standard1 Table.') # Create Trigger trigger_jar = 'org.apache.cassandra.triggers.AuditTrigger' trigger_cql = "CREATE TRIGGER pushTrigger ON keyspace1.standard1 USING " + trigger_jar session.execute(trigger_cql) except Exception as e: l.error('FAILED to create trigger. Error: %s' % str(e)) def reset_db(self): try: ips = self.options.cluster_ips.split(',') cluster = Cluster(ips) l.debug("Connecting to Cassandra Cluster: [%s]" % (ips)) session = cluster.connect() l.info("dropping [keyspace1] (default) keyspace...") session.execute("DROP KEYSPACE keyspace1") l.info('Succeeded to delete DB.') except Exception as e: l.error('Failed to reset Cassandra DB. Error: %s' % str(e)) def stop_and_delete_all_apps(self): self.delete_all_launched_apps() def result_parser(self): result = { 'total ops': [], # Running total number of operations during the run. 'op/s': [], # Number of operations per second performed during the run. 'pk/s': [], # Number of partition operations per second performed during the run. 'row/s': 0, # Number of row operations per second performed during the run. 'mean': 0, # Average latency in milisecond for each operation during that run. 'med': [], # Median latency in miliseconds for each operation during that run. '.95': [], # 95% of the time the latency was less than this number. 
'.99': [], # 99% of the time the latency was less than this number. 'max': [], # Maximum latency in miliseconds. 'gc_num': 0, # Number of garbage collections. 'max_ms': [], # Longest garbage collection in miliseconds. 'sum_ms': 0, # Total of garbage collection in miliseconds. 'sdv_ms': [], # Standard deviation in miliseconds. 'mb': 0, # Size of the garbage collection in megabytes. 'op_time': [] # Total Operation Time per client } cassandra_results = { 'write': copy.deepcopy(result), 'read': copy.deepcopy(result) } # Get stats for Cassandra Stress Client stats = self.get_app_stats(self.stress_client) # num_clients = self.options.total_client_count db_ops = ['write', 'read'] for client in stats.keys(): info = stats[client] for db_op in db_ops: if db_op in info: try: info[db_op] = ast.literal_eval(info[db_op]) cassandra_results[db_op]['total ops'].append(int(info[db_op]['Total partitions'])) cassandra_results[db_op]['op/s'].append(int(info[db_op]['op rate'])) cassandra_results[db_op]['pk/s'].append(int(info[db_op]['partition rate'])) cassandra_results[db_op]['.95'].append(float(info[db_op]['latency 95th percentile'])) cassandra_results[db_op]['.99'].append(float(info[db_op]['latency 99th percentile'])) cassandra_results[db_op]['gc_num'] += int(info[db_op]['total gc count']) cassandra_results[db_op]['sdv_ms'].append(float(info[db_op]['stdev gc time(ms)'])) cassandra_results[db_op]['max'].append(float(info[db_op]['latency max'])) cassandra_results[db_op]['med'].append(float(info[db_op]['latency median'])) cassandra_results[db_op]['op_time'].append((info[db_op]['Total operation time']).replace(' ', '')) except Exception as e: l.error("Failed to parse stats from Client: " + pformat(client) + " DATA = " + pformat(info[db_op])) l.error("ERROR: %s" % str(e)) return cassandra_results def launch_stress_client(self): max_threads_per_client = 20 l.info("Launching the Cassandra Stress Client(s). 
Total clients = %s" % (self.options.total_client_count)) # Determine number of threads per Cassandra Stress Client if self.options.total_client_count > max_threads_per_client: # Calculating the number of apps we need to scale to client_count = math.ceil(self.options.total_client_count / float(max_threads_per_client)) # Calculating the suitable number of threads we need to run in an app threads_per_client = int(math.ceil(self.options.total_client_count / client_count)) else: threads_per_client = self.options.total_client_count l.debug("Number of Threads per Cassandra-Stress Client, set to: %s" % (threads_per_client)) self.create_binary_app(name=self.stress_client, app_script='./src/stress_client.py %s %s %s %s %s %s' % (self.options.total_ops_count, threads_per_client, self.options.cluster_ips, self.options.test_duration, self.options.cl, self.options.profile), cpus=0.2, mem=600, ports=[0]) if self.options.total_client_count > max_threads_per_client: l.info("Number of Cassandra-Stress Clients to launch = %s" % (client_count)) self.scale_and_verify_app(self.stress_client, client_count) def delete_all_launched_apps(self): l.info("Deleting Stress Clients") self.delete_app(self.stress_client) def simulate_node_failure(node_ips, max_duration, tests_completed): """ Simulate random cassandra node failure and 'rejoin' into cluster """ run = True l.info("START Cassandra Node Failure Simulation. 
Entering.") while run: # If stress-tests are still running continue with node failure simulation if not tests_completed.isSet(): # Select 'random' node from Cassandra Cluster node_ip = select_random_node(node_ips) # Determine delay before stopping cassandra node (to simulate failure / node down) duration_secs = max_duration*60 time_next_stop = random.randint(1, duration_secs/4) l.debug("STOP programmed in %s seconds" % time_next_stop) # Wait time.sleep(time_next_stop) ssh_fail = False # Stop Cassandra Node (simulate failure / stop the service) stop_cmd = "sudo service cassandra stop" l.debug("STOP Cassandra Node: %s"%node_ip) try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(str(node_ip)) l.debug("[Simulate Cassandra Node Failure] Connected to host: %s" % node_ip) except paramiko.AuthenticationException as e: l.error("Authentication failed when connecting to %s. ERROR: %s" % (node_ip, e)) ssh_fail = True except: l.error("Could not SSH to %s, waiting for it to start" % node_ip) ssh_fail = True if not ssh_fail: # Send the command to STOP cassandra node
time.sleep(time_next_rejoin) # Start Cassandra Node (simulate rejoin / start the service) start_cmd = "sudo service cassandra start" l.debug("START Cassandra Node: %s"%node_ip) # Send the command (non-blocking) ssh.exec_command(start_cmd) # Disconnect from the host l.debug("Closing SSH connection to host: %s" % node_ip) ssh.close() run=False else: # Tests Complete has been signaled run=False l.info("END node failure simulation. Exiting.") def select_random_node(cluster_ips): """ Select a random cassandra node from a list of IPs """ return random.choice(cluster_ips) class RunTest(object): def __init__(self, argv): usage = ('python %prog --test_duration=<time to run test> --total_ops_count=<Total Operations>' '--total_client_count=<Total clients to launch> --cluster_ips=<cassandra node list ips>' '--consistency_level=<cassandra consistency level> --profile=<yaml profile>' '--config_file=<path_to_config_file> --sim_failure=<simulate node failure>') parser = OptionParser(description='cassandra scale test master', version="0.1", usage=usage) parser.add_option("--test_duration", dest='test_duration', type='int', default=5) parser.add_option("--total_ops_count", dest='total_ops_count', type='int', default=1000000) parser.add_option("--total_client_count", dest='total_client_count', type='int', default=20) parser.add_option("--cluster_ips", dest='cluster_ips', type='string', default='127.0.0.1') parser.add_option("--consistency_level", dest='cl', type='string', default='LOCAL_ONE') parser.add_option("--profile", dest='profile', type='string', default='hydra_profile.yaml') parser.add_option("--config_file", dest='config_file', type='string', default='hydra.ini') parser.add_option("--sim_failure", dest='sim_failure', action="store_true", default=False) (options, args) = parser.parse_args() # Check NO list of positional arguments leftover after parsing options if ((len(args) != 0)): parser.print_help() sys.exit(1) # Run Cassandra Test r = RunTestCassandra(options, False) 
r.start_appserver() res = r.run_test() r.delete_all_launched_apps() # Cassandra-Stress Test Results result_json = json.dumps(res) print("Cassandra Stress Results: \n%s" % pformat(result_json)) r.stop_appserver() if __name__ == "__main__": RunTest(sys.argv)
ssh.exec_command(stop_cmd) # Determine delay before starting cassandra node (to simulate rejoin to the cluster) time_next_rejoin = random.randint(1, duration_secs/4) l.debug("START programmed in %s seconds" % time_next_rejoin)
random_line_split
main.rs
use oorandom; #[test] fn research_on_directionary() { // 词性分类函数, 找出.dic的标识符以及每个标识符 10 个id use std::fs; use std::collections::HashMap; let mut hash : HashMap<&str, Vec<&str>> = HashMap::new(); let raw_bytes = fs::read_to_string("resources/ansj_seg-master/default.dic").expect("failed to open directionary file"); let filterd_bytes = raw_bytes.replace(&['0','1','2','3','4','5','6','7','8','9','\n'][..], ""); let seperate_words : Vec<&str> = filterd_bytes.split('\t').collect(); let mut i : u64 = 0; let mut last_word : &str = ""; for tag in seperate_words { i = i + 1; // 奇数列为word, 偶数列为tag if i % 2 != 0 { last_word = tag; continue; } let hash_get = hash.get_mut(tag); match hash_get { None => { let vec = vec!(last_word); hash.insert(tag, vec); } Some(vec) => { if vec.len() >= 10 {continue;} vec.push(last_word); } } } println!("{:?}", hash); } #[test] fn add_directionary() { let mut random_word = Word::from_literal("okay"); random_word.set_tag("v"); let mut random_wor2 = Word::from_literal("no"); random_wor2.set_tag("l"); let mut directionary = Directionary::new(); directionary.add_a_word(&random_word); directionary.add_a_word(&random_wor2); println!("{:?}", directionary); } #[test] fn create_directionary() { let directionary = Directionary::from_default(None, Some(500)); println!("generation finished!"); println!("result {:?}", directionary); } #[test] fn create_sentance() { let mut resolver = RandomResolver::from_seed(64u128); let directionary = Directionary::from_default(None, None); let mut sentance = SentanceNode::word("其实") .next( SentanceNode::icon(',') .next( SentanceNode::element("Noun") .next( SentanceNode::word("是") .next( SentanceNode::element("Noun") .next( SentanceNode::icon(',') .next( SentanceNode::word("你知道吗?") )))))); sentance.resolve_sentance(&mut resolver, &directionary); let output = sentance.to_string(); println!("result {}", output); } #[test] fn multi_sentance() { let mut resolver = RandomResolver::from_seed(1024u128); let directionary = 
Directionary::from_default(None, None); let generic_sentance = sentance!( [element="Who"][word="是"][element= "Adjective"][word="的"][element="Adjective"][element="IntranstiveVerb"][word="器."] ); for _ in 1..255 { let mut sentance = generic_sentance.clone(); sentance.resolve_sentance(&mut resolver, &directionary); let output = sentance.to_string(); println!("result {}", output); } } #[macro_export] macro_rules! sentance { ([$function:ident=$string:expr]$([$nfunction:ident=$nstring:expr])+ ) => { SentanceNode::$function($string).next(sentance!($([$nfunction=$nstring])+)) }; ([$function:ident=$string:expr]) => { SentanceNode::$function($string) }; } #[derive(Debug)] struct Word { pub tag : String, pub literal : String, } impl Word { pub fn from_literal(init_literal : &str) -> Word { Word{ tag : String::new(), literal : String::from(init_literal) } } pub fn set_tag(&mut self, new_tag : &str) { self.tag = String::from(new_tag); } } #[derive(Debug)] struct TagMatcher { pub matchers_pool : std::collections::BTreeMap<String, Vec<String>>, } impl TagMatcher { pub fn new() -> TagMatcher { use std::collections::BTreeMap; TagMatcher{matchers_pool : BTreeMap::new()} } fn add(mut self, tag : &'static str, matchers : Vec<&'static str>) -> Self { let matcher_result = self.matchers_pool.get(tag); match matcher_result { Some(_) => { } None => { self.matchers_pool.insert(String::from(tag), Vec::new()); } } let matcher_vec = self.matchers_pool.get_mut(tag).unwrap(); for matcher in matchers { matcher_vec.push(String::from(matcher)); } self } pub fn resolve(&self, tag : String) -> Option<Vec<String>> { let mut ret_vec : Option<Vec<String>> = None; for (element, matchers) in &self.matchers_pool { for matcher in matchers { if *matcher == tag { match &mut ret_vec { Some(vec) => { vec.push(element.clone()); } None => { let vec = vec!(element.clone()); ret_vec = Some(vec); } } } } } ret_vec } } #[derive(Debug)] struct Directionary { // verbs : Vec<String>, // nouns : Vec<String>, // advs : 
Vec<String>, // adjs : Vec<String>, library : std::collections::HashMap<String, Vec<String>>, matcher : TagMatcher, } impl Directionary { pub fn new() -> Directionary { use std::collections::HashMap; // TODO FINISH THIS let tag_matcher = TagMatcher::new() .add("Location", vec!("nis","ntcb","ntcf","s","na","ns","ntc","nts","nth","ntch","nto","nit","nt","nsf","nz","f","ntu","nsf",)) .add("Name", vec!("nr","nba","nrfg","nrf","nrj",)) .add("Time", vec!("tg","t","Mg")) .add("GenericNoun", vec!("gb","vf","nnd","nhd","nmc","nbc","gc","nhm","ng","gg","gi","n","gp","gm","nnt",)) .add("AllNouns", vec!("vf","nis","ntcb","ntcf","gb","nhd","j","nr","nba","s","nmc","nnd","nrfg","na","ns","ntc","nbc","gc","nts","nth","x","ntch","nto","nit","nrf","nhm","ng","nrt","ntu","gg","gi","nt","nsf","nrj","nz","f","n","gp","gm","tg","nnt","t","Mg",)) .add("Numeral",vec!("m")) .add("Quantifier", vec!("qv","q","qt",)) .add("IndependentVerb", vec!("vl",)) .add("TranstiveVerb", vec!("pba","pbei","vyou","vshi","vd","vx","vq","vi","vn",)) .add("IntranstiveVerb", vec!("vg","uguo","v","vf",)) .add("AllVerbs", vec!("vyou","uguo","vd","v","vx","vi","pba","pbei","vl","vg","vq","vn","vshi","vf",)) .add("Adjective", vec!("b","mq","bl","a","z","al","ag","an","œa",)) .add("Adverb",vec!("b","bl","ad","d","dl","œa","dg",)) .add("AllPronouns", vec!("rr","rz","ryt","Rg","ry","rys","rzs","rzt","ryv","k",)) .add("AskWhen", vec!("rzt")) .add("When", vec!("ryt")) .add("AskHow", vec!("ryv")) .add("AskWhere", vec!("rys")) .add("Where", vec!("rzs")) .add("Who", vec!("rr","rz","Rg",)) .add("AskWho", vec!("ry")) .add("Conjunction", vec!("rzv","u","c","cc",)) .add("Preposition", vec!("r","uyy","udeng","p","udh",)) .add("Particle", vec!("uzhe","uls","ule","usuo","ulian","uzhi","ude",)) .add("AllModals", vec!("y","e","o",)) .add("PostFixModal", vec!("y")) .add("PreFixModal", vec!("e")) .add("Onomatopoeia", vec!("o")); let new_library = HashMap::new(); Directionary { library : new_library, matcher : tag_matcher, } } pub 
fn from_default(highest_input : Option<u32>, lowest_input : Option<u32>) -> Directionary { // TODO use std::fs; let highest_frequency : u32 = match highest_input { Some(frequency) => frequency, None => 2147483647, }; let lowest_frequency : u32 = match lowest_input { Some(frequency) => frequency, None=> 0, }; let mut directionary = Directionary::new(); let raw_bytes = fs::read_to_string("resources/ansj_seg-master/default.dic").expect("failed to open directionary file"); let filterd_bytes = raw_bytes.replace(&['\n'][..], "\t"); let seperate_words : Vec<&str> = filterd_bytes.split('\t').collect(); let mut i : u64 = 0; let mut last_word : &str = ""; let mut last_tag : &str = ""; let mut frequency; for tag in seperate_words { i = i + 1; // 第一列为word, 第二列为tag, 第三列为frequency let count = i % 3; match count { 0 => { // println!("tag{:?}, result{:?}", &tag, tag.parse::<u32>()); frequency = tag.parse::<u32>().unwrap(); }, // 第三列 1 => { last_word = tag; continue; }, // 第一列 _ => { last_tag = tag; continue; }, // 第二列 } if (frequency > highest_frequency) || (frequency < lowest_frequency) { continue; } let mut word = Word::from_literal(last_word); word.set_tag(last_tag); directionary.add_a_word(&word); } directionary } pub fn find_a_word(&self, element : &str, resolver : &mut RandomResolver) -> String { let library_vec = self.library.get(element); match library_vec { Some(_) => {} None => {panic!("failed to get element type {}", element);} } let library_vec = library_vec.unwrap(); let word = library_vec.get( resolver.get_pos(library_vec.len())).unwrap(); word.clone() } pub fn add_a_word(&mut self, new_word : &Word) { let matcher_result = self.matcher.resolve(new_word.tag.clone()); // print!("word :{:?}, result :{:?}", &new_word, matcher_result); match matcher_result { Some(element_vec) => { for element in element_vec{ let library_result = self.library.get_mut(element.as_str()); match library_result { Some(ele_vec) => { ele_vec.push(new_word.literal.clone()); } None => { 
self.library.insert(element.clone(), vec!(new_word.literal.clone())); } } } } None => {} } } } #[derive(Debug)] struct RandomResolver { rng : oorandom::Rand64, } impl RandomResolver { pub fn from_seed(seed : u128) -> Self { let rng = oorandom::Rand64::new(seed); RandomResolver{rng : rng} } fn resolve_pos(&mut self, vec : &Vec<f64>) -> usize { let float_result = self.rng.rand_float(); let mut sum = 0.0f64; let size = vec.len(); for i in 0..size { sum += vec[i]; if sum > float_result { return i; } } return size; } pub fn get_pos(&mut self, size : usize) -> usize { let float_result = self.rng.rand_float(); (size as f64 * float_result) as usize } } #[derive(Debug, Clone)] enum SentanceItem { Element(String), // element type to be resolve Word(String), Icon(char), } #[derive(Debug, Clone)] struct SentanceNode { item : SentanceItem, next : Option<Box<SentanceNode>>, } impl SentanceNode { pub fn element(element_name : &str) -> SentanceNode { SentanceNode { item : SentanceItem::Element(String::from(element_name)), next : None, } } pub fn icon(icon : char) -> SentanceNode{ SentanceNode { item : SentanceItem::Icon(icon), next : None, } } pub fn word(word : &str) -> SentanceNode{ SentanceNode { item : SentanceItem::Word(String::from(word)), next : None, } } // insert a node, panic if already has one pub fn next(mut self, next : SentanceNode) -> Self { match &self.next { Some(_) => { panic!("node {:?} already have a next node", &self); } None => { self.next = Some(Box::new(next)); } } self } // get the result from element fn resolve(&mut self, resolver : &mut RandomResolver, dict : &Directionary) { match &self.item { SentanceItem::Element(element) => { self.item = SentanceItem::Word( dict.find_a_word(element.as_str(), resolver)); } _ => {} } } pub fn resolve_sentance(&mut self, resolver : &mut RandomResolver, dict : &Directionary) { self.resolve(resolver, dict); match &mut self.next { Some(node) => {node.resolve_sentance(resolver, dict);} None => {} } } pub fn to_string(self) 
-> String { let mut string = String::new(); match self.item { SentanceItem::Word(word) => string.push_str(word.as_str()), SentanceItem::Icon(icon) => string.push(icon), _ => {} } match self.next { Some(node) => string.push_str(node.to_string().as_str()), None => {} } string } } #[derive(Debug)] struct ComedyWriter { possibilitys : Vec<f64>, sentances : Vec<Box<SentanceNode>>, resolver : RandomResolver, directionary : Directionary, } impl ComedyWriter { pub fn from_seed(random_seed : u128, highest_frequency : Option<u32>, lowest_frequency : Option<u32>) -> ComedyWriter { ComedyWriter{ possibilitys : Vec::new(), sentances : Vec::new(), resolver : RandomResolver::from_seed(random_seed), directionary : Directionary::from_default( highest_frequency, lowest_frequency), } } pub fn add_node(&mut self, sentance : SentanceNode, posssibility : f64) -> &mut Self { self.sentances.push(Box::new(sentance)); self.possibilitys.push(posssibility); self } fn normalize(&mut self) { let mut sum = 0.0f64; for chance in &self.possibilitys { sum += chance; } for chance in &mut self.possibilitys { *chance /= sum; } } pub fn write(&mut self, number : u32) -> String { self.normalize(); let mut article = String::new(); for _ in 0..number { let pos = self.resolver.resolve_pos(&self.possibilitys); let mut sentance = self.sentances[pos].clone(); sentance.resolve_sentance(&mut self.resolver, &self.directionary); let string = sentance.to_string(); article.push_str(string.as_str()); } article } } fn main() { let mut writer = ComedyWriter::from_seed( 65536, None, None); writer .add_node(sentance
[element ="Who"][word = "是从"][element = "Location"][word="来的."] ), 1.8) .add_node(sentance!( [element="AskWho"][word="在"][element ="Time"][element="TranstiveVerb"] [element="Adjective"][word="的"][element="GenericNoun"][icon='?'] ), 0.4) .add_node(sentance!( [word = "建议"][element = "Location"][word="的"][element="Name"] [element="Time"][word="就"][element="IntranstiveVerb"][icon='.'] ), 0.2); let result = writer.write(50); println!("{}",result); }
!(
identifier_name
main.rs
use oorandom; #[test] fn research_on_directionary() { // 词性分类函数, 找出.dic的标识符以及每个标识符 10 个id use std::fs; use std::collections::HashMap; let mut hash : HashMap<&str, Vec<&str>> = HashMap::new(); let raw_bytes = fs::read_to_string("resources/ansj_seg-master/default.dic").expect("failed to open directionary file"); let filterd_bytes = raw_bytes.replace(&['0','1','2','3','4','5','6','7','8','9','\n'][..], ""); let seperate_words : Vec<&str> = filterd_bytes.split('\t').collect(); let mut i : u64 = 0; let mut last_word : &str = ""; for tag in seperate_words {
continue; } let hash_get = hash.get_mut(tag); match hash_get { None => { let vec = vec!(last_word); hash.insert(tag, vec); } Some(vec) => { if vec.len() >= 10 {continue;} vec.push(last_word); } } } println!("{:?}", hash); } #[test] fn add_directionary() { let mut random_word = Word::from_literal("okay"); random_word.set_tag("v"); let mut random_wor2 = Word::from_literal("no"); random_wor2.set_tag("l"); let mut directionary = Directionary::new(); directionary.add_a_word(&random_word); directionary.add_a_word(&random_wor2); println!("{:?}", directionary); } #[test] fn create_directionary() { let directionary = Directionary::from_default(None, Some(500)); println!("generation finished!"); println!("result {:?}", directionary); } #[test] fn create_sentance() { let mut resolver = RandomResolver::from_seed(64u128); let directionary = Directionary::from_default(None, None); let mut sentance = SentanceNode::word("其实") .next( SentanceNode::icon(',') .next( SentanceNode::element("Noun") .next( SentanceNode::word("是") .next( SentanceNode::element("Noun") .next( SentanceNode::icon(',') .next( SentanceNode::word("你知道吗?") )))))); sentance.resolve_sentance(&mut resolver, &directionary); let output = sentance.to_string(); println!("result {}", output); } #[test] fn multi_sentance() { let mut resolver = RandomResolver::from_seed(1024u128); let directionary = Directionary::from_default(None, None); let generic_sentance = sentance!( [element="Who"][word="是"][element= "Adjective"][word="的"][element="Adjective"][element="IntranstiveVerb"][word="器."] ); for _ in 1..255 { let mut sentance = generic_sentance.clone(); sentance.resolve_sentance(&mut resolver, &directionary); let output = sentance.to_string(); println!("result {}", output); } } #[macro_export] macro_rules! 
sentance { ([$function:ident=$string:expr]$([$nfunction:ident=$nstring:expr])+ ) => { SentanceNode::$function($string).next(sentance!($([$nfunction=$nstring])+)) }; ([$function:ident=$string:expr]) => { SentanceNode::$function($string) }; } #[derive(Debug)] struct Word { pub tag : String, pub literal : String, } impl Word { pub fn from_literal(init_literal : &str) -> Word { Word{ tag : String::new(), literal : String::from(init_literal) } } pub fn set_tag(&mut self, new_tag : &str) { self.tag = String::from(new_tag); } } #[derive(Debug)] struct TagMatcher { pub matchers_pool : std::collections::BTreeMap<String, Vec<String>>, } impl TagMatcher { pub fn new() -> TagMatcher { use std::collections::BTreeMap; TagMatcher{matchers_pool : BTreeMap::new()} } fn add(mut self, tag : &'static str, matchers : Vec<&'static str>) -> Self { let matcher_result = self.matchers_pool.get(tag); match matcher_result { Some(_) => { } None => { self.matchers_pool.insert(String::from(tag), Vec::new()); } } let matcher_vec = self.matchers_pool.get_mut(tag).unwrap(); for matcher in matchers { matcher_vec.push(String::from(matcher)); } self } pub fn resolve(&self, tag : String) -> Option<Vec<String>> { let mut ret_vec : Option<Vec<String>> = None; for (element, matchers) in &self.matchers_pool { for matcher in matchers { if *matcher == tag { match &mut ret_vec { Some(vec) => { vec.push(element.clone()); } None => { let vec = vec!(element.clone()); ret_vec = Some(vec); } } } } } ret_vec } } #[derive(Debug)] struct Directionary { // verbs : Vec<String>, // nouns : Vec<String>, // advs : Vec<String>, // adjs : Vec<String>, library : std::collections::HashMap<String, Vec<String>>, matcher : TagMatcher, } impl Directionary { pub fn new() -> Directionary { use std::collections::HashMap; // TODO FINISH THIS let tag_matcher = TagMatcher::new() .add("Location", vec!("nis","ntcb","ntcf","s","na","ns","ntc","nts","nth","ntch","nto","nit","nt","nsf","nz","f","ntu","nsf",)) .add("Name", 
vec!("nr","nba","nrfg","nrf","nrj",)) .add("Time", vec!("tg","t","Mg")) .add("GenericNoun", vec!("gb","vf","nnd","nhd","nmc","nbc","gc","nhm","ng","gg","gi","n","gp","gm","nnt",)) .add("AllNouns", vec!("vf","nis","ntcb","ntcf","gb","nhd","j","nr","nba","s","nmc","nnd","nrfg","na","ns","ntc","nbc","gc","nts","nth","x","ntch","nto","nit","nrf","nhm","ng","nrt","ntu","gg","gi","nt","nsf","nrj","nz","f","n","gp","gm","tg","nnt","t","Mg",)) .add("Numeral",vec!("m")) .add("Quantifier", vec!("qv","q","qt",)) .add("IndependentVerb", vec!("vl",)) .add("TranstiveVerb", vec!("pba","pbei","vyou","vshi","vd","vx","vq","vi","vn",)) .add("IntranstiveVerb", vec!("vg","uguo","v","vf",)) .add("AllVerbs", vec!("vyou","uguo","vd","v","vx","vi","pba","pbei","vl","vg","vq","vn","vshi","vf",)) .add("Adjective", vec!("b","mq","bl","a","z","al","ag","an","œa",)) .add("Adverb",vec!("b","bl","ad","d","dl","œa","dg",)) .add("AllPronouns", vec!("rr","rz","ryt","Rg","ry","rys","rzs","rzt","ryv","k",)) .add("AskWhen", vec!("rzt")) .add("When", vec!("ryt")) .add("AskHow", vec!("ryv")) .add("AskWhere", vec!("rys")) .add("Where", vec!("rzs")) .add("Who", vec!("rr","rz","Rg",)) .add("AskWho", vec!("ry")) .add("Conjunction", vec!("rzv","u","c","cc",)) .add("Preposition", vec!("r","uyy","udeng","p","udh",)) .add("Particle", vec!("uzhe","uls","ule","usuo","ulian","uzhi","ude",)) .add("AllModals", vec!("y","e","o",)) .add("PostFixModal", vec!("y")) .add("PreFixModal", vec!("e")) .add("Onomatopoeia", vec!("o")); let new_library = HashMap::new(); Directionary { library : new_library, matcher : tag_matcher, } } pub fn from_default(highest_input : Option<u32>, lowest_input : Option<u32>) -> Directionary { // TODO use std::fs; let highest_frequency : u32 = match highest_input { Some(frequency) => frequency, None => 2147483647, }; let lowest_frequency : u32 = match lowest_input { Some(frequency) => frequency, None=> 0, }; let mut directionary = Directionary::new(); let raw_bytes = 
fs::read_to_string("resources/ansj_seg-master/default.dic").expect("failed to open directionary file"); let filterd_bytes = raw_bytes.replace(&['\n'][..], "\t"); let seperate_words : Vec<&str> = filterd_bytes.split('\t').collect(); let mut i : u64 = 0; let mut last_word : &str = ""; let mut last_tag : &str = ""; let mut frequency; for tag in seperate_words { i = i + 1; // 第一列为word, 第二列为tag, 第三列为frequency let count = i % 3; match count { 0 => { // println!("tag{:?}, result{:?}", &tag, tag.parse::<u32>()); frequency = tag.parse::<u32>().unwrap(); }, // 第三列 1 => { last_word = tag; continue; }, // 第一列 _ => { last_tag = tag; continue; }, // 第二列 } if (frequency > highest_frequency) || (frequency < lowest_frequency) { continue; } let mut word = Word::from_literal(last_word); word.set_tag(last_tag); directionary.add_a_word(&word); } directionary } pub fn find_a_word(&self, element : &str, resolver : &mut RandomResolver) -> String { let library_vec = self.library.get(element); match library_vec { Some(_) => {} None => {panic!("failed to get element type {}", element);} } let library_vec = library_vec.unwrap(); let word = library_vec.get( resolver.get_pos(library_vec.len())).unwrap(); word.clone() } pub fn add_a_word(&mut self, new_word : &Word) { let matcher_result = self.matcher.resolve(new_word.tag.clone()); // print!("word :{:?}, result :{:?}", &new_word, matcher_result); match matcher_result { Some(element_vec) => { for element in element_vec{ let library_result = self.library.get_mut(element.as_str()); match library_result { Some(ele_vec) => { ele_vec.push(new_word.literal.clone()); } None => { self.library.insert(element.clone(), vec!(new_word.literal.clone())); } } } } None => {} } } } #[derive(Debug)] struct RandomResolver { rng : oorandom::Rand64, } impl RandomResolver { pub fn from_seed(seed : u128) -> Self { let rng = oorandom::Rand64::new(seed); RandomResolver{rng : rng} } fn resolve_pos(&mut self, vec : &Vec<f64>) -> usize { let float_result = 
self.rng.rand_float(); let mut sum = 0.0f64; let size = vec.len(); for i in 0..size { sum += vec[i]; if sum > float_result { return i; } } return size; } pub fn get_pos(&mut self, size : usize) -> usize { let float_result = self.rng.rand_float(); (size as f64 * float_result) as usize } } #[derive(Debug, Clone)] enum SentanceItem { Element(String), // element type to be resolve Word(String), Icon(char), } #[derive(Debug, Clone)] struct SentanceNode { item : SentanceItem, next : Option<Box<SentanceNode>>, } impl SentanceNode { pub fn element(element_name : &str) -> SentanceNode { SentanceNode { item : SentanceItem::Element(String::from(element_name)), next : None, } } pub fn icon(icon : char) -> SentanceNode{ SentanceNode { item : SentanceItem::Icon(icon), next : None, } } pub fn word(word : &str) -> SentanceNode{ SentanceNode { item : SentanceItem::Word(String::from(word)), next : None, } } // insert a node, panic if already has one pub fn next(mut self, next : SentanceNode) -> Self { match &self.next { Some(_) => { panic!("node {:?} already have a next node", &self); } None => { self.next = Some(Box::new(next)); } } self } // get the result from element fn resolve(&mut self, resolver : &mut RandomResolver, dict : &Directionary) { match &self.item { SentanceItem::Element(element) => { self.item = SentanceItem::Word( dict.find_a_word(element.as_str(), resolver)); } _ => {} } } pub fn resolve_sentance(&mut self, resolver : &mut RandomResolver, dict : &Directionary) { self.resolve(resolver, dict); match &mut self.next { Some(node) => {node.resolve_sentance(resolver, dict);} None => {} } } pub fn to_string(self) -> String { let mut string = String::new(); match self.item { SentanceItem::Word(word) => string.push_str(word.as_str()), SentanceItem::Icon(icon) => string.push(icon), _ => {} } match self.next { Some(node) => string.push_str(node.to_string().as_str()), None => {} } string } } #[derive(Debug)] struct ComedyWriter { possibilitys : Vec<f64>, sentances : 
Vec<Box<SentanceNode>>, resolver : RandomResolver, directionary : Directionary, } impl ComedyWriter { pub fn from_seed(random_seed : u128, highest_frequency : Option<u32>, lowest_frequency : Option<u32>) -> ComedyWriter { ComedyWriter{ possibilitys : Vec::new(), sentances : Vec::new(), resolver : RandomResolver::from_seed(random_seed), directionary : Directionary::from_default( highest_frequency, lowest_frequency), } } pub fn add_node(&mut self, sentance : SentanceNode, posssibility : f64) -> &mut Self { self.sentances.push(Box::new(sentance)); self.possibilitys.push(posssibility); self } fn normalize(&mut self) { let mut sum = 0.0f64; for chance in &self.possibilitys { sum += chance; } for chance in &mut self.possibilitys { *chance /= sum; } } pub fn write(&mut self, number : u32) -> String { self.normalize(); let mut article = String::new(); for _ in 0..number { let pos = self.resolver.resolve_pos(&self.possibilitys); let mut sentance = self.sentances[pos].clone(); sentance.resolve_sentance(&mut self.resolver, &self.directionary); let string = sentance.to_string(); article.push_str(string.as_str()); } article } } fn main() { let mut writer = ComedyWriter::from_seed( 65536, None, None); writer .add_node(sentance!( [element ="Who"][word = "是从"][element = "Location"][word="来的."] ), 1.8) .add_node(sentance!( [element="AskWho"][word="在"][element ="Time"][element="TranstiveVerb"] [element="Adjective"][word="的"][element="GenericNoun"][icon='?'] ), 0.4) .add_node(sentance!( [word = "建议"][element = "Location"][word="的"][element="Name"] [element="Time"][word="就"][element="IntranstiveVerb"][icon='.'] ), 0.2); let result = writer.write(50); println!("{}",result); }
i = i + 1; // 奇数列为word, 偶数列为tag if i % 2 != 0 { last_word = tag;
random_line_split
main.rs
use oorandom; #[test] fn research_on_directionary() { // 词性分类函数, 找出.dic的标识符以及每个标识符 10 个id use std::fs; use std::collections::HashMap; let mut hash : HashMap<&str, Vec<&str>> = HashMap::new(); let raw_bytes = fs::read_to_string("resources/ansj_seg-master/default.dic").expect("failed to open directionary file"); let filterd_bytes = raw_bytes.replace(&['0','1','2','3','4','5','6','7','8','9','\n'][..], ""); let seperate_words : Vec<&str> = filterd_bytes.split('\t').collect(); let mut i : u64 = 0; let mut last_word : &str = ""; for tag in seperate_words { i = i + 1; // 奇数列为word, 偶数列为tag if i % 2 != 0 { last_word = tag; continue; } let hash_get = hash.get_mut(tag); match hash_get { None => { let vec = vec!(last_word); hash.insert(tag, vec); } Some(vec) => { if vec.len() >= 10 {continue;} vec.push(last_word); } } } println!("{:?}", hash); } #[test] fn add_directionary() { let mut random_word = Word::from_literal("okay"); random_word.set_tag("v"); let mut random_wor2 = Word::from_literal("no"); random_wor2.set_tag("l"); let mut directionary = Directionary::new(); directionary.add_a_word(&random_word); directionary.add_a_word(&random_wor2); println!("{:?}", directionary); } #[test] fn create_directionary() { let directionary = Directionary::from_default(None, Some(500)); println!("generation finished!"); println!("result {:?}", directionary); } #[test] fn create_sentance() { let mut resolver = RandomResolver::from_seed(64u128); let directionary = Directionary::from_default(None, None); let mut sentance = SentanceNode::word("其实") .next( SentanceNode::icon(',') .next( SentanceNode::element("Noun") .next( SentanceNode::word("是") .next( SentanceNode::element("Noun") .next( SentanceNode::icon(',') .next( SentanceNode::word("你知道吗?") )))))); sentance.resolve_sentance(&mut resolver, &directionary); let output = sentance.to_string(); println!("result {}", output); } #[test] fn multi_sentance() { let mut resolver = RandomResolver::from_seed(1024u128); let directionary = 
Directionary::from_default(None, None); let generic_sentance = sentance!( [element="Who"][word="是"][element= "Adjective"][word="的"][element="Adjective"][element="IntranstiveVerb"][word="器."] ); for _ in 1..255 { let mut sentance = generic_sentance.clone(); sentance.resolve_sentance(&mut resolver, &directionary); let output = sentance.to_string(); println!("result {}", output); } } #[macro_export] macro_rules! sentance { ([$function:ident=$string:expr]$([$nfunction:ident=$nstring:expr])+ ) => { SentanceNode::$function($string).next(sentance!($([$nfunction=$nstring])+)) }; ([$function:ident=$string:expr]) => { SentanceNode::$function($string) }; } #[derive(Debug)] struct Word { pub tag : String, pub literal : String, } impl Word { pub fn from_literal(init_literal : &str) -> Word { Word{ tag : String::new(), literal : String::from(init_literal) } } pub fn set_tag(&mut self, new_tag : &str) { self.tag = String::from(new_tag); } } #[derive(Debug)] struct TagMatcher { pub matchers_pool : std::collections::BTreeMap<String, Vec<String>>, } impl TagMatcher { pub fn new() -> TagMatcher { use std::collections::BTreeMap; TagMatcher{matchers_pool : BTreeMap::new()} } fn add(mut self, tag : &'static str, matchers : Vec<&'static str>) -> Self { let matcher_result = self.matchers_pool.get(tag); match matcher_result { Some(_) => { } None => { self.matchers_pool.insert(String::from(tag), Vec::new()); } } let matcher_vec = self.matchers_pool.get_mut(tag).unwrap(); for matcher in matchers { matcher_vec.push(String::from(matcher)); } self } pub fn resolve(&self, tag : String) -> Option<Vec<String>> { let mut ret_vec : Option<Vec<String>> = None; for (element, matchers) in &self.matchers_pool { for matcher in matchers { if *matcher == tag { match &mut ret_vec { Some(vec) => { vec.push(element.clone()); } None => { let vec = vec!(element.clone()); ret_vec = Some(vec); } } } } } ret_vec } } #[derive(Debug)] struct Directionary { // verbs : Vec<String>, // nouns : Vec<String>, // advs : 
Vec<String>, // adjs : Vec<String>, library : std::collections::HashMap<String, Vec<String>>, matcher : TagMatcher, } impl Directionary { pub fn new() -> Directionary { use std::collections::HashMap; // TODO FINISH THIS let tag_matcher = TagMatcher::new() .add("Location", vec!("nis","ntcb","ntcf","s","na","ns","ntc","nts","nth","ntch","nto","nit","nt","nsf","nz","f","ntu","nsf",)) .add("Name", vec!("nr","nba","nrfg","nrf","nrj",)) .add("Time", vec!("tg","t","Mg")) .add("GenericNoun", vec!("gb","vf","nnd","nhd","nmc","nbc","gc","nhm","ng","gg","gi","n","gp","gm","nnt",)) .add("AllNouns", vec!("vf","nis","ntcb","ntcf","gb","nhd","j","nr","nba","s","nmc","nnd","nrfg","na","ns","ntc","nbc","gc","nts","nth","x","ntch","nto","nit","nrf","nhm","ng","nrt","ntu","gg","gi","nt","nsf","nrj","nz","f","n","gp","gm","tg","nnt","t","Mg",)) .add("Numeral",vec!("m")) .add("Quantifier", vec!("qv","q","qt",)) .add("IndependentVerb", vec!("vl",)) .add("TranstiveVerb", vec!("pba","pbei","vyou","vshi","vd","vx","vq","vi","vn",)) .add("IntranstiveVerb", vec!("vg","uguo","v","vf",)) .add("AllVerbs", vec!("vyou","uguo","vd","v","vx","vi","pba","pbei","vl","vg","vq","vn","vshi","vf",)) .add("Adjective", vec!("b","mq","bl","a","z","al","ag","an","œa",)) .add("Adverb",vec!("b","bl","ad","d","dl","œa","dg",)) .add("AllPronouns", vec!("rr","rz","ryt","Rg","ry","rys","rzs","rzt","ryv","k",)) .add("AskWhen", vec!("rzt")) .add("When", vec!("ryt")) .add("AskHow", vec!("ryv")) .add("AskWhere", vec!("rys")) .add("Where", vec!("rzs")) .add("Who", vec!("rr","rz","Rg",)) .add("AskWho", vec!("ry")) .add("Conjunction", vec!("rzv","u","c","cc",)) .add("Preposition", vec!("r","uyy","udeng","p","udh",)) .add("Particle", vec!("uzhe","uls","ule","usuo","ulian","uzhi","ude",)) .add("AllModals", vec!("y","e","o",)) .add("PostFixModal", vec!("y")) .add("PreFixModal", vec!("e")) .add("Onomatopoeia", vec!("o")); let new_library = HashMap::new(); Directionary { library : new_library, matcher : tag_matcher, } } pub 
fn from_default(highest_input : Option<u32>, lowest_input : Option<u32>) -> Directionary { // TODO use std::fs; let highest_frequency : u32 = match highest_input { Some(frequency) => frequency, None => 2147483647, }; let lowest_frequency : u32 = match lowest_input { Some(frequency) => frequency, None=> 0, }; let mut directionary = Directionary::new(); let raw_bytes = fs::read_to_string("resources/ansj_seg-master/default.dic").expect("failed to open directionary file"); let filterd_bytes = raw_bytes.replace(&['\n'][..], "\t"); let seperate_words : Vec<&str> = filterd_bytes.split('\t').collect(); let mut i : u64 = 0; let mut last_word : &str = ""; let mut last_tag : &str = ""; let mut frequency; for tag in seperate_words { i = i + 1; // 第一列为word, 第二列为tag, 第三列为frequency let count = i % 3; match count { 0 => { // println!("tag{:?}, result{:?}", &tag, tag.parse::<u32>()); frequency = tag.parse::<u32>().unwrap(); }, // 第三列 1 => { last_word = tag; continue; }, // 第一列 _ => { last_tag = tag; continue; }, // 第二列 } if (frequency > highest_frequency) || (frequency < lowest_frequency) { continue; } let mut word = Word::from_literal(last_word); word.set_tag(last_tag); directionary.add_a_word(&word); } directionary } pub fn find_a_word(&self, element : &str, resolver : &mut RandomResolver) -> String { let library_vec = self.library.get(element); match library_vec { Some(_) => {} None => {panic!("failed to get element type {}", element);} } let library_vec = library_vec.unwrap(); let word = library_vec.get( resolver.get_pos(library_vec.len())).unwrap(); word.clone() } pub fn add_a_word(&mut self, new_word : &Word) { let matcher_result = self.matcher.resolve(new_word.tag.clone()); // print!("word :{:?}, result :{:?}",
ed(seed : u128) -> Self { let rng = oorandom::Rand64::new(seed); RandomResolver{rng : rng} } fn resolve_pos(&mut self, vec : &Vec<f64>) -> usize { let float_result = self.rng.rand_float(); let mut sum = 0.0f64; let size = vec.len(); for i in 0..size { sum += vec[i]; if sum > float_result { return i; } } return size; } pub fn get_pos(&mut self, size : usize) -> usize { let float_result = self.rng.rand_float(); (size as f64 * float_result) as usize } } #[derive(Debug, Clone)] enum SentanceItem { Element(String), // element type to be resolve Word(String), Icon(char), } #[derive(Debug, Clone)] struct SentanceNode { item : SentanceItem, next : Option<Box<SentanceNode>>, } impl SentanceNode { pub fn element(element_name : &str) -> SentanceNode { SentanceNode { item : SentanceItem::Element(String::from(element_name)), next : None, } } pub fn icon(icon : char) -> SentanceNode{ SentanceNode { item : SentanceItem::Icon(icon), next : None, } } pub fn word(word : &str) -> SentanceNode{ SentanceNode { item : SentanceItem::Word(String::from(word)), next : None, } } // insert a node, panic if already has one pub fn next(mut self, next : SentanceNode) -> Self { match &self.next { Some(_) => { panic!("node {:?} already have a next node", &self); } None => { self.next = Some(Box::new(next)); } } self } // get the result from element fn resolve(&mut self, resolver : &mut RandomResolver, dict : &Directionary) { match &self.item { SentanceItem::Element(element) => { self.item = SentanceItem::Word( dict.find_a_word(element.as_str(), resolver)); } _ => {} } } pub fn resolve_sentance(&mut self, resolver : &mut RandomResolver, dict : &Directionary) { self.resolve(resolver, dict); match &mut self.next { Some(node) => {node.resolve_sentance(resolver, dict);} None => {} } } pub fn to_string(self) -> String { let mut string = String::new(); match self.item { SentanceItem::Word(word) => string.push_str(word.as_str()), SentanceItem::Icon(icon) => string.push(icon), _ => {} } match self.next { 
Some(node) => string.push_str(node.to_string().as_str()), None => {} } string } } #[derive(Debug)] struct ComedyWriter { possibilitys : Vec<f64>, sentances : Vec<Box<SentanceNode>>, resolver : RandomResolver, directionary : Directionary, } impl ComedyWriter { pub fn from_seed(random_seed : u128, highest_frequency : Option<u32>, lowest_frequency : Option<u32>) -> ComedyWriter { ComedyWriter{ possibilitys : Vec::new(), sentances : Vec::new(), resolver : RandomResolver::from_seed(random_seed), directionary : Directionary::from_default( highest_frequency, lowest_frequency), } } pub fn add_node(&mut self, sentance : SentanceNode, posssibility : f64) -> &mut Self { self.sentances.push(Box::new(sentance)); self.possibilitys.push(posssibility); self } fn normalize(&mut self) { let mut sum = 0.0f64; for chance in &self.possibilitys { sum += chance; } for chance in &mut self.possibilitys { *chance /= sum; } } pub fn write(&mut self, number : u32) -> String { self.normalize(); let mut article = String::new(); for _ in 0..number { let pos = self.resolver.resolve_pos(&self.possibilitys); let mut sentance = self.sentances[pos].clone(); sentance.resolve_sentance(&mut self.resolver, &self.directionary); let string = sentance.to_string(); article.push_str(string.as_str()); } article } } fn main() { let mut writer = ComedyWriter::from_seed( 65536, None, None); writer .add_node(sentance!( [element ="Who"][word = "是从"][element = "Location"][word="来的."] ), 1.8) .add_node(sentance!( [element="AskWho"][word="在"][element ="Time"][element="TranstiveVerb"] [element="Adjective"][word="的"][element="GenericNoun"][icon='?'] ), 0.4) .add_node(sentance!( [word = "建议"][element = "Location"][word="的"][element="Name"] [element="Time"][word="就"][element="IntranstiveVerb"][icon='.'] ), 0.2); let result = writer.write(50); println!("{}",result); }
&new_word, matcher_result); match matcher_result { Some(element_vec) => { for element in element_vec{ let library_result = self.library.get_mut(element.as_str()); match library_result { Some(ele_vec) => { ele_vec.push(new_word.literal.clone()); } None => { self.library.insert(element.clone(), vec!(new_word.literal.clone())); } } } } None => {} } } } #[derive(Debug)] struct RandomResolver { rng : oorandom::Rand64, } impl RandomResolver { pub fn from_se
identifier_body
users.py
""" API operations on User objects. """ import logging from sqlalchemy import false, true, or_ from galaxy import exceptions, util, web from galaxy.managers import users
from galaxy.web.base.controller import BaseAPIController from galaxy.web.base.controller import CreatesApiKeysMixin from galaxy.web.base.controller import CreatesUsersMixin from galaxy.web.base.controller import UsesTagsMixin log = logging.getLogger( __name__ ) class UserAPIController( BaseAPIController, UsesTagsMixin, CreatesUsersMixin, CreatesApiKeysMixin ): def __init__(self, app): super(UserAPIController, self).__init__(app) self.user_manager = users.UserManager(app) self.user_serializer = users.UserSerializer( app ) self.user_deserializer = users.UserDeserializer( app ) @expose_api def index( self, trans, deleted='False', f_email=None, f_name=None, f_any=None, **kwd ): """ GET /api/users GET /api/users/deleted Displays a collection (list) of users. :param deleted: (optional) If true, show deleted users :type deleted: bool :param f_email: (optional) An email address to filter on. (Non-admin users can only use this if ``expose_user_email`` is ``True`` in galaxy.ini) :type f_email: str :param f_name: (optional) A username to filter on. (Non-admin users can only use this if ``expose_user_name`` is ``True`` in galaxy.ini) :type f_name: str :param f_any: (optional) Filter on username OR email. 
(Non-admin users can use this, the email filter and username filter will only be active if their corresponding ``expose_user_*`` is ``True`` in galaxy.ini) :type f_any: str """ rval = [] query = trans.sa_session.query( trans.app.model.User ) deleted = util.string_as_bool( deleted ) if f_email and (trans.user_is_admin() or trans.app.config.expose_user_email): query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_email) ) if f_name and (trans.user_is_admin() or trans.app.config.expose_user_name): query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_name) ) if f_any: if trans.user_is_admin(): query = query.filter(or_( trans.app.model.User.email.like("%%%s%%" % f_any), trans.app.model.User.username.like("%%%s%%" % f_any) )) else: if trans.app.config.expose_user_email and trans.app.config.expose_user_name: query = query.filter(or_( trans.app.model.User.email.like("%%%s%%" % f_any), trans.app.model.User.username.like("%%%s%%" % f_any) )) elif trans.app.config.expose_user_email: query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_any) ) elif trans.app.config.expose_user_name: query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_any) ) if deleted: query = query.filter( trans.app.model.User.table.c.deleted == true() ) # only admins can see deleted users if not trans.user_is_admin(): return [] else: query = query.filter( trans.app.model.User.table.c.deleted == false() ) # special case: user can see only their own user # special case2: if the galaxy admin has specified that other user email/names are # exposed, we don't want special case #1 if not trans.user_is_admin() and not trans.app.config.expose_user_name and not trans.app.config.expose_user_email: item = trans.user.to_dict( value_mapper={ 'id': trans.security.encode_id } ) return [item] for user in query: item = user.to_dict( value_mapper={ 'id': trans.security.encode_id } ) # If NOT configured to expose_email, do not expose email UNLESS the user is self, or 
# the user is an admin if not trans.app.config.expose_user_name and user is not trans.user and not trans.user_is_admin(): del item['username'] if not trans.app.config.expose_user_email and user is not trans.user and not trans.user_is_admin(): del item['email'] # TODO: move into api_values rval.append( item ) return rval @expose_api_anonymous def show( self, trans, id, deleted='False', **kwd ): """ GET /api/users/{encoded_user_id} GET /api/users/deleted/{encoded_user_id} GET /api/users/current Displays information about a user. """ deleted = util.string_as_bool( deleted ) try: # user is requesting data about themselves if id == "current": # ...and is anonymous - return usage and quota (if any) if not trans.user: item = self.anon_user_api_value( trans ) return item # ...and is logged in - return full else: user = trans.user else: user = self.get_user( trans, id, deleted=deleted ) # check that the user is requesting themselves (and they aren't del'd) unless admin if not trans.user_is_admin(): assert trans.user == user assert not user.deleted except: raise exceptions.RequestParameterInvalidException( 'Invalid user id specified', id=id ) return self.user_serializer.serialize_to_view(user, view='detailed') @expose_api def create( self, trans, payload, **kwd ): """ POST /api/users Creates a new Galaxy user. 
""" if not trans.app.config.allow_user_creation and not trans.user_is_admin(): raise exceptions.ConfigDoesNotAllowException( 'User creation is not allowed in this Galaxy instance' ) if trans.app.config.use_remote_user and trans.user_is_admin(): user = trans.get_or_create_remote_user( remote_user_email=payload['remote_user_email'] ) elif trans.user_is_admin(): username = payload[ 'username' ] email = payload[ 'email' ] password = payload[ 'password' ] message = "\n".join( [ validate_email( trans, email ), validate_password( trans, password, password ), validate_publicname( trans, username ) ] ).rstrip() if message: raise exceptions.RequestParameterInvalidException( message ) else: user = self.create_user( trans=trans, email=email, username=username, password=password ) else: raise exceptions.NotImplemented() item = user.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id, 'total_disk_usage': float } ) return item @expose_api @web.require_admin def api_key( self, trans, user_id, **kwd ): """ POST /api/users/{encoded_user_id}/api_key Creates a new API key for specified user. 
""" user = self.get_user( trans, user_id ) key = self.create_api_key( trans, user ) return key @expose_api def update( self, trans, id, payload, **kwd ): """ update( self, trans, id, payload, **kwd ) * PUT /api/users/{id} updates the values for the item with the given ``id`` :type id: str :param id: the encoded id of the item to update :type payload: dict :param payload: a dictionary of new attribute values :rtype: dict :returns: an error object if an error occurred or a dictionary containing the serialized item after any changes """ current_user = trans.user user_to_update = self.user_manager.by_id( self.decode_id( id ) ) # only allow updating other users if they're admin editing_someone_else = current_user != user_to_update is_admin = trans.api_inherit_admin or self.user_manager.is_admin( current_user ) if editing_someone_else and not is_admin: raise exceptions.InsufficientPermissionsException( 'you are not allowed to update that user', id=id ) self.user_deserializer.deserialize( user_to_update, payload, user=current_user, trans=trans ) return self.user_serializer.serialize_to_view( user_to_update, view='detailed' ) @expose_api @web.require_admin def delete( self, trans, id, **kwd ): """ DELETE /api/users/{id} delete the user with the given ``id`` :param id: the encoded id of the user to delete :type id: str :param purge: (optional) if True, purge the user :type purge: bool """ if not trans.app.config.allow_user_deletion: raise exceptions.ConfigDoesNotAllowException( 'The configuration of this Galaxy instance does not allow admins to delete users.' 
) purge = util.string_as_bool(kwd.get('purge', False)) if purge: raise exceptions.NotImplemented('Purge option has not been implemented yet') user = self.get_user(trans, id) self.user_manager.delete(user) return self.user_serializer.serialize_to_view(user, view='detailed') @expose_api @web.require_admin def undelete( self, trans, **kwd ): raise exceptions.NotImplemented() # TODO: move to more basal, common resource than this def anon_user_api_value( self, trans ): """ Returns data for an anonymous user, truncated to only usage and quota_percent """ usage = trans.app.quota_agent.get_usage( trans ) percent = trans.app.quota_agent.get_percent( trans=trans, usage=usage ) return {'total_disk_usage': int( usage ), 'nice_total_disk_usage': util.nice_size( usage ), 'quota_percent': percent}
from galaxy.security.validate_user_input import validate_email from galaxy.security.validate_user_input import validate_password from galaxy.security.validate_user_input import validate_publicname from galaxy.web import _future_expose_api as expose_api from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous
random_line_split
users.py
""" API operations on User objects. """ import logging from sqlalchemy import false, true, or_ from galaxy import exceptions, util, web from galaxy.managers import users from galaxy.security.validate_user_input import validate_email from galaxy.security.validate_user_input import validate_password from galaxy.security.validate_user_input import validate_publicname from galaxy.web import _future_expose_api as expose_api from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous from galaxy.web.base.controller import BaseAPIController from galaxy.web.base.controller import CreatesApiKeysMixin from galaxy.web.base.controller import CreatesUsersMixin from galaxy.web.base.controller import UsesTagsMixin log = logging.getLogger( __name__ ) class UserAPIController( BaseAPIController, UsesTagsMixin, CreatesUsersMixin, CreatesApiKeysMixin ): def __init__(self, app): super(UserAPIController, self).__init__(app) self.user_manager = users.UserManager(app) self.user_serializer = users.UserSerializer( app ) self.user_deserializer = users.UserDeserializer( app ) @expose_api def index( self, trans, deleted='False', f_email=None, f_name=None, f_any=None, **kwd ): """ GET /api/users GET /api/users/deleted Displays a collection (list) of users. :param deleted: (optional) If true, show deleted users :type deleted: bool :param f_email: (optional) An email address to filter on. (Non-admin users can only use this if ``expose_user_email`` is ``True`` in galaxy.ini) :type f_email: str :param f_name: (optional) A username to filter on. (Non-admin users can only use this if ``expose_user_name`` is ``True`` in galaxy.ini) :type f_name: str :param f_any: (optional) Filter on username OR email. 
(Non-admin users can use this, the email filter and username filter will only be active if their corresponding ``expose_user_*`` is ``True`` in galaxy.ini) :type f_any: str """ rval = [] query = trans.sa_session.query( trans.app.model.User ) deleted = util.string_as_bool( deleted ) if f_email and (trans.user_is_admin() or trans.app.config.expose_user_email): query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_email) ) if f_name and (trans.user_is_admin() or trans.app.config.expose_user_name): query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_name) ) if f_any: if trans.user_is_admin(): query = query.filter(or_( trans.app.model.User.email.like("%%%s%%" % f_any), trans.app.model.User.username.like("%%%s%%" % f_any) )) else: if trans.app.config.expose_user_email and trans.app.config.expose_user_name: query = query.filter(or_( trans.app.model.User.email.like("%%%s%%" % f_any), trans.app.model.User.username.like("%%%s%%" % f_any) )) elif trans.app.config.expose_user_email: query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_any) ) elif trans.app.config.expose_user_name: query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_any) ) if deleted: query = query.filter( trans.app.model.User.table.c.deleted == true() ) # only admins can see deleted users if not trans.user_is_admin(): return [] else: query = query.filter( trans.app.model.User.table.c.deleted == false() ) # special case: user can see only their own user # special case2: if the galaxy admin has specified that other user email/names are # exposed, we don't want special case #1 if not trans.user_is_admin() and not trans.app.config.expose_user_name and not trans.app.config.expose_user_email: item = trans.user.to_dict( value_mapper={ 'id': trans.security.encode_id } ) return [item] for user in query: item = user.to_dict( value_mapper={ 'id': trans.security.encode_id } ) # If NOT configured to expose_email, do not expose email UNLESS the user is self, or 
# the user is an admin if not trans.app.config.expose_user_name and user is not trans.user and not trans.user_is_admin(): del item['username'] if not trans.app.config.expose_user_email and user is not trans.user and not trans.user_is_admin(): del item['email'] # TODO: move into api_values rval.append( item ) return rval @expose_api_anonymous def show( self, trans, id, deleted='False', **kwd ): """ GET /api/users/{encoded_user_id} GET /api/users/deleted/{encoded_user_id} GET /api/users/current Displays information about a user. """ deleted = util.string_as_bool( deleted ) try: # user is requesting data about themselves if id == "current": # ...and is anonymous - return usage and quota (if any) if not trans.user:
# ...and is logged in - return full else: user = trans.user else: user = self.get_user( trans, id, deleted=deleted ) # check that the user is requesting themselves (and they aren't del'd) unless admin if not trans.user_is_admin(): assert trans.user == user assert not user.deleted except: raise exceptions.RequestParameterInvalidException( 'Invalid user id specified', id=id ) return self.user_serializer.serialize_to_view(user, view='detailed') @expose_api def create( self, trans, payload, **kwd ): """ POST /api/users Creates a new Galaxy user. """ if not trans.app.config.allow_user_creation and not trans.user_is_admin(): raise exceptions.ConfigDoesNotAllowException( 'User creation is not allowed in this Galaxy instance' ) if trans.app.config.use_remote_user and trans.user_is_admin(): user = trans.get_or_create_remote_user( remote_user_email=payload['remote_user_email'] ) elif trans.user_is_admin(): username = payload[ 'username' ] email = payload[ 'email' ] password = payload[ 'password' ] message = "\n".join( [ validate_email( trans, email ), validate_password( trans, password, password ), validate_publicname( trans, username ) ] ).rstrip() if message: raise exceptions.RequestParameterInvalidException( message ) else: user = self.create_user( trans=trans, email=email, username=username, password=password ) else: raise exceptions.NotImplemented() item = user.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id, 'total_disk_usage': float } ) return item @expose_api @web.require_admin def api_key( self, trans, user_id, **kwd ): """ POST /api/users/{encoded_user_id}/api_key Creates a new API key for specified user. 
""" user = self.get_user( trans, user_id ) key = self.create_api_key( trans, user ) return key @expose_api def update( self, trans, id, payload, **kwd ): """ update( self, trans, id, payload, **kwd ) * PUT /api/users/{id} updates the values for the item with the given ``id`` :type id: str :param id: the encoded id of the item to update :type payload: dict :param payload: a dictionary of new attribute values :rtype: dict :returns: an error object if an error occurred or a dictionary containing the serialized item after any changes """ current_user = trans.user user_to_update = self.user_manager.by_id( self.decode_id( id ) ) # only allow updating other users if they're admin editing_someone_else = current_user != user_to_update is_admin = trans.api_inherit_admin or self.user_manager.is_admin( current_user ) if editing_someone_else and not is_admin: raise exceptions.InsufficientPermissionsException( 'you are not allowed to update that user', id=id ) self.user_deserializer.deserialize( user_to_update, payload, user=current_user, trans=trans ) return self.user_serializer.serialize_to_view( user_to_update, view='detailed' ) @expose_api @web.require_admin def delete( self, trans, id, **kwd ): """ DELETE /api/users/{id} delete the user with the given ``id`` :param id: the encoded id of the user to delete :type id: str :param purge: (optional) if True, purge the user :type purge: bool """ if not trans.app.config.allow_user_deletion: raise exceptions.ConfigDoesNotAllowException( 'The configuration of this Galaxy instance does not allow admins to delete users.' 
) purge = util.string_as_bool(kwd.get('purge', False)) if purge: raise exceptions.NotImplemented('Purge option has not been implemented yet') user = self.get_user(trans, id) self.user_manager.delete(user) return self.user_serializer.serialize_to_view(user, view='detailed') @expose_api @web.require_admin def undelete( self, trans, **kwd ): raise exceptions.NotImplemented() # TODO: move to more basal, common resource than this def anon_user_api_value( self, trans ): """ Returns data for an anonymous user, truncated to only usage and quota_percent """ usage = trans.app.quota_agent.get_usage( trans ) percent = trans.app.quota_agent.get_percent( trans=trans, usage=usage ) return {'total_disk_usage': int( usage ), 'nice_total_disk_usage': util.nice_size( usage ), 'quota_percent': percent}
item = self.anon_user_api_value( trans ) return item
conditional_block
users.py
""" API operations on User objects. """ import logging from sqlalchemy import false, true, or_ from galaxy import exceptions, util, web from galaxy.managers import users from galaxy.security.validate_user_input import validate_email from galaxy.security.validate_user_input import validate_password from galaxy.security.validate_user_input import validate_publicname from galaxy.web import _future_expose_api as expose_api from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous from galaxy.web.base.controller import BaseAPIController from galaxy.web.base.controller import CreatesApiKeysMixin from galaxy.web.base.controller import CreatesUsersMixin from galaxy.web.base.controller import UsesTagsMixin log = logging.getLogger( __name__ ) class
( BaseAPIController, UsesTagsMixin, CreatesUsersMixin, CreatesApiKeysMixin ): def __init__(self, app): super(UserAPIController, self).__init__(app) self.user_manager = users.UserManager(app) self.user_serializer = users.UserSerializer( app ) self.user_deserializer = users.UserDeserializer( app ) @expose_api def index( self, trans, deleted='False', f_email=None, f_name=None, f_any=None, **kwd ): """ GET /api/users GET /api/users/deleted Displays a collection (list) of users. :param deleted: (optional) If true, show deleted users :type deleted: bool :param f_email: (optional) An email address to filter on. (Non-admin users can only use this if ``expose_user_email`` is ``True`` in galaxy.ini) :type f_email: str :param f_name: (optional) A username to filter on. (Non-admin users can only use this if ``expose_user_name`` is ``True`` in galaxy.ini) :type f_name: str :param f_any: (optional) Filter on username OR email. (Non-admin users can use this, the email filter and username filter will only be active if their corresponding ``expose_user_*`` is ``True`` in galaxy.ini) :type f_any: str """ rval = [] query = trans.sa_session.query( trans.app.model.User ) deleted = util.string_as_bool( deleted ) if f_email and (trans.user_is_admin() or trans.app.config.expose_user_email): query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_email) ) if f_name and (trans.user_is_admin() or trans.app.config.expose_user_name): query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_name) ) if f_any: if trans.user_is_admin(): query = query.filter(or_( trans.app.model.User.email.like("%%%s%%" % f_any), trans.app.model.User.username.like("%%%s%%" % f_any) )) else: if trans.app.config.expose_user_email and trans.app.config.expose_user_name: query = query.filter(or_( trans.app.model.User.email.like("%%%s%%" % f_any), trans.app.model.User.username.like("%%%s%%" % f_any) )) elif trans.app.config.expose_user_email: query = query.filter( 
trans.app.model.User.email.like("%%%s%%" % f_any) ) elif trans.app.config.expose_user_name: query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_any) ) if deleted: query = query.filter( trans.app.model.User.table.c.deleted == true() ) # only admins can see deleted users if not trans.user_is_admin(): return [] else: query = query.filter( trans.app.model.User.table.c.deleted == false() ) # special case: user can see only their own user # special case2: if the galaxy admin has specified that other user email/names are # exposed, we don't want special case #1 if not trans.user_is_admin() and not trans.app.config.expose_user_name and not trans.app.config.expose_user_email: item = trans.user.to_dict( value_mapper={ 'id': trans.security.encode_id } ) return [item] for user in query: item = user.to_dict( value_mapper={ 'id': trans.security.encode_id } ) # If NOT configured to expose_email, do not expose email UNLESS the user is self, or # the user is an admin if not trans.app.config.expose_user_name and user is not trans.user and not trans.user_is_admin(): del item['username'] if not trans.app.config.expose_user_email and user is not trans.user and not trans.user_is_admin(): del item['email'] # TODO: move into api_values rval.append( item ) return rval @expose_api_anonymous def show( self, trans, id, deleted='False', **kwd ): """ GET /api/users/{encoded_user_id} GET /api/users/deleted/{encoded_user_id} GET /api/users/current Displays information about a user. 
""" deleted = util.string_as_bool( deleted ) try: # user is requesting data about themselves if id == "current": # ...and is anonymous - return usage and quota (if any) if not trans.user: item = self.anon_user_api_value( trans ) return item # ...and is logged in - return full else: user = trans.user else: user = self.get_user( trans, id, deleted=deleted ) # check that the user is requesting themselves (and they aren't del'd) unless admin if not trans.user_is_admin(): assert trans.user == user assert not user.deleted except: raise exceptions.RequestParameterInvalidException( 'Invalid user id specified', id=id ) return self.user_serializer.serialize_to_view(user, view='detailed') @expose_api def create( self, trans, payload, **kwd ): """ POST /api/users Creates a new Galaxy user. """ if not trans.app.config.allow_user_creation and not trans.user_is_admin(): raise exceptions.ConfigDoesNotAllowException( 'User creation is not allowed in this Galaxy instance' ) if trans.app.config.use_remote_user and trans.user_is_admin(): user = trans.get_or_create_remote_user( remote_user_email=payload['remote_user_email'] ) elif trans.user_is_admin(): username = payload[ 'username' ] email = payload[ 'email' ] password = payload[ 'password' ] message = "\n".join( [ validate_email( trans, email ), validate_password( trans, password, password ), validate_publicname( trans, username ) ] ).rstrip() if message: raise exceptions.RequestParameterInvalidException( message ) else: user = self.create_user( trans=trans, email=email, username=username, password=password ) else: raise exceptions.NotImplemented() item = user.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id, 'total_disk_usage': float } ) return item @expose_api @web.require_admin def api_key( self, trans, user_id, **kwd ): """ POST /api/users/{encoded_user_id}/api_key Creates a new API key for specified user. 
""" user = self.get_user( trans, user_id ) key = self.create_api_key( trans, user ) return key @expose_api def update( self, trans, id, payload, **kwd ): """ update( self, trans, id, payload, **kwd ) * PUT /api/users/{id} updates the values for the item with the given ``id`` :type id: str :param id: the encoded id of the item to update :type payload: dict :param payload: a dictionary of new attribute values :rtype: dict :returns: an error object if an error occurred or a dictionary containing the serialized item after any changes """ current_user = trans.user user_to_update = self.user_manager.by_id( self.decode_id( id ) ) # only allow updating other users if they're admin editing_someone_else = current_user != user_to_update is_admin = trans.api_inherit_admin or self.user_manager.is_admin( current_user ) if editing_someone_else and not is_admin: raise exceptions.InsufficientPermissionsException( 'you are not allowed to update that user', id=id ) self.user_deserializer.deserialize( user_to_update, payload, user=current_user, trans=trans ) return self.user_serializer.serialize_to_view( user_to_update, view='detailed' ) @expose_api @web.require_admin def delete( self, trans, id, **kwd ): """ DELETE /api/users/{id} delete the user with the given ``id`` :param id: the encoded id of the user to delete :type id: str :param purge: (optional) if True, purge the user :type purge: bool """ if not trans.app.config.allow_user_deletion: raise exceptions.ConfigDoesNotAllowException( 'The configuration of this Galaxy instance does not allow admins to delete users.' 
) purge = util.string_as_bool(kwd.get('purge', False)) if purge: raise exceptions.NotImplemented('Purge option has not been implemented yet') user = self.get_user(trans, id) self.user_manager.delete(user) return self.user_serializer.serialize_to_view(user, view='detailed') @expose_api @web.require_admin def undelete( self, trans, **kwd ): raise exceptions.NotImplemented() # TODO: move to more basal, common resource than this def anon_user_api_value( self, trans ): """ Returns data for an anonymous user, truncated to only usage and quota_percent """ usage = trans.app.quota_agent.get_usage( trans ) percent = trans.app.quota_agent.get_percent( trans=trans, usage=usage ) return {'total_disk_usage': int( usage ), 'nice_total_disk_usage': util.nice_size( usage ), 'quota_percent': percent}
UserAPIController
identifier_name
users.py
""" API operations on User objects. """ import logging from sqlalchemy import false, true, or_ from galaxy import exceptions, util, web from galaxy.managers import users from galaxy.security.validate_user_input import validate_email from galaxy.security.validate_user_input import validate_password from galaxy.security.validate_user_input import validate_publicname from galaxy.web import _future_expose_api as expose_api from galaxy.web import _future_expose_api_anonymous as expose_api_anonymous from galaxy.web.base.controller import BaseAPIController from galaxy.web.base.controller import CreatesApiKeysMixin from galaxy.web.base.controller import CreatesUsersMixin from galaxy.web.base.controller import UsesTagsMixin log = logging.getLogger( __name__ ) class UserAPIController( BaseAPIController, UsesTagsMixin, CreatesUsersMixin, CreatesApiKeysMixin ): def __init__(self, app): super(UserAPIController, self).__init__(app) self.user_manager = users.UserManager(app) self.user_serializer = users.UserSerializer( app ) self.user_deserializer = users.UserDeserializer( app ) @expose_api def index( self, trans, deleted='False', f_email=None, f_name=None, f_any=None, **kwd ): """ GET /api/users GET /api/users/deleted Displays a collection (list) of users. :param deleted: (optional) If true, show deleted users :type deleted: bool :param f_email: (optional) An email address to filter on. (Non-admin users can only use this if ``expose_user_email`` is ``True`` in galaxy.ini) :type f_email: str :param f_name: (optional) A username to filter on. (Non-admin users can only use this if ``expose_user_name`` is ``True`` in galaxy.ini) :type f_name: str :param f_any: (optional) Filter on username OR email. 
(Non-admin users can use this, the email filter and username filter will only be active if their corresponding ``expose_user_*`` is ``True`` in galaxy.ini) :type f_any: str """ rval = [] query = trans.sa_session.query( trans.app.model.User ) deleted = util.string_as_bool( deleted ) if f_email and (trans.user_is_admin() or trans.app.config.expose_user_email): query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_email) ) if f_name and (trans.user_is_admin() or trans.app.config.expose_user_name): query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_name) ) if f_any: if trans.user_is_admin(): query = query.filter(or_( trans.app.model.User.email.like("%%%s%%" % f_any), trans.app.model.User.username.like("%%%s%%" % f_any) )) else: if trans.app.config.expose_user_email and trans.app.config.expose_user_name: query = query.filter(or_( trans.app.model.User.email.like("%%%s%%" % f_any), trans.app.model.User.username.like("%%%s%%" % f_any) )) elif trans.app.config.expose_user_email: query = query.filter( trans.app.model.User.email.like("%%%s%%" % f_any) ) elif trans.app.config.expose_user_name: query = query.filter( trans.app.model.User.username.like("%%%s%%" % f_any) ) if deleted: query = query.filter( trans.app.model.User.table.c.deleted == true() ) # only admins can see deleted users if not trans.user_is_admin(): return [] else: query = query.filter( trans.app.model.User.table.c.deleted == false() ) # special case: user can see only their own user # special case2: if the galaxy admin has specified that other user email/names are # exposed, we don't want special case #1 if not trans.user_is_admin() and not trans.app.config.expose_user_name and not trans.app.config.expose_user_email: item = trans.user.to_dict( value_mapper={ 'id': trans.security.encode_id } ) return [item] for user in query: item = user.to_dict( value_mapper={ 'id': trans.security.encode_id } ) # If NOT configured to expose_email, do not expose email UNLESS the user is self, or 
# the user is an admin if not trans.app.config.expose_user_name and user is not trans.user and not trans.user_is_admin(): del item['username'] if not trans.app.config.expose_user_email and user is not trans.user and not trans.user_is_admin(): del item['email'] # TODO: move into api_values rval.append( item ) return rval @expose_api_anonymous def show( self, trans, id, deleted='False', **kwd ): """ GET /api/users/{encoded_user_id} GET /api/users/deleted/{encoded_user_id} GET /api/users/current Displays information about a user. """ deleted = util.string_as_bool( deleted ) try: # user is requesting data about themselves if id == "current": # ...and is anonymous - return usage and quota (if any) if not trans.user: item = self.anon_user_api_value( trans ) return item # ...and is logged in - return full else: user = trans.user else: user = self.get_user( trans, id, deleted=deleted ) # check that the user is requesting themselves (and they aren't del'd) unless admin if not trans.user_is_admin(): assert trans.user == user assert not user.deleted except: raise exceptions.RequestParameterInvalidException( 'Invalid user id specified', id=id ) return self.user_serializer.serialize_to_view(user, view='detailed') @expose_api def create( self, trans, payload, **kwd ): """ POST /api/users Creates a new Galaxy user. 
""" if not trans.app.config.allow_user_creation and not trans.user_is_admin(): raise exceptions.ConfigDoesNotAllowException( 'User creation is not allowed in this Galaxy instance' ) if trans.app.config.use_remote_user and trans.user_is_admin(): user = trans.get_or_create_remote_user( remote_user_email=payload['remote_user_email'] ) elif trans.user_is_admin(): username = payload[ 'username' ] email = payload[ 'email' ] password = payload[ 'password' ] message = "\n".join( [ validate_email( trans, email ), validate_password( trans, password, password ), validate_publicname( trans, username ) ] ).rstrip() if message: raise exceptions.RequestParameterInvalidException( message ) else: user = self.create_user( trans=trans, email=email, username=username, password=password ) else: raise exceptions.NotImplemented() item = user.to_dict( view='element', value_mapper={ 'id': trans.security.encode_id, 'total_disk_usage': float } ) return item @expose_api @web.require_admin def api_key( self, trans, user_id, **kwd ): """ POST /api/users/{encoded_user_id}/api_key Creates a new API key for specified user. 
""" user = self.get_user( trans, user_id ) key = self.create_api_key( trans, user ) return key @expose_api def update( self, trans, id, payload, **kwd ): """ update( self, trans, id, payload, **kwd ) * PUT /api/users/{id} updates the values for the item with the given ``id`` :type id: str :param id: the encoded id of the item to update :type payload: dict :param payload: a dictionary of new attribute values :rtype: dict :returns: an error object if an error occurred or a dictionary containing the serialized item after any changes """ current_user = trans.user user_to_update = self.user_manager.by_id( self.decode_id( id ) ) # only allow updating other users if they're admin editing_someone_else = current_user != user_to_update is_admin = trans.api_inherit_admin or self.user_manager.is_admin( current_user ) if editing_someone_else and not is_admin: raise exceptions.InsufficientPermissionsException( 'you are not allowed to update that user', id=id ) self.user_deserializer.deserialize( user_to_update, payload, user=current_user, trans=trans ) return self.user_serializer.serialize_to_view( user_to_update, view='detailed' ) @expose_api @web.require_admin def delete( self, trans, id, **kwd ):
@expose_api @web.require_admin def undelete( self, trans, **kwd ): raise exceptions.NotImplemented() # TODO: move to more basal, common resource than this def anon_user_api_value( self, trans ): """ Returns data for an anonymous user, truncated to only usage and quota_percent """ usage = trans.app.quota_agent.get_usage( trans ) percent = trans.app.quota_agent.get_percent( trans=trans, usage=usage ) return {'total_disk_usage': int( usage ), 'nice_total_disk_usage': util.nice_size( usage ), 'quota_percent': percent}
""" DELETE /api/users/{id} delete the user with the given ``id`` :param id: the encoded id of the user to delete :type id: str :param purge: (optional) if True, purge the user :type purge: bool """ if not trans.app.config.allow_user_deletion: raise exceptions.ConfigDoesNotAllowException( 'The configuration of this Galaxy instance does not allow admins to delete users.' ) purge = util.string_as_bool(kwd.get('purge', False)) if purge: raise exceptions.NotImplemented('Purge option has not been implemented yet') user = self.get_user(trans, id) self.user_manager.delete(user) return self.user_serializer.serialize_to_view(user, view='detailed')
identifier_body
requestpool.go
// Copyright IBM Corp. All Rights Reserved. // // SPDX-License-Identifier: Apache-2.0 // package bft import ( "container/list" "context" "fmt" "sync" "time" "github.com/SmartBFT-Go/consensus/pkg/api" "github.com/SmartBFT-Go/consensus/pkg/metrics/disabled" "github.com/SmartBFT-Go/consensus/pkg/types" "github.com/pkg/errors" "golang.org/x/sync/semaphore" ) const ( defaultRequestTimeout = 10 * time.Second // for unit tests only defaultMaxBytes = 100 * 1024 // default max request size would be of size 100Kb defaultSizeOfDelElements = 1000 // default size slice of delete elements defaultEraseTimeout = 5 * time.Second // for cicle erase silice of delete elements ) var ( ErrReqAlreadyExists = fmt.Errorf("request already exists") ErrReqAlreadyProcessed = fmt.Errorf("request already processed") ErrRequestTooBig = fmt.Errorf("submitted request is too big") ErrSubmitTimeout = fmt.Errorf("timeout submitting to request pool") ) //go:generate mockery -dir . -name RequestTimeoutHandler -case underscore -output ./mocks/ // RequestTimeoutHandler defines the methods called by request timeout timers created by time.AfterFunc. // This interface is implemented by the bft.Controller. type RequestTimeoutHandler interface { // OnRequestTimeout is called when a request timeout expires. OnRequestTimeout(request []byte, requestInfo types.RequestInfo) // OnLeaderFwdRequestTimeout is called when a leader forwarding timeout expires. OnLeaderFwdRequestTimeout(request []byte, requestInfo types.RequestInfo) // OnAutoRemoveTimeout is called when a auto-remove timeout expires. OnAutoRemoveTimeout(requestInfo types.RequestInfo) } // Pool implements requests pool, maintains pool of given size provided during // construction. In case there are more incoming request than given size it will // block during submit until there will be place to submit new ones. 
type Pool struct { logger api.Logger metrics *api.MetricsRequestPool inspector api.RequestInspector options PoolOptions cancel context.CancelFunc lock sync.RWMutex fifo *list.List semaphore *semaphore.Weighted existMap map[types.RequestInfo]*list.Element timeoutHandler RequestTimeoutHandler closed bool stopped bool submittedChan chan struct{} sizeBytes uint64 delMap map[types.RequestInfo]struct{} delSlice []types.RequestInfo } // requestItem captures request related information type requestItem struct { request []byte timeout *time.Timer additionTimestamp time.Time } // PoolOptions is the pool configuration type PoolOptions struct { QueueSize int64 ForwardTimeout time.Duration ComplainTimeout time.Duration AutoRemoveTimeout time.Duration RequestMaxBytes uint64 SubmitTimeout time.Duration Metrics *api.MetricsRequestPool } // NewPool constructs new requests pool func NewPool(log api.Logger, inspector api.RequestInspector, th RequestTimeoutHandler, options PoolOptions, submittedChan chan struct{}) *Pool { if options.ForwardTimeout == 0 { options.ForwardTimeout = defaultRequestTimeout } if options.ComplainTimeout == 0 { options.ComplainTimeout = defaultRequestTimeout } if options.AutoRemoveTimeout == 0 { options.AutoRemoveTimeout = defaultRequestTimeout } if options.RequestMaxBytes == 0 { options.RequestMaxBytes = defaultMaxBytes } if options.SubmitTimeout == 0 { options.SubmitTimeout = defaultRequestTimeout } if options.Metrics == nil { options.Metrics = api.NewMetricsRequestPool(&disabled.Provider{}) } ctx, cancel := context.WithCancel(context.Background()) rp := &Pool{ cancel: cancel, timeoutHandler: th, logger: log, metrics: options.Metrics, inspector: inspector, fifo: list.New(), semaphore: semaphore.NewWeighted(options.QueueSize), existMap: make(map[types.RequestInfo]*list.Element), options: options, submittedChan: submittedChan, delMap: make(map[types.RequestInfo]struct{}), delSlice: make([]types.RequestInfo, 0, defaultSizeOfDelElements), } go func() { tic := 
time.NewTicker(defaultEraseTimeout) for { select { case <-tic.C: rp.eraseFromDelSlice() case <-ctx.Done(): tic.Stop() return } } }() return rp } // ChangeTimeouts changes the timeout of the pool func (rp *Pool) ChangeTimeouts(th RequestTimeoutHandler, options PoolOptions) { rp.lock.Lock() defer rp.lock.Unlock() if !rp.stopped { rp.logger.Errorf("Trying to change timeouts but the pool is not stopped") return } if options.ForwardTimeout == 0 { options.ForwardTimeout = defaultRequestTimeout } if options.ComplainTimeout == 0 { options.ComplainTimeout = defaultRequestTimeout } if options.AutoRemoveTimeout == 0 { options.AutoRemoveTimeout = defaultRequestTimeout } rp.options.ForwardTimeout = options.ForwardTimeout rp.options.ComplainTimeout = options.ComplainTimeout rp.options.AutoRemoveTimeout = options.AutoRemoveTimeout rp.timeoutHandler = th rp.logger.Debugf("Changed pool timeouts") } func (rp *Pool) isClosed() bool { rp.lock.Lock() defer rp.lock.Unlock() return rp.closed } // Submit a request into the pool, returns an error when request is already in the pool func (rp *Pool) Submit(request []byte) error { reqInfo := rp.inspector.RequestID(request) if rp.isClosed() { return errors.Errorf("pool closed, request rejected: %s", reqInfo) } if uint64(len(request)) > rp.options.RequestMaxBytes { rp.metrics.CountOfFailAddRequestToPool.With( rp.metrics.LabelsForWith(api.NameReasonFailAdd, api.ReasonRequestMaxBytes)..., ).Add(1) return fmt.Errorf( "submitted request (%d) is bigger than request max bytes (%d)", len(request), rp.options.RequestMaxBytes, ) } rp.lock.RLock() _, alreadyExists := rp.existMap[reqInfo] _, alreadyDelete := rp.delMap[reqInfo] rp.lock.RUnlock() if alreadyExists { rp.logger.Debugf("request %s already exists in the pool", reqInfo) return ErrReqAlreadyExists } if alreadyDelete { rp.logger.Debugf("request %s already processed", reqInfo) return ErrReqAlreadyProcessed } ctx, cancel := context.WithTimeout(context.Background(), rp.options.SubmitTimeout) defer 
cancel() // do not wait for a semaphore with a lock, as it will prevent draining the pool. if err := rp.semaphore.Acquire(ctx, 1); err != nil { rp.metrics.CountOfFailAddRequestToPool.With( rp.metrics.LabelsForWith(api.NameReasonFailAdd, api.ReasonSemaphoreAcquireFail)..., ).Add(1) return errors.Wrapf(err, "acquiring semaphore for request: %s", reqInfo) } reqCopy := append(make([]byte, 0), request...) rp.lock.Lock() defer rp.lock.Unlock() if _, existsEl := rp.existMap[reqInfo]; existsEl { rp.semaphore.Release(1) rp.logger.Debugf("request %s has been already added to the pool", reqInfo) return ErrReqAlreadyExists } if _, deleteEl := rp.delMap[reqInfo]; deleteEl { rp.semaphore.Release(1) rp.logger.Debugf("request %s has been already processed", reqInfo) return ErrReqAlreadyProcessed } to := time.AfterFunc( rp.options.ForwardTimeout, func() { rp.onRequestTO(reqCopy, reqInfo) }, ) if rp.stopped { rp.logger.Debugf("pool stopped, submitting with a stopped timer, request: %s", reqInfo) to.Stop() } reqItem := &requestItem{ request: reqCopy, timeout: to, additionTimestamp: time.Now(), } element := rp.fifo.PushBack(reqItem) rp.metrics.CountOfRequestPool.Set(float64(rp.fifo.Len())) rp.metrics.CountOfRequestPoolAll.Add(1) rp.existMap[reqInfo] = element if len(rp.existMap) != rp.fifo.Len() { rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len()) } rp.logger.Debugf("Request %s submitted; started a timeout: %s", reqInfo, rp.options.ForwardTimeout) // notify that a request was submitted select { case rp.submittedChan <- struct{}{}: default: } rp.sizeBytes += uint64(len(element.Value.(*requestItem).request)) return nil } // Size returns the number of requests currently residing the pool func (rp *Pool) Size() int { rp.lock.Lock() defer rp.lock.Unlock() return len(rp.existMap) } // NextRequests returns the next requests to be batched. 
// It returns at most maxCount requests, and at most maxSizeBytes, in a newly allocated slice. // Return variable full indicates that the batch cannot be increased further by calling again with the same arguments. func (rp *Pool) NextRequests(maxCount int, maxSizeBytes uint64, check bool) (batch [][]byte, full bool) { rp.lock.Lock() defer rp.lock.Unlock() if check { if (len(rp.existMap) < maxCount) && (rp.sizeBytes < maxSizeBytes) { return nil, false } } count := minInt(rp.fifo.Len(), maxCount) var totalSize uint64 batch = make([][]byte, 0, count) element := rp.fifo.Front() for i := 0; i < count; i++ { req := element.Value.(*requestItem).request reqLen := uint64(len(req)) if totalSize+reqLen > maxSizeBytes { rp.logger.Debugf("Returning batch of %d requests totalling %dB as it exceeds threshold of %dB", len(batch), totalSize, maxSizeBytes) return batch, true } batch = append(batch, req) totalSize += reqLen element = element.Next() } fullS := totalSize >= maxSizeBytes fullC := len(batch) == maxCount full = fullS || fullC if len(batch) > 0 { rp.logger.Debugf("Returning batch of %d requests totalling %dB", len(batch), totalSize) } return batch, full } // Prune removes requests for which the given predicate returns error. 
func (rp *Pool) Prune(predicate func([]byte) error) { reqVec, infoVec := rp.copyRequests() var numPruned int for i, req := range reqVec { err := predicate(req) if err == nil { continue } if remErr := rp.RemoveRequest(infoVec[i]); remErr != nil { rp.logger.Debugf("Failed to prune request: %s; predicate error: %s; remove error: %s", infoVec[i], err, remErr) } else { rp.logger.Debugf("Pruned request: %s; predicate error: %s", infoVec[i], err) numPruned++ } } rp.logger.Debugf("Pruned %d requests", numPruned) } func (rp *Pool) copyRequests() (requestVec [][]byte, infoVec []types.RequestInfo) { rp.lock.Lock() defer rp.lock.Unlock() requestVec = make([][]byte, len(rp.existMap)) infoVec = make([]types.RequestInfo, len(rp.existMap)) var i int for info, item := range rp.existMap { infoVec[i] = info requestVec[i] = item.Value.(*requestItem).request i++ } return } // RemoveRequest removes the given request from the pool. func (rp *Pool) RemoveRequest(requestInfo types.RequestInfo) error { rp.lock.Lock() defer rp.lock.Unlock() element, exist := rp.existMap[requestInfo] if !exist { rp.moveToDelSlice(requestInfo) errStr := fmt.Sprintf("request %s is not in the pool at remove time", requestInfo) rp.logger.Debugf(errStr) return fmt.Errorf(errStr) } rp.deleteRequest(element, requestInfo) rp.sizeBytes -= uint64(len(element.Value.(*requestItem).request)) return nil } func (rp *Pool) deleteRequest(element *list.Element, requestInfo types.RequestInfo) { item := element.Value.(*requestItem) item.timeout.Stop() rp.fifo.Remove(element) rp.metrics.CountOfRequestPool.Set(float64(rp.fifo.Len())) rp.metrics.LatencyOfRequestPool.Observe(time.Since(item.additionTimestamp).Seconds()) delete(rp.existMap, requestInfo) rp.moveToDelSlice(requestInfo) rp.logger.Infof("Removed request %s from request pool", requestInfo) rp.semaphore.Release(1) if len(rp.existMap) != rp.fifo.Len() { rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len()) } } 
func (rp *Pool) moveToDelSlice(requestInfo types.RequestInfo) { _, exist := rp.delMap[requestInfo] if exist { return } rp.delMap[requestInfo] = struct{}{} rp.delSlice = append(rp.delSlice, requestInfo) } func (rp *Pool) eraseFromDelSlice() { rp.lock.RLock() l := len(rp.delSlice) rp.lock.RUnlock() if l <= defaultSizeOfDelElements { return } rp.lock.Lock() defer rp.lock.Unlock() n := len(rp.delSlice) - defaultSizeOfDelElements for _, r := range rp.delSlice[:n] { delete(rp.delMap, r) } rp.delSlice = rp.delSlice[n:] } // Close removes all the requests, stops all the timeout timers. func (rp *Pool) Close() { rp.lock.Lock() defer rp.lock.Unlock() rp.closed = true for requestInfo, element := range rp.existMap { rp.deleteRequest(element, requestInfo) } rp.cancel() } // StopTimers stops all the timeout timers attached to the pending requests, and marks the pool as "stopped". // This which prevents submission of new requests, and renewal of timeouts by timer go-routines that where running // at the time of the call to StopTimers(). func (rp *Pool) StopTimers() { rp.lock.Lock() defer rp.lock.Unlock() rp.stopped = true for _, element := range rp.existMap { item := element.Value.(*requestItem) item.timeout.Stop() } rp.logger.Debugf("Stopped all timers: size=%d", len(rp.existMap)) } // RestartTimers restarts all the timeout timers attached to the pending requests, as RequestForwardTimeout, and re-allows // submission of new requests. 
func (rp *Pool) RestartTimers() { rp.lock.Lock() defer rp.lock.Unlock() rp.stopped = false for reqInfo, element := range rp.existMap { item := element.Value.(*requestItem) item.timeout.Stop() ri := reqInfo to := time.AfterFunc( rp.options.ForwardTimeout, func() { rp.onRequestTO(item.request, ri) }, ) item.timeout = to } rp.logger.Debugf("Restarted all timers: size=%d", len(rp.existMap)) } // called by the goroutine spawned by time.AfterFunc func (rp *Pool) onRequestTO(request []byte, reqInfo types.RequestInfo) { rp.lock.Lock() element, contains := rp.existMap[reqInfo] if !contains { rp.lock.Unlock() rp.logger.Debugf("Request %s no longer in pool", reqInfo) return } if rp.closed || rp.stopped { rp.lock.Unlock() rp.logger.Debugf("Pool stopped, will NOT start a leader-forwarding timeout") return } // start a second timeout item := element.Value.(*requestItem) item.timeout = time.AfterFunc( rp.options.ComplainTimeout, func() { rp.onLeaderFwdRequestTO(request, reqInfo) }, ) rp.logger.Debugf("Request %s; started a leader-forwarding timeout: %s", reqInfo, rp.options.ComplainTimeout) rp.lock.Unlock() // may take time, in case Comm channel to leader is full; hence w/o the lock. 
rp.logger.Debugf("Request %s timeout expired, going to send to leader", reqInfo) rp.metrics.CountOfLeaderForwardRequest.Add(1) rp.timeoutHandler.OnRequestTimeout(request, reqInfo) } // called by the goroutine spawned by time.AfterFunc func (rp *Pool) onLeaderFwdRequestTO(request []byte, reqInfo types.RequestInfo) { rp.lock.Lock() element, contains := rp.existMap[reqInfo] if !contains { rp.lock.Unlock() rp.logger.Debugf("Request %s no longer in pool", reqInfo) return } if rp.closed || rp.stopped { rp.lock.Unlock() rp.logger.Debugf("Pool stopped, will NOT start auto-remove timeout") return } // start a third timeout item := element.Value.(*requestItem) item.timeout = time.AfterFunc( rp.options.AutoRemoveTimeout, func() { rp.onAutoRemoveTO(reqInfo) }, ) rp.logger.Debugf("Request %s; started auto-remove timeout: %s", reqInfo, rp.options.AutoRemoveTimeout) rp.lock.Unlock() // may take time, in case Comm channel is full; hence w/o the lock. rp.logger.Debugf("Request %s leader-forwarding timeout expired, going to complain on leader", reqInfo) rp.metrics.CountTimeoutTwoStep.Add(1) rp.timeoutHandler.OnLeaderFwdRequestTimeout(request, reqInfo) } // called by the goroutine spawned by time.AfterFunc func (rp *Pool) onAutoRemoveTO(reqInfo types.RequestInfo)
{ rp.logger.Debugf("Request %s auto-remove timeout expired, going to remove from pool", reqInfo) if err := rp.RemoveRequest(reqInfo); err != nil { rp.logger.Errorf("Removal of request %s failed; error: %s", reqInfo, err) return } rp.metrics.CountOfDeleteRequestPool.Add(1) rp.timeoutHandler.OnAutoRemoveTimeout(reqInfo) }
identifier_body
requestpool.go
// Copyright IBM Corp. All Rights Reserved. // // SPDX-License-Identifier: Apache-2.0 // package bft import ( "container/list" "context" "fmt" "sync" "time" "github.com/SmartBFT-Go/consensus/pkg/api" "github.com/SmartBFT-Go/consensus/pkg/metrics/disabled" "github.com/SmartBFT-Go/consensus/pkg/types" "github.com/pkg/errors" "golang.org/x/sync/semaphore" ) const ( defaultRequestTimeout = 10 * time.Second // for unit tests only defaultMaxBytes = 100 * 1024 // default max request size would be of size 100Kb defaultSizeOfDelElements = 1000 // default size slice of delete elements defaultEraseTimeout = 5 * time.Second // for cicle erase silice of delete elements ) var ( ErrReqAlreadyExists = fmt.Errorf("request already exists") ErrReqAlreadyProcessed = fmt.Errorf("request already processed") ErrRequestTooBig = fmt.Errorf("submitted request is too big") ErrSubmitTimeout = fmt.Errorf("timeout submitting to request pool") ) //go:generate mockery -dir . -name RequestTimeoutHandler -case underscore -output ./mocks/ // RequestTimeoutHandler defines the methods called by request timeout timers created by time.AfterFunc. // This interface is implemented by the bft.Controller. type RequestTimeoutHandler interface { // OnRequestTimeout is called when a request timeout expires. OnRequestTimeout(request []byte, requestInfo types.RequestInfo) // OnLeaderFwdRequestTimeout is called when a leader forwarding timeout expires. OnLeaderFwdRequestTimeout(request []byte, requestInfo types.RequestInfo) // OnAutoRemoveTimeout is called when a auto-remove timeout expires. OnAutoRemoveTimeout(requestInfo types.RequestInfo) } // Pool implements requests pool, maintains pool of given size provided during // construction. In case there are more incoming request than given size it will // block during submit until there will be place to submit new ones. 
type Pool struct { logger api.Logger metrics *api.MetricsRequestPool inspector api.RequestInspector options PoolOptions cancel context.CancelFunc lock sync.RWMutex fifo *list.List semaphore *semaphore.Weighted existMap map[types.RequestInfo]*list.Element timeoutHandler RequestTimeoutHandler closed bool stopped bool submittedChan chan struct{} sizeBytes uint64 delMap map[types.RequestInfo]struct{} delSlice []types.RequestInfo } // requestItem captures request related information type requestItem struct { request []byte timeout *time.Timer additionTimestamp time.Time } // PoolOptions is the pool configuration type PoolOptions struct { QueueSize int64 ForwardTimeout time.Duration ComplainTimeout time.Duration AutoRemoveTimeout time.Duration RequestMaxBytes uint64 SubmitTimeout time.Duration Metrics *api.MetricsRequestPool } // NewPool constructs new requests pool func NewPool(log api.Logger, inspector api.RequestInspector, th RequestTimeoutHandler, options PoolOptions, submittedChan chan struct{}) *Pool { if options.ForwardTimeout == 0 { options.ForwardTimeout = defaultRequestTimeout } if options.ComplainTimeout == 0 { options.ComplainTimeout = defaultRequestTimeout } if options.AutoRemoveTimeout == 0 { options.AutoRemoveTimeout = defaultRequestTimeout } if options.RequestMaxBytes == 0 { options.RequestMaxBytes = defaultMaxBytes } if options.SubmitTimeout == 0 { options.SubmitTimeout = defaultRequestTimeout } if options.Metrics == nil { options.Metrics = api.NewMetricsRequestPool(&disabled.Provider{}) } ctx, cancel := context.WithCancel(context.Background()) rp := &Pool{ cancel: cancel, timeoutHandler: th, logger: log, metrics: options.Metrics, inspector: inspector, fifo: list.New(), semaphore: semaphore.NewWeighted(options.QueueSize), existMap: make(map[types.RequestInfo]*list.Element), options: options, submittedChan: submittedChan, delMap: make(map[types.RequestInfo]struct{}), delSlice: make([]types.RequestInfo, 0, defaultSizeOfDelElements), } go func() { tic := 
time.NewTicker(defaultEraseTimeout) for { select { case <-tic.C: rp.eraseFromDelSlice() case <-ctx.Done(): tic.Stop() return } } }() return rp } // ChangeTimeouts changes the timeout of the pool func (rp *Pool) ChangeTimeouts(th RequestTimeoutHandler, options PoolOptions) { rp.lock.Lock() defer rp.lock.Unlock() if !rp.stopped { rp.logger.Errorf("Trying to change timeouts but the pool is not stopped") return } if options.ForwardTimeout == 0 { options.ForwardTimeout = defaultRequestTimeout } if options.ComplainTimeout == 0 { options.ComplainTimeout = defaultRequestTimeout } if options.AutoRemoveTimeout == 0 { options.AutoRemoveTimeout = defaultRequestTimeout } rp.options.ForwardTimeout = options.ForwardTimeout rp.options.ComplainTimeout = options.ComplainTimeout rp.options.AutoRemoveTimeout = options.AutoRemoveTimeout rp.timeoutHandler = th rp.logger.Debugf("Changed pool timeouts") } func (rp *Pool) isClosed() bool { rp.lock.Lock() defer rp.lock.Unlock() return rp.closed } // Submit a request into the pool, returns an error when request is already in the pool func (rp *Pool) Submit(request []byte) error { reqInfo := rp.inspector.RequestID(request) if rp.isClosed() { return errors.Errorf("pool closed, request rejected: %s", reqInfo) } if uint64(len(request)) > rp.options.RequestMaxBytes { rp.metrics.CountOfFailAddRequestToPool.With( rp.metrics.LabelsForWith(api.NameReasonFailAdd, api.ReasonRequestMaxBytes)..., ).Add(1) return fmt.Errorf( "submitted request (%d) is bigger than request max bytes (%d)", len(request), rp.options.RequestMaxBytes, ) } rp.lock.RLock() _, alreadyExists := rp.existMap[reqInfo] _, alreadyDelete := rp.delMap[reqInfo] rp.lock.RUnlock() if alreadyExists { rp.logger.Debugf("request %s already exists in the pool", reqInfo) return ErrReqAlreadyExists } if alreadyDelete { rp.logger.Debugf("request %s already processed", reqInfo) return ErrReqAlreadyProcessed } ctx, cancel := context.WithTimeout(context.Background(), rp.options.SubmitTimeout) defer 
cancel() // do not wait for a semaphore with a lock, as it will prevent draining the pool. if err := rp.semaphore.Acquire(ctx, 1); err != nil { rp.metrics.CountOfFailAddRequestToPool.With( rp.metrics.LabelsForWith(api.NameReasonFailAdd, api.ReasonSemaphoreAcquireFail)..., ).Add(1) return errors.Wrapf(err, "acquiring semaphore for request: %s", reqInfo) } reqCopy := append(make([]byte, 0), request...) rp.lock.Lock() defer rp.lock.Unlock() if _, existsEl := rp.existMap[reqInfo]; existsEl { rp.semaphore.Release(1) rp.logger.Debugf("request %s has been already added to the pool", reqInfo) return ErrReqAlreadyExists } if _, deleteEl := rp.delMap[reqInfo]; deleteEl { rp.semaphore.Release(1) rp.logger.Debugf("request %s has been already processed", reqInfo) return ErrReqAlreadyProcessed } to := time.AfterFunc( rp.options.ForwardTimeout, func() { rp.onRequestTO(reqCopy, reqInfo) }, ) if rp.stopped { rp.logger.Debugf("pool stopped, submitting with a stopped timer, request: %s", reqInfo) to.Stop() } reqItem := &requestItem{ request: reqCopy, timeout: to, additionTimestamp: time.Now(), } element := rp.fifo.PushBack(reqItem) rp.metrics.CountOfRequestPool.Set(float64(rp.fifo.Len())) rp.metrics.CountOfRequestPoolAll.Add(1) rp.existMap[reqInfo] = element if len(rp.existMap) != rp.fifo.Len() { rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len()) } rp.logger.Debugf("Request %s submitted; started a timeout: %s", reqInfo, rp.options.ForwardTimeout) // notify that a request was submitted select { case rp.submittedChan <- struct{}{}: default: } rp.sizeBytes += uint64(len(element.Value.(*requestItem).request)) return nil } // Size returns the number of requests currently residing the pool func (rp *Pool) Size() int { rp.lock.Lock() defer rp.lock.Unlock() return len(rp.existMap) } // NextRequests returns the next requests to be batched. 
// It returns at most maxCount requests, and at most maxSizeBytes, in a newly allocated slice. // Return variable full indicates that the batch cannot be increased further by calling again with the same arguments. func (rp *Pool) NextRequests(maxCount int, maxSizeBytes uint64, check bool) (batch [][]byte, full bool) { rp.lock.Lock() defer rp.lock.Unlock() if check { if (len(rp.existMap) < maxCount) && (rp.sizeBytes < maxSizeBytes) { return nil, false } } count := minInt(rp.fifo.Len(), maxCount) var totalSize uint64 batch = make([][]byte, 0, count) element := rp.fifo.Front() for i := 0; i < count; i++ { req := element.Value.(*requestItem).request reqLen := uint64(len(req)) if totalSize+reqLen > maxSizeBytes { rp.logger.Debugf("Returning batch of %d requests totalling %dB as it exceeds threshold of %dB", len(batch), totalSize, maxSizeBytes) return batch, true } batch = append(batch, req) totalSize += reqLen element = element.Next() } fullS := totalSize >= maxSizeBytes fullC := len(batch) == maxCount full = fullS || fullC if len(batch) > 0 { rp.logger.Debugf("Returning batch of %d requests totalling %dB", len(batch), totalSize) } return batch, full } // Prune removes requests for which the given predicate returns error. 
func (rp *Pool) Prune(predicate func([]byte) error) { reqVec, infoVec := rp.copyRequests() var numPruned int for i, req := range reqVec { err := predicate(req) if err == nil { continue } if remErr := rp.RemoveRequest(infoVec[i]); remErr != nil { rp.logger.Debugf("Failed to prune request: %s; predicate error: %s; remove error: %s", infoVec[i], err, remErr) } else { rp.logger.Debugf("Pruned request: %s; predicate error: %s", infoVec[i], err) numPruned++ } } rp.logger.Debugf("Pruned %d requests", numPruned) } func (rp *Pool) copyRequests() (requestVec [][]byte, infoVec []types.RequestInfo) { rp.lock.Lock() defer rp.lock.Unlock() requestVec = make([][]byte, len(rp.existMap)) infoVec = make([]types.RequestInfo, len(rp.existMap)) var i int for info, item := range rp.existMap { infoVec[i] = info requestVec[i] = item.Value.(*requestItem).request i++ } return } // RemoveRequest removes the given request from the pool. func (rp *Pool) RemoveRequest(requestInfo types.RequestInfo) error { rp.lock.Lock() defer rp.lock.Unlock() element, exist := rp.existMap[requestInfo] if !exist { rp.moveToDelSlice(requestInfo) errStr := fmt.Sprintf("request %s is not in the pool at remove time", requestInfo) rp.logger.Debugf(errStr) return fmt.Errorf(errStr) } rp.deleteRequest(element, requestInfo) rp.sizeBytes -= uint64(len(element.Value.(*requestItem).request)) return nil } func (rp *Pool) deleteRequest(element *list.Element, requestInfo types.RequestInfo) { item := element.Value.(*requestItem) item.timeout.Stop() rp.fifo.Remove(element) rp.metrics.CountOfRequestPool.Set(float64(rp.fifo.Len())) rp.metrics.LatencyOfRequestPool.Observe(time.Since(item.additionTimestamp).Seconds()) delete(rp.existMap, requestInfo) rp.moveToDelSlice(requestInfo) rp.logger.Infof("Removed request %s from request pool", requestInfo) rp.semaphore.Release(1) if len(rp.existMap) != rp.fifo.Len() { rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len()) } } 
func (rp *Pool) moveToDelSlice(requestInfo types.RequestInfo) { _, exist := rp.delMap[requestInfo] if exist { return } rp.delMap[requestInfo] = struct{}{} rp.delSlice = append(rp.delSlice, requestInfo) } func (rp *Pool) eraseFromDelSlice() { rp.lock.RLock() l := len(rp.delSlice) rp.lock.RUnlock() if l <= defaultSizeOfDelElements { return } rp.lock.Lock() defer rp.lock.Unlock() n := len(rp.delSlice) - defaultSizeOfDelElements for _, r := range rp.delSlice[:n] { delete(rp.delMap, r) } rp.delSlice = rp.delSlice[n:] } // Close removes all the requests, stops all the timeout timers. func (rp *Pool) Close() { rp.lock.Lock() defer rp.lock.Unlock() rp.closed = true for requestInfo, element := range rp.existMap { rp.deleteRequest(element, requestInfo) } rp.cancel() } // StopTimers stops all the timeout timers attached to the pending requests, and marks the pool as "stopped". // This which prevents submission of new requests, and renewal of timeouts by timer go-routines that where running // at the time of the call to StopTimers(). func (rp *Pool) StopTimers() { rp.lock.Lock() defer rp.lock.Unlock() rp.stopped = true for _, element := range rp.existMap { item := element.Value.(*requestItem) item.timeout.Stop() } rp.logger.Debugf("Stopped all timers: size=%d", len(rp.existMap)) } // RestartTimers restarts all the timeout timers attached to the pending requests, as RequestForwardTimeout, and re-allows // submission of new requests. 
func (rp *Pool) RestartTimers() { rp.lock.Lock() defer rp.lock.Unlock() rp.stopped = false for reqInfo, element := range rp.existMap { item := element.Value.(*requestItem) item.timeout.Stop() ri := reqInfo to := time.AfterFunc( rp.options.ForwardTimeout, func() { rp.onRequestTO(item.request, ri) }, ) item.timeout = to } rp.logger.Debugf("Restarted all timers: size=%d", len(rp.existMap)) } // called by the goroutine spawned by time.AfterFunc func (rp *Pool) onRequestTO(request []byte, reqInfo types.RequestInfo) { rp.lock.Lock() element, contains := rp.existMap[reqInfo] if !contains { rp.lock.Unlock() rp.logger.Debugf("Request %s no longer in pool", reqInfo) return }
if rp.closed || rp.stopped { rp.lock.Unlock() rp.logger.Debugf("Pool stopped, will NOT start a leader-forwarding timeout") return } // start a second timeout item := element.Value.(*requestItem) item.timeout = time.AfterFunc( rp.options.ComplainTimeout, func() { rp.onLeaderFwdRequestTO(request, reqInfo) }, ) rp.logger.Debugf("Request %s; started a leader-forwarding timeout: %s", reqInfo, rp.options.ComplainTimeout) rp.lock.Unlock() // may take time, in case Comm channel to leader is full; hence w/o the lock. rp.logger.Debugf("Request %s timeout expired, going to send to leader", reqInfo) rp.metrics.CountOfLeaderForwardRequest.Add(1) rp.timeoutHandler.OnRequestTimeout(request, reqInfo) } // called by the goroutine spawned by time.AfterFunc func (rp *Pool) onLeaderFwdRequestTO(request []byte, reqInfo types.RequestInfo) { rp.lock.Lock() element, contains := rp.existMap[reqInfo] if !contains { rp.lock.Unlock() rp.logger.Debugf("Request %s no longer in pool", reqInfo) return } if rp.closed || rp.stopped { rp.lock.Unlock() rp.logger.Debugf("Pool stopped, will NOT start auto-remove timeout") return } // start a third timeout item := element.Value.(*requestItem) item.timeout = time.AfterFunc( rp.options.AutoRemoveTimeout, func() { rp.onAutoRemoveTO(reqInfo) }, ) rp.logger.Debugf("Request %s; started auto-remove timeout: %s", reqInfo, rp.options.AutoRemoveTimeout) rp.lock.Unlock() // may take time, in case Comm channel is full; hence w/o the lock. 
rp.logger.Debugf("Request %s leader-forwarding timeout expired, going to complain on leader", reqInfo) rp.metrics.CountTimeoutTwoStep.Add(1) rp.timeoutHandler.OnLeaderFwdRequestTimeout(request, reqInfo) } // called by the goroutine spawned by time.AfterFunc func (rp *Pool) onAutoRemoveTO(reqInfo types.RequestInfo) { rp.logger.Debugf("Request %s auto-remove timeout expired, going to remove from pool", reqInfo) if err := rp.RemoveRequest(reqInfo); err != nil { rp.logger.Errorf("Removal of request %s failed; error: %s", reqInfo, err) return } rp.metrics.CountOfDeleteRequestPool.Add(1) rp.timeoutHandler.OnAutoRemoveTimeout(reqInfo) }
random_line_split
requestpool.go
// Copyright IBM Corp. All Rights Reserved. // // SPDX-License-Identifier: Apache-2.0 // package bft import ( "container/list" "context" "fmt" "sync" "time" "github.com/SmartBFT-Go/consensus/pkg/api" "github.com/SmartBFT-Go/consensus/pkg/metrics/disabled" "github.com/SmartBFT-Go/consensus/pkg/types" "github.com/pkg/errors" "golang.org/x/sync/semaphore" ) const ( defaultRequestTimeout = 10 * time.Second // for unit tests only defaultMaxBytes = 100 * 1024 // default max request size would be of size 100Kb defaultSizeOfDelElements = 1000 // default size slice of delete elements defaultEraseTimeout = 5 * time.Second // for cicle erase silice of delete elements ) var ( ErrReqAlreadyExists = fmt.Errorf("request already exists") ErrReqAlreadyProcessed = fmt.Errorf("request already processed") ErrRequestTooBig = fmt.Errorf("submitted request is too big") ErrSubmitTimeout = fmt.Errorf("timeout submitting to request pool") ) //go:generate mockery -dir . -name RequestTimeoutHandler -case underscore -output ./mocks/ // RequestTimeoutHandler defines the methods called by request timeout timers created by time.AfterFunc. // This interface is implemented by the bft.Controller. type RequestTimeoutHandler interface { // OnRequestTimeout is called when a request timeout expires. OnRequestTimeout(request []byte, requestInfo types.RequestInfo) // OnLeaderFwdRequestTimeout is called when a leader forwarding timeout expires. OnLeaderFwdRequestTimeout(request []byte, requestInfo types.RequestInfo) // OnAutoRemoveTimeout is called when a auto-remove timeout expires. OnAutoRemoveTimeout(requestInfo types.RequestInfo) } // Pool implements requests pool, maintains pool of given size provided during // construction. In case there are more incoming request than given size it will // block during submit until there will be place to submit new ones. 
type Pool struct { logger api.Logger metrics *api.MetricsRequestPool inspector api.RequestInspector options PoolOptions cancel context.CancelFunc lock sync.RWMutex fifo *list.List semaphore *semaphore.Weighted existMap map[types.RequestInfo]*list.Element timeoutHandler RequestTimeoutHandler closed bool stopped bool submittedChan chan struct{} sizeBytes uint64 delMap map[types.RequestInfo]struct{} delSlice []types.RequestInfo } // requestItem captures request related information type requestItem struct { request []byte timeout *time.Timer additionTimestamp time.Time } // PoolOptions is the pool configuration type PoolOptions struct { QueueSize int64 ForwardTimeout time.Duration ComplainTimeout time.Duration AutoRemoveTimeout time.Duration RequestMaxBytes uint64 SubmitTimeout time.Duration Metrics *api.MetricsRequestPool } // NewPool constructs new requests pool func NewPool(log api.Logger, inspector api.RequestInspector, th RequestTimeoutHandler, options PoolOptions, submittedChan chan struct{}) *Pool { if options.ForwardTimeout == 0 { options.ForwardTimeout = defaultRequestTimeout } if options.ComplainTimeout == 0 { options.ComplainTimeout = defaultRequestTimeout } if options.AutoRemoveTimeout == 0 { options.AutoRemoveTimeout = defaultRequestTimeout } if options.RequestMaxBytes == 0 { options.RequestMaxBytes = defaultMaxBytes } if options.SubmitTimeout == 0 { options.SubmitTimeout = defaultRequestTimeout } if options.Metrics == nil { options.Metrics = api.NewMetricsRequestPool(&disabled.Provider{}) } ctx, cancel := context.WithCancel(context.Background()) rp := &Pool{ cancel: cancel, timeoutHandler: th, logger: log, metrics: options.Metrics, inspector: inspector, fifo: list.New(), semaphore: semaphore.NewWeighted(options.QueueSize), existMap: make(map[types.RequestInfo]*list.Element), options: options, submittedChan: submittedChan, delMap: make(map[types.RequestInfo]struct{}), delSlice: make([]types.RequestInfo, 0, defaultSizeOfDelElements), } go func() { tic := 
time.NewTicker(defaultEraseTimeout) for { select { case <-tic.C: rp.eraseFromDelSlice() case <-ctx.Done(): tic.Stop() return } } }() return rp } // ChangeTimeouts changes the timeout of the pool func (rp *Pool) ChangeTimeouts(th RequestTimeoutHandler, options PoolOptions) { rp.lock.Lock() defer rp.lock.Unlock() if !rp.stopped { rp.logger.Errorf("Trying to change timeouts but the pool is not stopped") return } if options.ForwardTimeout == 0 { options.ForwardTimeout = defaultRequestTimeout } if options.ComplainTimeout == 0 { options.ComplainTimeout = defaultRequestTimeout } if options.AutoRemoveTimeout == 0 { options.AutoRemoveTimeout = defaultRequestTimeout } rp.options.ForwardTimeout = options.ForwardTimeout rp.options.ComplainTimeout = options.ComplainTimeout rp.options.AutoRemoveTimeout = options.AutoRemoveTimeout rp.timeoutHandler = th rp.logger.Debugf("Changed pool timeouts") } func (rp *Pool) isClosed() bool { rp.lock.Lock() defer rp.lock.Unlock() return rp.closed } // Submit a request into the pool, returns an error when request is already in the pool func (rp *Pool) Submit(request []byte) error { reqInfo := rp.inspector.RequestID(request) if rp.isClosed() { return errors.Errorf("pool closed, request rejected: %s", reqInfo) } if uint64(len(request)) > rp.options.RequestMaxBytes { rp.metrics.CountOfFailAddRequestToPool.With( rp.metrics.LabelsForWith(api.NameReasonFailAdd, api.ReasonRequestMaxBytes)..., ).Add(1) return fmt.Errorf( "submitted request (%d) is bigger than request max bytes (%d)", len(request), rp.options.RequestMaxBytes, ) } rp.lock.RLock() _, alreadyExists := rp.existMap[reqInfo] _, alreadyDelete := rp.delMap[reqInfo] rp.lock.RUnlock() if alreadyExists { rp.logger.Debugf("request %s already exists in the pool", reqInfo) return ErrReqAlreadyExists } if alreadyDelete { rp.logger.Debugf("request %s already processed", reqInfo) return ErrReqAlreadyProcessed } ctx, cancel := context.WithTimeout(context.Background(), rp.options.SubmitTimeout) defer 
cancel() // do not wait for a semaphore with a lock, as it will prevent draining the pool. if err := rp.semaphore.Acquire(ctx, 1); err != nil { rp.metrics.CountOfFailAddRequestToPool.With( rp.metrics.LabelsForWith(api.NameReasonFailAdd, api.ReasonSemaphoreAcquireFail)..., ).Add(1) return errors.Wrapf(err, "acquiring semaphore for request: %s", reqInfo) } reqCopy := append(make([]byte, 0), request...) rp.lock.Lock() defer rp.lock.Unlock() if _, existsEl := rp.existMap[reqInfo]; existsEl { rp.semaphore.Release(1) rp.logger.Debugf("request %s has been already added to the pool", reqInfo) return ErrReqAlreadyExists } if _, deleteEl := rp.delMap[reqInfo]; deleteEl { rp.semaphore.Release(1) rp.logger.Debugf("request %s has been already processed", reqInfo) return ErrReqAlreadyProcessed } to := time.AfterFunc( rp.options.ForwardTimeout, func() { rp.onRequestTO(reqCopy, reqInfo) }, ) if rp.stopped { rp.logger.Debugf("pool stopped, submitting with a stopped timer, request: %s", reqInfo) to.Stop() } reqItem := &requestItem{ request: reqCopy, timeout: to, additionTimestamp: time.Now(), } element := rp.fifo.PushBack(reqItem) rp.metrics.CountOfRequestPool.Set(float64(rp.fifo.Len())) rp.metrics.CountOfRequestPoolAll.Add(1) rp.existMap[reqInfo] = element if len(rp.existMap) != rp.fifo.Len() { rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len()) } rp.logger.Debugf("Request %s submitted; started a timeout: %s", reqInfo, rp.options.ForwardTimeout) // notify that a request was submitted select { case rp.submittedChan <- struct{}{}: default: } rp.sizeBytes += uint64(len(element.Value.(*requestItem).request)) return nil } // Size returns the number of requests currently residing the pool func (rp *Pool) Size() int { rp.lock.Lock() defer rp.lock.Unlock() return len(rp.existMap) } // NextRequests returns the next requests to be batched. 
// It returns at most maxCount requests, and at most maxSizeBytes, in a newly allocated slice. // Return variable full indicates that the batch cannot be increased further by calling again with the same arguments. func (rp *Pool) NextRequests(maxCount int, maxSizeBytes uint64, check bool) (batch [][]byte, full bool) { rp.lock.Lock() defer rp.lock.Unlock() if check { if (len(rp.existMap) < maxCount) && (rp.sizeBytes < maxSizeBytes)
} count := minInt(rp.fifo.Len(), maxCount) var totalSize uint64 batch = make([][]byte, 0, count) element := rp.fifo.Front() for i := 0; i < count; i++ { req := element.Value.(*requestItem).request reqLen := uint64(len(req)) if totalSize+reqLen > maxSizeBytes { rp.logger.Debugf("Returning batch of %d requests totalling %dB as it exceeds threshold of %dB", len(batch), totalSize, maxSizeBytes) return batch, true } batch = append(batch, req) totalSize += reqLen element = element.Next() } fullS := totalSize >= maxSizeBytes fullC := len(batch) == maxCount full = fullS || fullC if len(batch) > 0 { rp.logger.Debugf("Returning batch of %d requests totalling %dB", len(batch), totalSize) } return batch, full } // Prune removes requests for which the given predicate returns error. func (rp *Pool) Prune(predicate func([]byte) error) { reqVec, infoVec := rp.copyRequests() var numPruned int for i, req := range reqVec { err := predicate(req) if err == nil { continue } if remErr := rp.RemoveRequest(infoVec[i]); remErr != nil { rp.logger.Debugf("Failed to prune request: %s; predicate error: %s; remove error: %s", infoVec[i], err, remErr) } else { rp.logger.Debugf("Pruned request: %s; predicate error: %s", infoVec[i], err) numPruned++ } } rp.logger.Debugf("Pruned %d requests", numPruned) } func (rp *Pool) copyRequests() (requestVec [][]byte, infoVec []types.RequestInfo) { rp.lock.Lock() defer rp.lock.Unlock() requestVec = make([][]byte, len(rp.existMap)) infoVec = make([]types.RequestInfo, len(rp.existMap)) var i int for info, item := range rp.existMap { infoVec[i] = info requestVec[i] = item.Value.(*requestItem).request i++ } return } // RemoveRequest removes the given request from the pool. 
func (rp *Pool) RemoveRequest(requestInfo types.RequestInfo) error { rp.lock.Lock() defer rp.lock.Unlock() element, exist := rp.existMap[requestInfo] if !exist { rp.moveToDelSlice(requestInfo) errStr := fmt.Sprintf("request %s is not in the pool at remove time", requestInfo) rp.logger.Debugf(errStr) return fmt.Errorf(errStr) } rp.deleteRequest(element, requestInfo) rp.sizeBytes -= uint64(len(element.Value.(*requestItem).request)) return nil } func (rp *Pool) deleteRequest(element *list.Element, requestInfo types.RequestInfo) { item := element.Value.(*requestItem) item.timeout.Stop() rp.fifo.Remove(element) rp.metrics.CountOfRequestPool.Set(float64(rp.fifo.Len())) rp.metrics.LatencyOfRequestPool.Observe(time.Since(item.additionTimestamp).Seconds()) delete(rp.existMap, requestInfo) rp.moveToDelSlice(requestInfo) rp.logger.Infof("Removed request %s from request pool", requestInfo) rp.semaphore.Release(1) if len(rp.existMap) != rp.fifo.Len() { rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len()) } } func (rp *Pool) moveToDelSlice(requestInfo types.RequestInfo) { _, exist := rp.delMap[requestInfo] if exist { return } rp.delMap[requestInfo] = struct{}{} rp.delSlice = append(rp.delSlice, requestInfo) } func (rp *Pool) eraseFromDelSlice() { rp.lock.RLock() l := len(rp.delSlice) rp.lock.RUnlock() if l <= defaultSizeOfDelElements { return } rp.lock.Lock() defer rp.lock.Unlock() n := len(rp.delSlice) - defaultSizeOfDelElements for _, r := range rp.delSlice[:n] { delete(rp.delMap, r) } rp.delSlice = rp.delSlice[n:] } // Close removes all the requests, stops all the timeout timers. func (rp *Pool) Close() { rp.lock.Lock() defer rp.lock.Unlock() rp.closed = true for requestInfo, element := range rp.existMap { rp.deleteRequest(element, requestInfo) } rp.cancel() } // StopTimers stops all the timeout timers attached to the pending requests, and marks the pool as "stopped". 
// This which prevents submission of new requests, and renewal of timeouts by timer go-routines that where running // at the time of the call to StopTimers(). func (rp *Pool) StopTimers() { rp.lock.Lock() defer rp.lock.Unlock() rp.stopped = true for _, element := range rp.existMap { item := element.Value.(*requestItem) item.timeout.Stop() } rp.logger.Debugf("Stopped all timers: size=%d", len(rp.existMap)) } // RestartTimers restarts all the timeout timers attached to the pending requests, as RequestForwardTimeout, and re-allows // submission of new requests. func (rp *Pool) RestartTimers() { rp.lock.Lock() defer rp.lock.Unlock() rp.stopped = false for reqInfo, element := range rp.existMap { item := element.Value.(*requestItem) item.timeout.Stop() ri := reqInfo to := time.AfterFunc( rp.options.ForwardTimeout, func() { rp.onRequestTO(item.request, ri) }, ) item.timeout = to } rp.logger.Debugf("Restarted all timers: size=%d", len(rp.existMap)) } // called by the goroutine spawned by time.AfterFunc func (rp *Pool) onRequestTO(request []byte, reqInfo types.RequestInfo) { rp.lock.Lock() element, contains := rp.existMap[reqInfo] if !contains { rp.lock.Unlock() rp.logger.Debugf("Request %s no longer in pool", reqInfo) return } if rp.closed || rp.stopped { rp.lock.Unlock() rp.logger.Debugf("Pool stopped, will NOT start a leader-forwarding timeout") return } // start a second timeout item := element.Value.(*requestItem) item.timeout = time.AfterFunc( rp.options.ComplainTimeout, func() { rp.onLeaderFwdRequestTO(request, reqInfo) }, ) rp.logger.Debugf("Request %s; started a leader-forwarding timeout: %s", reqInfo, rp.options.ComplainTimeout) rp.lock.Unlock() // may take time, in case Comm channel to leader is full; hence w/o the lock. 
rp.logger.Debugf("Request %s timeout expired, going to send to leader", reqInfo) rp.metrics.CountOfLeaderForwardRequest.Add(1) rp.timeoutHandler.OnRequestTimeout(request, reqInfo) } // called by the goroutine spawned by time.AfterFunc func (rp *Pool) onLeaderFwdRequestTO(request []byte, reqInfo types.RequestInfo) { rp.lock.Lock() element, contains := rp.existMap[reqInfo] if !contains { rp.lock.Unlock() rp.logger.Debugf("Request %s no longer in pool", reqInfo) return } if rp.closed || rp.stopped { rp.lock.Unlock() rp.logger.Debugf("Pool stopped, will NOT start auto-remove timeout") return } // start a third timeout item := element.Value.(*requestItem) item.timeout = time.AfterFunc( rp.options.AutoRemoveTimeout, func() { rp.onAutoRemoveTO(reqInfo) }, ) rp.logger.Debugf("Request %s; started auto-remove timeout: %s", reqInfo, rp.options.AutoRemoveTimeout) rp.lock.Unlock() // may take time, in case Comm channel is full; hence w/o the lock. rp.logger.Debugf("Request %s leader-forwarding timeout expired, going to complain on leader", reqInfo) rp.metrics.CountTimeoutTwoStep.Add(1) rp.timeoutHandler.OnLeaderFwdRequestTimeout(request, reqInfo) } // called by the goroutine spawned by time.AfterFunc func (rp *Pool) onAutoRemoveTO(reqInfo types.RequestInfo) { rp.logger.Debugf("Request %s auto-remove timeout expired, going to remove from pool", reqInfo) if err := rp.RemoveRequest(reqInfo); err != nil { rp.logger.Errorf("Removal of request %s failed; error: %s", reqInfo, err) return } rp.metrics.CountOfDeleteRequestPool.Add(1) rp.timeoutHandler.OnAutoRemoveTimeout(reqInfo) }
{ return nil, false }
conditional_block
requestpool.go
// Copyright IBM Corp. All Rights Reserved. // // SPDX-License-Identifier: Apache-2.0 // package bft import ( "container/list" "context" "fmt" "sync" "time" "github.com/SmartBFT-Go/consensus/pkg/api" "github.com/SmartBFT-Go/consensus/pkg/metrics/disabled" "github.com/SmartBFT-Go/consensus/pkg/types" "github.com/pkg/errors" "golang.org/x/sync/semaphore" ) const ( defaultRequestTimeout = 10 * time.Second // for unit tests only defaultMaxBytes = 100 * 1024 // default max request size would be of size 100Kb defaultSizeOfDelElements = 1000 // default size slice of delete elements defaultEraseTimeout = 5 * time.Second // for cicle erase silice of delete elements ) var ( ErrReqAlreadyExists = fmt.Errorf("request already exists") ErrReqAlreadyProcessed = fmt.Errorf("request already processed") ErrRequestTooBig = fmt.Errorf("submitted request is too big") ErrSubmitTimeout = fmt.Errorf("timeout submitting to request pool") ) //go:generate mockery -dir . -name RequestTimeoutHandler -case underscore -output ./mocks/ // RequestTimeoutHandler defines the methods called by request timeout timers created by time.AfterFunc. // This interface is implemented by the bft.Controller. type RequestTimeoutHandler interface { // OnRequestTimeout is called when a request timeout expires. OnRequestTimeout(request []byte, requestInfo types.RequestInfo) // OnLeaderFwdRequestTimeout is called when a leader forwarding timeout expires. OnLeaderFwdRequestTimeout(request []byte, requestInfo types.RequestInfo) // OnAutoRemoveTimeout is called when a auto-remove timeout expires. OnAutoRemoveTimeout(requestInfo types.RequestInfo) } // Pool implements requests pool, maintains pool of given size provided during // construction. In case there are more incoming request than given size it will // block during submit until there will be place to submit new ones. 
type Pool struct { logger api.Logger metrics *api.MetricsRequestPool inspector api.RequestInspector options PoolOptions cancel context.CancelFunc lock sync.RWMutex fifo *list.List semaphore *semaphore.Weighted existMap map[types.RequestInfo]*list.Element timeoutHandler RequestTimeoutHandler closed bool stopped bool submittedChan chan struct{} sizeBytes uint64 delMap map[types.RequestInfo]struct{} delSlice []types.RequestInfo } // requestItem captures request related information type requestItem struct { request []byte timeout *time.Timer additionTimestamp time.Time } // PoolOptions is the pool configuration type PoolOptions struct { QueueSize int64 ForwardTimeout time.Duration ComplainTimeout time.Duration AutoRemoveTimeout time.Duration RequestMaxBytes uint64 SubmitTimeout time.Duration Metrics *api.MetricsRequestPool } // NewPool constructs new requests pool func NewPool(log api.Logger, inspector api.RequestInspector, th RequestTimeoutHandler, options PoolOptions, submittedChan chan struct{}) *Pool { if options.ForwardTimeout == 0 { options.ForwardTimeout = defaultRequestTimeout } if options.ComplainTimeout == 0 { options.ComplainTimeout = defaultRequestTimeout } if options.AutoRemoveTimeout == 0 { options.AutoRemoveTimeout = defaultRequestTimeout } if options.RequestMaxBytes == 0 { options.RequestMaxBytes = defaultMaxBytes } if options.SubmitTimeout == 0 { options.SubmitTimeout = defaultRequestTimeout } if options.Metrics == nil { options.Metrics = api.NewMetricsRequestPool(&disabled.Provider{}) } ctx, cancel := context.WithCancel(context.Background()) rp := &Pool{ cancel: cancel, timeoutHandler: th, logger: log, metrics: options.Metrics, inspector: inspector, fifo: list.New(), semaphore: semaphore.NewWeighted(options.QueueSize), existMap: make(map[types.RequestInfo]*list.Element), options: options, submittedChan: submittedChan, delMap: make(map[types.RequestInfo]struct{}), delSlice: make([]types.RequestInfo, 0, defaultSizeOfDelElements), } go func() { tic := 
time.NewTicker(defaultEraseTimeout) for { select { case <-tic.C: rp.eraseFromDelSlice() case <-ctx.Done(): tic.Stop() return } } }() return rp } // ChangeTimeouts changes the timeout of the pool func (rp *Pool) ChangeTimeouts(th RequestTimeoutHandler, options PoolOptions) { rp.lock.Lock() defer rp.lock.Unlock() if !rp.stopped { rp.logger.Errorf("Trying to change timeouts but the pool is not stopped") return } if options.ForwardTimeout == 0 { options.ForwardTimeout = defaultRequestTimeout } if options.ComplainTimeout == 0 { options.ComplainTimeout = defaultRequestTimeout } if options.AutoRemoveTimeout == 0 { options.AutoRemoveTimeout = defaultRequestTimeout } rp.options.ForwardTimeout = options.ForwardTimeout rp.options.ComplainTimeout = options.ComplainTimeout rp.options.AutoRemoveTimeout = options.AutoRemoveTimeout rp.timeoutHandler = th rp.logger.Debugf("Changed pool timeouts") } func (rp *Pool) isClosed() bool { rp.lock.Lock() defer rp.lock.Unlock() return rp.closed } // Submit a request into the pool, returns an error when request is already in the pool func (rp *Pool) Submit(request []byte) error { reqInfo := rp.inspector.RequestID(request) if rp.isClosed() { return errors.Errorf("pool closed, request rejected: %s", reqInfo) } if uint64(len(request)) > rp.options.RequestMaxBytes { rp.metrics.CountOfFailAddRequestToPool.With( rp.metrics.LabelsForWith(api.NameReasonFailAdd, api.ReasonRequestMaxBytes)..., ).Add(1) return fmt.Errorf( "submitted request (%d) is bigger than request max bytes (%d)", len(request), rp.options.RequestMaxBytes, ) } rp.lock.RLock() _, alreadyExists := rp.existMap[reqInfo] _, alreadyDelete := rp.delMap[reqInfo] rp.lock.RUnlock() if alreadyExists { rp.logger.Debugf("request %s already exists in the pool", reqInfo) return ErrReqAlreadyExists } if alreadyDelete { rp.logger.Debugf("request %s already processed", reqInfo) return ErrReqAlreadyProcessed } ctx, cancel := context.WithTimeout(context.Background(), rp.options.SubmitTimeout) defer 
cancel() // do not wait for a semaphore with a lock, as it will prevent draining the pool. if err := rp.semaphore.Acquire(ctx, 1); err != nil { rp.metrics.CountOfFailAddRequestToPool.With( rp.metrics.LabelsForWith(api.NameReasonFailAdd, api.ReasonSemaphoreAcquireFail)..., ).Add(1) return errors.Wrapf(err, "acquiring semaphore for request: %s", reqInfo) } reqCopy := append(make([]byte, 0), request...) rp.lock.Lock() defer rp.lock.Unlock() if _, existsEl := rp.existMap[reqInfo]; existsEl { rp.semaphore.Release(1) rp.logger.Debugf("request %s has been already added to the pool", reqInfo) return ErrReqAlreadyExists } if _, deleteEl := rp.delMap[reqInfo]; deleteEl { rp.semaphore.Release(1) rp.logger.Debugf("request %s has been already processed", reqInfo) return ErrReqAlreadyProcessed } to := time.AfterFunc( rp.options.ForwardTimeout, func() { rp.onRequestTO(reqCopy, reqInfo) }, ) if rp.stopped { rp.logger.Debugf("pool stopped, submitting with a stopped timer, request: %s", reqInfo) to.Stop() } reqItem := &requestItem{ request: reqCopy, timeout: to, additionTimestamp: time.Now(), } element := rp.fifo.PushBack(reqItem) rp.metrics.CountOfRequestPool.Set(float64(rp.fifo.Len())) rp.metrics.CountOfRequestPoolAll.Add(1) rp.existMap[reqInfo] = element if len(rp.existMap) != rp.fifo.Len() { rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len()) } rp.logger.Debugf("Request %s submitted; started a timeout: %s", reqInfo, rp.options.ForwardTimeout) // notify that a request was submitted select { case rp.submittedChan <- struct{}{}: default: } rp.sizeBytes += uint64(len(element.Value.(*requestItem).request)) return nil } // Size returns the number of requests currently residing the pool func (rp *Pool)
() int { rp.lock.Lock() defer rp.lock.Unlock() return len(rp.existMap) } // NextRequests returns the next requests to be batched. // It returns at most maxCount requests, and at most maxSizeBytes, in a newly allocated slice. // Return variable full indicates that the batch cannot be increased further by calling again with the same arguments. func (rp *Pool) NextRequests(maxCount int, maxSizeBytes uint64, check bool) (batch [][]byte, full bool) { rp.lock.Lock() defer rp.lock.Unlock() if check { if (len(rp.existMap) < maxCount) && (rp.sizeBytes < maxSizeBytes) { return nil, false } } count := minInt(rp.fifo.Len(), maxCount) var totalSize uint64 batch = make([][]byte, 0, count) element := rp.fifo.Front() for i := 0; i < count; i++ { req := element.Value.(*requestItem).request reqLen := uint64(len(req)) if totalSize+reqLen > maxSizeBytes { rp.logger.Debugf("Returning batch of %d requests totalling %dB as it exceeds threshold of %dB", len(batch), totalSize, maxSizeBytes) return batch, true } batch = append(batch, req) totalSize += reqLen element = element.Next() } fullS := totalSize >= maxSizeBytes fullC := len(batch) == maxCount full = fullS || fullC if len(batch) > 0 { rp.logger.Debugf("Returning batch of %d requests totalling %dB", len(batch), totalSize) } return batch, full } // Prune removes requests for which the given predicate returns error. 
func (rp *Pool) Prune(predicate func([]byte) error) { reqVec, infoVec := rp.copyRequests() var numPruned int for i, req := range reqVec { err := predicate(req) if err == nil { continue } if remErr := rp.RemoveRequest(infoVec[i]); remErr != nil { rp.logger.Debugf("Failed to prune request: %s; predicate error: %s; remove error: %s", infoVec[i], err, remErr) } else { rp.logger.Debugf("Pruned request: %s; predicate error: %s", infoVec[i], err) numPruned++ } } rp.logger.Debugf("Pruned %d requests", numPruned) } func (rp *Pool) copyRequests() (requestVec [][]byte, infoVec []types.RequestInfo) { rp.lock.Lock() defer rp.lock.Unlock() requestVec = make([][]byte, len(rp.existMap)) infoVec = make([]types.RequestInfo, len(rp.existMap)) var i int for info, item := range rp.existMap { infoVec[i] = info requestVec[i] = item.Value.(*requestItem).request i++ } return } // RemoveRequest removes the given request from the pool. func (rp *Pool) RemoveRequest(requestInfo types.RequestInfo) error { rp.lock.Lock() defer rp.lock.Unlock() element, exist := rp.existMap[requestInfo] if !exist { rp.moveToDelSlice(requestInfo) errStr := fmt.Sprintf("request %s is not in the pool at remove time", requestInfo) rp.logger.Debugf(errStr) return fmt.Errorf(errStr) } rp.deleteRequest(element, requestInfo) rp.sizeBytes -= uint64(len(element.Value.(*requestItem).request)) return nil } func (rp *Pool) deleteRequest(element *list.Element, requestInfo types.RequestInfo) { item := element.Value.(*requestItem) item.timeout.Stop() rp.fifo.Remove(element) rp.metrics.CountOfRequestPool.Set(float64(rp.fifo.Len())) rp.metrics.LatencyOfRequestPool.Observe(time.Since(item.additionTimestamp).Seconds()) delete(rp.existMap, requestInfo) rp.moveToDelSlice(requestInfo) rp.logger.Infof("Removed request %s from request pool", requestInfo) rp.semaphore.Release(1) if len(rp.existMap) != rp.fifo.Len() { rp.logger.Panicf("RequestPool map and list are of different length: map=%d, list=%d", len(rp.existMap), rp.fifo.Len()) } } 
func (rp *Pool) moveToDelSlice(requestInfo types.RequestInfo) { _, exist := rp.delMap[requestInfo] if exist { return } rp.delMap[requestInfo] = struct{}{} rp.delSlice = append(rp.delSlice, requestInfo) } func (rp *Pool) eraseFromDelSlice() { rp.lock.RLock() l := len(rp.delSlice) rp.lock.RUnlock() if l <= defaultSizeOfDelElements { return } rp.lock.Lock() defer rp.lock.Unlock() n := len(rp.delSlice) - defaultSizeOfDelElements for _, r := range rp.delSlice[:n] { delete(rp.delMap, r) } rp.delSlice = rp.delSlice[n:] } // Close removes all the requests, stops all the timeout timers. func (rp *Pool) Close() { rp.lock.Lock() defer rp.lock.Unlock() rp.closed = true for requestInfo, element := range rp.existMap { rp.deleteRequest(element, requestInfo) } rp.cancel() } // StopTimers stops all the timeout timers attached to the pending requests, and marks the pool as "stopped". // This which prevents submission of new requests, and renewal of timeouts by timer go-routines that where running // at the time of the call to StopTimers(). func (rp *Pool) StopTimers() { rp.lock.Lock() defer rp.lock.Unlock() rp.stopped = true for _, element := range rp.existMap { item := element.Value.(*requestItem) item.timeout.Stop() } rp.logger.Debugf("Stopped all timers: size=%d", len(rp.existMap)) } // RestartTimers restarts all the timeout timers attached to the pending requests, as RequestForwardTimeout, and re-allows // submission of new requests. 
func (rp *Pool) RestartTimers() { rp.lock.Lock() defer rp.lock.Unlock() rp.stopped = false for reqInfo, element := range rp.existMap { item := element.Value.(*requestItem) item.timeout.Stop() ri := reqInfo to := time.AfterFunc( rp.options.ForwardTimeout, func() { rp.onRequestTO(item.request, ri) }, ) item.timeout = to } rp.logger.Debugf("Restarted all timers: size=%d", len(rp.existMap)) } // called by the goroutine spawned by time.AfterFunc func (rp *Pool) onRequestTO(request []byte, reqInfo types.RequestInfo) { rp.lock.Lock() element, contains := rp.existMap[reqInfo] if !contains { rp.lock.Unlock() rp.logger.Debugf("Request %s no longer in pool", reqInfo) return } if rp.closed || rp.stopped { rp.lock.Unlock() rp.logger.Debugf("Pool stopped, will NOT start a leader-forwarding timeout") return } // start a second timeout item := element.Value.(*requestItem) item.timeout = time.AfterFunc( rp.options.ComplainTimeout, func() { rp.onLeaderFwdRequestTO(request, reqInfo) }, ) rp.logger.Debugf("Request %s; started a leader-forwarding timeout: %s", reqInfo, rp.options.ComplainTimeout) rp.lock.Unlock() // may take time, in case Comm channel to leader is full; hence w/o the lock. 
rp.logger.Debugf("Request %s timeout expired, going to send to leader", reqInfo) rp.metrics.CountOfLeaderForwardRequest.Add(1) rp.timeoutHandler.OnRequestTimeout(request, reqInfo) } // called by the goroutine spawned by time.AfterFunc func (rp *Pool) onLeaderFwdRequestTO(request []byte, reqInfo types.RequestInfo) { rp.lock.Lock() element, contains := rp.existMap[reqInfo] if !contains { rp.lock.Unlock() rp.logger.Debugf("Request %s no longer in pool", reqInfo) return } if rp.closed || rp.stopped { rp.lock.Unlock() rp.logger.Debugf("Pool stopped, will NOT start auto-remove timeout") return } // start a third timeout item := element.Value.(*requestItem) item.timeout = time.AfterFunc( rp.options.AutoRemoveTimeout, func() { rp.onAutoRemoveTO(reqInfo) }, ) rp.logger.Debugf("Request %s; started auto-remove timeout: %s", reqInfo, rp.options.AutoRemoveTimeout) rp.lock.Unlock() // may take time, in case Comm channel is full; hence w/o the lock. rp.logger.Debugf("Request %s leader-forwarding timeout expired, going to complain on leader", reqInfo) rp.metrics.CountTimeoutTwoStep.Add(1) rp.timeoutHandler.OnLeaderFwdRequestTimeout(request, reqInfo) } // called by the goroutine spawned by time.AfterFunc func (rp *Pool) onAutoRemoveTO(reqInfo types.RequestInfo) { rp.logger.Debugf("Request %s auto-remove timeout expired, going to remove from pool", reqInfo) if err := rp.RemoveRequest(reqInfo); err != nil { rp.logger.Errorf("Removal of request %s failed; error: %s", reqInfo, err) return } rp.metrics.CountOfDeleteRequestPool.Add(1) rp.timeoutHandler.OnAutoRemoveTimeout(reqInfo) }
Size
identifier_name
crawl_stations_data_and_update_tb.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import swagger_client from swagger_client.rest import ApiException from tb_inmet_utils import renew_token from tb_inmet_utils import get_tb_api_configuration import json import requests import urllib import yaml import ast import sys import argparse import os.path from datetime import datetime, timedelta import csv import calendar import tqdm # load configurations from YAML config file with open("config.yaml", 'r') as yamlfile: cfg_params = yaml.load(yamlfile) # get API access configuration object configuration = get_tb_api_configuration(cfg_params) # create instances of the API class device_controller_api_inst = swagger_client.DeviceControllerApi(swagger_client.ApiClient(configuration)) device_api_controller_api_inst = swagger_client.DeviceApiControllerApi(swagger_client.ApiClient(configuration)) asset_controller_api_inst = swagger_client.AssetControllerApi(swagger_client.ApiClient(configuration)) entity_relation_controller_api_inst = swagger_client.EntityRelationControllerApi(swagger_client.ApiClient(configuration)) def is_valid_file(arg_file_str): if not os.path.exists(arg_file_str): msg = "The file %s does not exist!" % arg_file_str raise argparse.ArgumentTypeError(msg) else: # return an open file handle return open(arg_file_str, 'r') def is_valid_date(arg_date_str): try: return datetime.strptime(arg_date_str, "%d-%m-%Y") except ValueError: msg = "Given date ({0}) not valid! Expected format, DD-MM-YYYY!".format(arg_date_str) raise argparse.ArgumentTypeError(msg) def create_parser(): parser = argparse.ArgumentParser( description='Starts a crawler on INMET and transfers data to ThingsBoard' ) parser.add_argument( '-d', '--input-data-path', dest='input_data_path', type=is_valid_file, required=False, default=None, help='Path to folder containing \'.html\' files with INMET stations data. If this argument is set no other ' 'argument is considered.' 
) parser.add_argument( '-i', '--input-stations-file', dest='input_stations_file', type=is_valid_file, required=False, default=None, help='File with a list of desired INMET stations (one name per row). Fetch all available stations data ' + 'if no file provided' ) parser.add_argument( '-s', '--start-date', dest='start_date', type=is_valid_date, required=False, default=None, help='Date in which data fetch will start on DD-MM-YYYY format. If no date provided, mostRecentUpdate ' + 'attribute on ThingsBoard will be considered for each station.' ) parser.add_argument( '-e', '--end-date', dest='end_date', type=is_valid_date, required=False, default=None, help='Date in which data fetch will stop on DD-MM-YYYY format. If no date provided, the day of today ' + 'will be considered for each station.' ) return parser def get_current_stations(cfg_params):
def get_station(cfg_params, station_name): current_device_id = '' # first get the device id while True: try: api_response = device_controller_api_inst.get_tenant_device_using_get(station_name) current_device_id = api_response.id.id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: # TODO: create device when it is not found? ask Professor Dr. Goncalves print("Exception when calling DeviceControllerApi->save_device_using_post: %s\n" % e) break # second get the device from device id try: devices = device_controller_api_inst.get_devices_by_ids_using_get(current_device_id) except ApiException as e: print("Exception when calling DeviceControllerApi->get_devices_by_ids_using_get: %s\n" % e) return devices[0] def get_station_token(station_id): # get device token device_token = '' while True: try: api_response = device_controller_api_inst.get_device_credentials_by_device_id_using_get(station_id) device_token = api_response.credentials_id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print( "Exception when calling DeviceControllerApi->get_device_credentials_by_device_id_using_get: %s\n" % e) break return device_token # API version ''' def get_station_attributes(station_token): client_keys = 'url,mostRecentData' station_attributes = '' try: api_response = device_api_controller_api_inst.get_device_attributes_using_get(station_token, shared_keys=client_keys) print(api_response) except ApiException as e: print("Exception when calling DeviceApiControllerApi->get_device_attributes_using_get: %s\n" % e) return station_attributes ''' # requests version def get_station_attributes(station_token): client_keys = 'url,mostRecentData' url = 'http://'+ cfg_params['tb_api_access']['host'] + '/api/v1/' + station_token + '/attributes?clientKeys=' + client_keys r = requests.get(url) return r def set_station_attributes(station_token, 
attributes): # set station attributes while True: try: api_response = device_api_controller_api_inst.post_device_attributes_using_post(station_token, attributes) except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print("Exception when calling DeviceApiControllerApi->post_device_attributes_using_post: %s\n" % e) break def format_data(rawData): single_str = '' for line in rawData: single_str += line single_str = single_str.replace('\r\n', '') single_str = single_str.replace(' ', '') single_str = single_str.replace('\t', '') data = single_str.split('<br>') data = data[:-1] return data def run_crawler(start_date, end_date, url): # define time period and create session form = { 'dtaini': start_date.strftime("%d/%m/%Y"), 'dtafim': end_date.strftime("%d/%m/%Y"), 'aleaValue': 'NDgyOA==', 'aleaNum': '4828' } encondedForm = urllib.urlencode(form) head = { 'Content-Type': 'application/x-www-form-urlencoded' } r = requests.post(url, data=encondedForm, headers=head) # get session cookie and get data from site cookie = r.headers["Set-Cookie"] head = { 'Cookie': cookie } fixed_url = 'http://www.inmet.gov.br/sonabra/pg_downDadosCodigo_sim.php' r = requests.get(fixed_url, headers=head) formatted_data = format_data(r) return formatted_data def load_station_data(station_token, station_data): # load station data reader = csv.reader(station_data) keys = reader.next() # iterate over data collects for i, row_of_values in enumerate(reader, start = 0): current_data = dict(zip(keys, row_of_values)) most_recent_data = '' # get date from the most recent data for attribute update if i == 0: most_recent_data = current_data['data'].replace('/','-') # convert current datetime to timestamp date = current_data['data'].split('/') time_tuple_utc = (int(date[2]), int(date[1]), int(date[0]), int(current_data['hora']), 0, 0) ts_utc = int(calendar.timegm(time_tuple_utc)) * 1000 json_temp = {'unavailable_data': ''} # adjust data 
types for key, value in current_data.iteritems(): if key in ['hora', 'vento_vel', 'umid_max', 'umid_min', 'umid_inst']: try: json_temp[key] = int(current_data[key]) except ValueError: json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',') current_data[key] = '-' continue elif key in ['radiacao', 'precipitacao', 'vento_direcao', 'vento_rajada', 'temp_max', 'temp_min', 'temp_inst', 'pressao_max', 'pressao_min', 'pressao', 'pto_orvalho_max', 'pto_orvalho_min', 'pto_orvalho_inst']: try: json_temp[key] = float(current_data[key]) except ValueError: json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',') current_data[key] = '-' continue # clean last character from json unavailable_data key if json_temp['unavailable_data'] != '': json_temp['unavailable_data'] = json_temp['unavailable_data'][0:-1] # swap wind information due to problem on inmet crawled data vento_direcao <-> vento_vel wind_direction = '' wind_speed = '' if 'vento_vel' in json_temp: wind_direction = json_temp['vento_vel'] json_temp.pop('vento_vel') if 'vento_direcao' in json_temp: wind_speed = json_temp['vento_direcao'] json_temp.pop('vento_direcao') if wind_direction != '': json_temp['vento_direcao'] = wind_direction if wind_speed != '': json_temp['vento_vel'] = wind_speed # write data to thingsboard # 1 - format json json_data = {} json_data['values'] = json_temp json_data['ts'] = ts_utc # 2 - write data while True: try: api_response = device_api_controller_api_inst.post_telemetry_using_post(station_token, json_data) except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print("Exception when calling DeviceApiControllerApi->post_telemetry_using_post: %s\n" % e) break # update mostRecentData attribute json_data = {} json_data = {'mostRecentData':most_recent_data} set_station_attributes(station_token, json_data) pass def walkdir(folder): # walk through each files in a directory for dirpath, dirs, 
files in os.walk(folder): for filename in files: if filename.endswith(".html"): yield os.path.abspath(os.path.join(dirpath, filename)) def send_data_from_file(file_path): file = open(file_path, 'r') formatted_data = format_data(file) # get station code station_code = file_path.split('.')[0].split('-')[-1] # 1 - get device id from station code current_device_id = "" while True: try: # get device id api_response = device_controller_api_inst.get_tenant_device_using_get(station_code) current_device_id = api_response.id.id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: tqdm.write("Exception when calling DeviceControllerApi->save_device_using_post: %s\n" % e) break # 2 - get device token from device id current_device_token = "" while True: try: # getDeviceCredentialsByDeviceId api_response = device_controller_api_inst.get_device_credentials_by_device_id_using_get(current_device_id) current_device_token = api_response.credentials_id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: tqdm.write( "Exception when calling DeviceControllerApi->get_device_credentials_by_device_id_using_get: %s\n" % e) break load_station_data(current_device_token, formatted_data) # function that iterates over all folders def iterate_over_all_files(root_path): # compute the total number of files file_counter = 0 for file_path in walkdir(root_path): file_counter += 1 # iterates over all files with tqdm(total=file_counter, unit='files') as pbar: for file_path in walkdir(root_path): send_data_from_file(file_path) pbar.set_postfix(file=file_path, refresh=False) pbar.update() def main(): ''' run_crawler(datetime.today(), datetime.today(), 'http://www.inmet.gov.br/sonabra/pg_dspDadosCodigo_sim.php?QTMwMQ==') ''' parser = create_parser() args = parser.parse_args() stations = [] # verify if there is a path with input files if args.input_data_path: 
file_counter = 0 for filename in walkdir(args.input_data_path): file_counter += 1 # iterates over all files with tqdm(total=file_counter, unit='files') as pbar: for filename in walkdir(args.input_data_path): send_data_from_file(filename) pbar.set_postfix(file=filename, refresh=False) pbar.update() else: # verify if there is a file with a list of stations if args.input_stations_file: # if so, read file to a list file_content = args.input_stations_file.readlines() file_content = [x.strip() for x in file_content] # query defined stations for station_name in file_content: stations.append(get_station(cfg_params, station_name)) else: # query all stations stations = get_current_stations(cfg_params) # set progress bar # with tqdm(total=len(stations), unit='stations') as pbar: # iterates over all stations for station in stations: # get station access token station_token = get_station_token(station.id.id) # get station attributes station_attributes = get_station_attributes(station_token) # verify if there is a start date if not args.start_date: # verify device mostRecentData to define start_date # if mostRecentData is empty define start_date to 365 days before today if station_attributes['mostRecentData'] == '': start_date = datetime.today() - timedelta(days=365) else: start_date = station_attributes['mostRecentData'] else: start_date = args.start_date # verify if there is a end date if not args.end_date: # set today as end_date end_date = datetime.today() else: end_date = args.end_date station_data = run_crawler(start_date, end_date, station_attributes['url']) load_station_data(station_token, station_data) # pbar.set_postfix(current_station=station['stationCode'], refresh=False) # pbar.update() if __name__ == '__main__': main()
relation_search_parameters = swagger_client.RelationsSearchParameters( root_id=cfg_params['tb_entities_access']['root_asset_id'], root_type='ASSET', direction='FROM', max_level=0) query = swagger_client.DeviceSearchQuery(device_types=['automatic-station'], parameters=relation_search_parameters, relation_type='Contains') query.parameters = relation_search_parameters while True: try: stations_list = device_controller_api_inst.find_by_query_using_post1(query) except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print("Exception when calling DeviceControllerApi->find_by_query_using_post1: %s\n" % e) break return stations_list
identifier_body
crawl_stations_data_and_update_tb.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import swagger_client from swagger_client.rest import ApiException from tb_inmet_utils import renew_token from tb_inmet_utils import get_tb_api_configuration import json import requests import urllib import yaml import ast import sys import argparse import os.path from datetime import datetime, timedelta import csv import calendar import tqdm # load configurations from YAML config file with open("config.yaml", 'r') as yamlfile: cfg_params = yaml.load(yamlfile) # get API access configuration object configuration = get_tb_api_configuration(cfg_params) # create instances of the API class device_controller_api_inst = swagger_client.DeviceControllerApi(swagger_client.ApiClient(configuration)) device_api_controller_api_inst = swagger_client.DeviceApiControllerApi(swagger_client.ApiClient(configuration)) asset_controller_api_inst = swagger_client.AssetControllerApi(swagger_client.ApiClient(configuration)) entity_relation_controller_api_inst = swagger_client.EntityRelationControllerApi(swagger_client.ApiClient(configuration)) def is_valid_file(arg_file_str): if not os.path.exists(arg_file_str): msg = "The file %s does not exist!" % arg_file_str raise argparse.ArgumentTypeError(msg) else: # return an open file handle return open(arg_file_str, 'r') def is_valid_date(arg_date_str): try: return datetime.strptime(arg_date_str, "%d-%m-%Y") except ValueError: msg = "Given date ({0}) not valid! Expected format, DD-MM-YYYY!".format(arg_date_str) raise argparse.ArgumentTypeError(msg) def create_parser(): parser = argparse.ArgumentParser( description='Starts a crawler on INMET and transfers data to ThingsBoard' ) parser.add_argument( '-d', '--input-data-path', dest='input_data_path', type=is_valid_file, required=False, default=None, help='Path to folder containing \'.html\' files with INMET stations data. If this argument is set no other ' 'argument is considered.' 
) parser.add_argument( '-i', '--input-stations-file', dest='input_stations_file', type=is_valid_file, required=False, default=None, help='File with a list of desired INMET stations (one name per row). Fetch all available stations data ' + 'if no file provided' ) parser.add_argument( '-s', '--start-date', dest='start_date', type=is_valid_date, required=False, default=None, help='Date in which data fetch will start on DD-MM-YYYY format. If no date provided, mostRecentUpdate ' + 'attribute on ThingsBoard will be considered for each station.' ) parser.add_argument( '-e', '--end-date', dest='end_date', type=is_valid_date, required=False, default=None, help='Date in which data fetch will stop on DD-MM-YYYY format. If no date provided, the day of today ' + 'will be considered for each station.' ) return parser def get_current_stations(cfg_params): relation_search_parameters = swagger_client.RelationsSearchParameters( root_id=cfg_params['tb_entities_access']['root_asset_id'], root_type='ASSET', direction='FROM', max_level=0) query = swagger_client.DeviceSearchQuery(device_types=['automatic-station'], parameters=relation_search_parameters, relation_type='Contains') query.parameters = relation_search_parameters while True: try: stations_list = device_controller_api_inst.find_by_query_using_post1(query) except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print("Exception when calling DeviceControllerApi->find_by_query_using_post1: %s\n" % e) break return stations_list def get_station(cfg_params, station_name): current_device_id = '' # first get the device id while True: try: api_response = device_controller_api_inst.get_tenant_device_using_get(station_name) current_device_id = api_response.id.id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: # TODO: create device when it is not found? ask Professor Dr. 
Goncalves print("Exception when calling DeviceControllerApi->save_device_using_post: %s\n" % e) break # second get the device from device id try: devices = device_controller_api_inst.get_devices_by_ids_using_get(current_device_id) except ApiException as e: print("Exception when calling DeviceControllerApi->get_devices_by_ids_using_get: %s\n" % e) return devices[0] def get_station_token(station_id): # get device token device_token = '' while True: try: api_response = device_controller_api_inst.get_device_credentials_by_device_id_using_get(station_id) device_token = api_response.credentials_id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print( "Exception when calling DeviceControllerApi->get_device_credentials_by_device_id_using_get: %s\n" % e) break return device_token # API version ''' def get_station_attributes(station_token): client_keys = 'url,mostRecentData' station_attributes = '' try: api_response = device_api_controller_api_inst.get_device_attributes_using_get(station_token, shared_keys=client_keys) print(api_response) except ApiException as e: print("Exception when calling DeviceApiControllerApi->get_device_attributes_using_get: %s\n" % e) return station_attributes ''' # requests version def get_station_attributes(station_token): client_keys = 'url,mostRecentData' url = 'http://'+ cfg_params['tb_api_access']['host'] + '/api/v1/' + station_token + '/attributes?clientKeys=' + client_keys r = requests.get(url) return r def set_station_attributes(station_token, attributes): # set station attributes while True: try: api_response = device_api_controller_api_inst.post_device_attributes_using_post(station_token, attributes) except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print("Exception when calling DeviceApiControllerApi->post_device_attributes_using_post: %s\n" % e) break def format_data(rawData): 
single_str = '' for line in rawData: single_str += line single_str = single_str.replace('\r\n', '') single_str = single_str.replace(' ', '') single_str = single_str.replace('\t', '') data = single_str.split('<br>') data = data[:-1] return data def run_crawler(start_date, end_date, url): # define time period and create session form = { 'dtaini': start_date.strftime("%d/%m/%Y"), 'dtafim': end_date.strftime("%d/%m/%Y"), 'aleaValue': 'NDgyOA==', 'aleaNum': '4828' } encondedForm = urllib.urlencode(form) head = { 'Content-Type': 'application/x-www-form-urlencoded' } r = requests.post(url, data=encondedForm, headers=head) # get session cookie and get data from site cookie = r.headers["Set-Cookie"] head = { 'Cookie': cookie } fixed_url = 'http://www.inmet.gov.br/sonabra/pg_downDadosCodigo_sim.php' r = requests.get(fixed_url, headers=head) formatted_data = format_data(r) return formatted_data def load_station_data(station_token, station_data): # load station data reader = csv.reader(station_data) keys = reader.next() # iterate over data collects for i, row_of_values in enumerate(reader, start = 0): current_data = dict(zip(keys, row_of_values)) most_recent_data = '' # get date from the most recent data for attribute update
ts_utc = int(calendar.timegm(time_tuple_utc)) * 1000 json_temp = {'unavailable_data': ''} # adjust data types for key, value in current_data.iteritems(): if key in ['hora', 'vento_vel', 'umid_max', 'umid_min', 'umid_inst']: try: json_temp[key] = int(current_data[key]) except ValueError: json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',') current_data[key] = '-' continue elif key in ['radiacao', 'precipitacao', 'vento_direcao', 'vento_rajada', 'temp_max', 'temp_min', 'temp_inst', 'pressao_max', 'pressao_min', 'pressao', 'pto_orvalho_max', 'pto_orvalho_min', 'pto_orvalho_inst']: try: json_temp[key] = float(current_data[key]) except ValueError: json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',') current_data[key] = '-' continue # clean last character from json unavailable_data key if json_temp['unavailable_data'] != '': json_temp['unavailable_data'] = json_temp['unavailable_data'][0:-1] # swap wind information due to problem on inmet crawled data vento_direcao <-> vento_vel wind_direction = '' wind_speed = '' if 'vento_vel' in json_temp: wind_direction = json_temp['vento_vel'] json_temp.pop('vento_vel') if 'vento_direcao' in json_temp: wind_speed = json_temp['vento_direcao'] json_temp.pop('vento_direcao') if wind_direction != '': json_temp['vento_direcao'] = wind_direction if wind_speed != '': json_temp['vento_vel'] = wind_speed # write data to thingsboard # 1 - format json json_data = {} json_data['values'] = json_temp json_data['ts'] = ts_utc # 2 - write data while True: try: api_response = device_api_controller_api_inst.post_telemetry_using_post(station_token, json_data) except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print("Exception when calling DeviceApiControllerApi->post_telemetry_using_post: %s\n" % e) break # update mostRecentData attribute json_data = {} json_data = {'mostRecentData':most_recent_data} 
set_station_attributes(station_token, json_data) pass def walkdir(folder): # walk through each files in a directory for dirpath, dirs, files in os.walk(folder): for filename in files: if filename.endswith(".html"): yield os.path.abspath(os.path.join(dirpath, filename)) def send_data_from_file(file_path): file = open(file_path, 'r') formatted_data = format_data(file) # get station code station_code = file_path.split('.')[0].split('-')[-1] # 1 - get device id from station code current_device_id = "" while True: try: # get device id api_response = device_controller_api_inst.get_tenant_device_using_get(station_code) current_device_id = api_response.id.id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: tqdm.write("Exception when calling DeviceControllerApi->save_device_using_post: %s\n" % e) break # 2 - get device token from device id current_device_token = "" while True: try: # getDeviceCredentialsByDeviceId api_response = device_controller_api_inst.get_device_credentials_by_device_id_using_get(current_device_id) current_device_token = api_response.credentials_id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: tqdm.write( "Exception when calling DeviceControllerApi->get_device_credentials_by_device_id_using_get: %s\n" % e) break load_station_data(current_device_token, formatted_data) # function that iterates over all folders def iterate_over_all_files(root_path): # compute the total number of files file_counter = 0 for file_path in walkdir(root_path): file_counter += 1 # iterates over all files with tqdm(total=file_counter, unit='files') as pbar: for file_path in walkdir(root_path): send_data_from_file(file_path) pbar.set_postfix(file=file_path, refresh=False) pbar.update() def main(): ''' run_crawler(datetime.today(), datetime.today(), 'http://www.inmet.gov.br/sonabra/pg_dspDadosCodigo_sim.php?QTMwMQ==') ''' 
parser = create_parser() args = parser.parse_args() stations = [] # verify if there is a path with input files if args.input_data_path: file_counter = 0 for filename in walkdir(args.input_data_path): file_counter += 1 # iterates over all files with tqdm(total=file_counter, unit='files') as pbar: for filename in walkdir(args.input_data_path): send_data_from_file(filename) pbar.set_postfix(file=filename, refresh=False) pbar.update() else: # verify if there is a file with a list of stations if args.input_stations_file: # if so, read file to a list file_content = args.input_stations_file.readlines() file_content = [x.strip() for x in file_content] # query defined stations for station_name in file_content: stations.append(get_station(cfg_params, station_name)) else: # query all stations stations = get_current_stations(cfg_params) # set progress bar # with tqdm(total=len(stations), unit='stations') as pbar: # iterates over all stations for station in stations: # get station access token station_token = get_station_token(station.id.id) # get station attributes station_attributes = get_station_attributes(station_token) # verify if there is a start date if not args.start_date: # verify device mostRecentData to define start_date # if mostRecentData is empty define start_date to 365 days before today if station_attributes['mostRecentData'] == '': start_date = datetime.today() - timedelta(days=365) else: start_date = station_attributes['mostRecentData'] else: start_date = args.start_date # verify if there is a end date if not args.end_date: # set today as end_date end_date = datetime.today() else: end_date = args.end_date station_data = run_crawler(start_date, end_date, station_attributes['url']) load_station_data(station_token, station_data) # pbar.set_postfix(current_station=station['stationCode'], refresh=False) # pbar.update() if __name__ == '__main__': main()
if i == 0: most_recent_data = current_data['data'].replace('/','-') # convert current datetime to timestamp date = current_data['data'].split('/') time_tuple_utc = (int(date[2]), int(date[1]), int(date[0]), int(current_data['hora']), 0, 0)
random_line_split
crawl_stations_data_and_update_tb.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import swagger_client from swagger_client.rest import ApiException from tb_inmet_utils import renew_token from tb_inmet_utils import get_tb_api_configuration import json import requests import urllib import yaml import ast import sys import argparse import os.path from datetime import datetime, timedelta import csv import calendar import tqdm # load configurations from YAML config file with open("config.yaml", 'r') as yamlfile: cfg_params = yaml.load(yamlfile) # get API access configuration object configuration = get_tb_api_configuration(cfg_params) # create instances of the API class device_controller_api_inst = swagger_client.DeviceControllerApi(swagger_client.ApiClient(configuration)) device_api_controller_api_inst = swagger_client.DeviceApiControllerApi(swagger_client.ApiClient(configuration)) asset_controller_api_inst = swagger_client.AssetControllerApi(swagger_client.ApiClient(configuration)) entity_relation_controller_api_inst = swagger_client.EntityRelationControllerApi(swagger_client.ApiClient(configuration)) def is_valid_file(arg_file_str): if not os.path.exists(arg_file_str): msg = "The file %s does not exist!" % arg_file_str raise argparse.ArgumentTypeError(msg) else: # return an open file handle return open(arg_file_str, 'r') def is_valid_date(arg_date_str): try: return datetime.strptime(arg_date_str, "%d-%m-%Y") except ValueError: msg = "Given date ({0}) not valid! Expected format, DD-MM-YYYY!".format(arg_date_str) raise argparse.ArgumentTypeError(msg) def create_parser(): parser = argparse.ArgumentParser( description='Starts a crawler on INMET and transfers data to ThingsBoard' ) parser.add_argument( '-d', '--input-data-path', dest='input_data_path', type=is_valid_file, required=False, default=None, help='Path to folder containing \'.html\' files with INMET stations data. If this argument is set no other ' 'argument is considered.' 
) parser.add_argument( '-i', '--input-stations-file', dest='input_stations_file', type=is_valid_file, required=False, default=None, help='File with a list of desired INMET stations (one name per row). Fetch all available stations data ' + 'if no file provided' ) parser.add_argument( '-s', '--start-date', dest='start_date', type=is_valid_date, required=False, default=None, help='Date in which data fetch will start on DD-MM-YYYY format. If no date provided, mostRecentUpdate ' + 'attribute on ThingsBoard will be considered for each station.' ) parser.add_argument( '-e', '--end-date', dest='end_date', type=is_valid_date, required=False, default=None, help='Date in which data fetch will stop on DD-MM-YYYY format. If no date provided, the day of today ' + 'will be considered for each station.' ) return parser def get_current_stations(cfg_params): relation_search_parameters = swagger_client.RelationsSearchParameters( root_id=cfg_params['tb_entities_access']['root_asset_id'], root_type='ASSET', direction='FROM', max_level=0) query = swagger_client.DeviceSearchQuery(device_types=['automatic-station'], parameters=relation_search_parameters, relation_type='Contains') query.parameters = relation_search_parameters while True: try: stations_list = device_controller_api_inst.find_by_query_using_post1(query) except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print("Exception when calling DeviceControllerApi->find_by_query_using_post1: %s\n" % e) break return stations_list def get_station(cfg_params, station_name): current_device_id = '' # first get the device id while True: try: api_response = device_controller_api_inst.get_tenant_device_using_get(station_name) current_device_id = api_response.id.id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: # TODO: create device when it is not found? ask Professor Dr. 
Goncalves print("Exception when calling DeviceControllerApi->save_device_using_post: %s\n" % e) break # second get the device from device id try: devices = device_controller_api_inst.get_devices_by_ids_using_get(current_device_id) except ApiException as e: print("Exception when calling DeviceControllerApi->get_devices_by_ids_using_get: %s\n" % e) return devices[0] def get_station_token(station_id): # get device token device_token = '' while True: try: api_response = device_controller_api_inst.get_device_credentials_by_device_id_using_get(station_id) device_token = api_response.credentials_id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print( "Exception when calling DeviceControllerApi->get_device_credentials_by_device_id_using_get: %s\n" % e) break return device_token # API version ''' def get_station_attributes(station_token): client_keys = 'url,mostRecentData' station_attributes = '' try: api_response = device_api_controller_api_inst.get_device_attributes_using_get(station_token, shared_keys=client_keys) print(api_response) except ApiException as e: print("Exception when calling DeviceApiControllerApi->get_device_attributes_using_get: %s\n" % e) return station_attributes ''' # requests version def get_station_attributes(station_token): client_keys = 'url,mostRecentData' url = 'http://'+ cfg_params['tb_api_access']['host'] + '/api/v1/' + station_token + '/attributes?clientKeys=' + client_keys r = requests.get(url) return r def set_station_attributes(station_token, attributes): # set station attributes while True: try: api_response = device_api_controller_api_inst.post_device_attributes_using_post(station_token, attributes) except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print("Exception when calling DeviceApiControllerApi->post_device_attributes_using_post: %s\n" % e) break def format_data(rawData): 
single_str = '' for line in rawData: single_str += line single_str = single_str.replace('\r\n', '') single_str = single_str.replace(' ', '') single_str = single_str.replace('\t', '') data = single_str.split('<br>') data = data[:-1] return data def run_crawler(start_date, end_date, url): # define time period and create session form = { 'dtaini': start_date.strftime("%d/%m/%Y"), 'dtafim': end_date.strftime("%d/%m/%Y"), 'aleaValue': 'NDgyOA==', 'aleaNum': '4828' } encondedForm = urllib.urlencode(form) head = { 'Content-Type': 'application/x-www-form-urlencoded' } r = requests.post(url, data=encondedForm, headers=head) # get session cookie and get data from site cookie = r.headers["Set-Cookie"] head = { 'Cookie': cookie } fixed_url = 'http://www.inmet.gov.br/sonabra/pg_downDadosCodigo_sim.php' r = requests.get(fixed_url, headers=head) formatted_data = format_data(r) return formatted_data def load_station_data(station_token, station_data): # load station data reader = csv.reader(station_data) keys = reader.next() # iterate over data collects for i, row_of_values in enumerate(reader, start = 0): current_data = dict(zip(keys, row_of_values)) most_recent_data = '' # get date from the most recent data for attribute update if i == 0: most_recent_data = current_data['data'].replace('/','-') # convert current datetime to timestamp date = current_data['data'].split('/') time_tuple_utc = (int(date[2]), int(date[1]), int(date[0]), int(current_data['hora']), 0, 0) ts_utc = int(calendar.timegm(time_tuple_utc)) * 1000 json_temp = {'unavailable_data': ''} # adjust data types for key, value in current_data.iteritems(): if key in ['hora', 'vento_vel', 'umid_max', 'umid_min', 'umid_inst']: try: json_temp[key] = int(current_data[key]) except ValueError: json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',') current_data[key] = '-' continue elif key in ['radiacao', 'precipitacao', 'vento_direcao', 'vento_rajada', 'temp_max', 'temp_min', 'temp_inst', 'pressao_max', 
'pressao_min', 'pressao', 'pto_orvalho_max', 'pto_orvalho_min', 'pto_orvalho_inst']: try: json_temp[key] = float(current_data[key]) except ValueError: json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',') current_data[key] = '-' continue # clean last character from json unavailable_data key if json_temp['unavailable_data'] != '': json_temp['unavailable_data'] = json_temp['unavailable_data'][0:-1] # swap wind information due to problem on inmet crawled data vento_direcao <-> vento_vel wind_direction = '' wind_speed = '' if 'vento_vel' in json_temp: wind_direction = json_temp['vento_vel'] json_temp.pop('vento_vel') if 'vento_direcao' in json_temp: wind_speed = json_temp['vento_direcao'] json_temp.pop('vento_direcao') if wind_direction != '': json_temp['vento_direcao'] = wind_direction if wind_speed != '': json_temp['vento_vel'] = wind_speed # write data to thingsboard # 1 - format json json_data = {} json_data['values'] = json_temp json_data['ts'] = ts_utc # 2 - write data while True: try: api_response = device_api_controller_api_inst.post_telemetry_using_post(station_token, json_data) except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print("Exception when calling DeviceApiControllerApi->post_telemetry_using_post: %s\n" % e) break # update mostRecentData attribute json_data = {} json_data = {'mostRecentData':most_recent_data} set_station_attributes(station_token, json_data) pass def walkdir(folder): # walk through each files in a directory for dirpath, dirs, files in os.walk(folder): for filename in files: if filename.endswith(".html"): yield os.path.abspath(os.path.join(dirpath, filename)) def send_data_from_file(file_path): file = open(file_path, 'r') formatted_data = format_data(file) # get station code station_code = file_path.split('.')[0].split('-')[-1] # 1 - get device id from station code current_device_id = "" while True: try: # get device id api_response = 
device_controller_api_inst.get_tenant_device_using_get(station_code) current_device_id = api_response.id.id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: tqdm.write("Exception when calling DeviceControllerApi->save_device_using_post: %s\n" % e) break # 2 - get device token from device id current_device_token = "" while True: try: # getDeviceCredentialsByDeviceId api_response = device_controller_api_inst.get_device_credentials_by_device_id_using_get(current_device_id) current_device_token = api_response.credentials_id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: tqdm.write( "Exception when calling DeviceControllerApi->get_device_credentials_by_device_id_using_get: %s\n" % e) break load_station_data(current_device_token, formatted_data) # function that iterates over all folders def iterate_over_all_files(root_path): # compute the total number of files file_counter = 0 for file_path in walkdir(root_path): file_counter += 1 # iterates over all files with tqdm(total=file_counter, unit='files') as pbar: for file_path in walkdir(root_path): send_data_from_file(file_path) pbar.set_postfix(file=file_path, refresh=False) pbar.update() def main(): ''' run_crawler(datetime.today(), datetime.today(), 'http://www.inmet.gov.br/sonabra/pg_dspDadosCodigo_sim.php?QTMwMQ==') ''' parser = create_parser() args = parser.parse_args() stations = [] # verify if there is a path with input files if args.input_data_path: file_counter = 0 for filename in walkdir(args.input_data_path): file_counter += 1 # iterates over all files with tqdm(total=file_counter, unit='files') as pbar: for filename in walkdir(args.input_data_path): send_data_from_file(filename) pbar.set_postfix(file=filename, refresh=False) pbar.update() else: # verify if there is a file with a list of stations if args.input_stations_file: # if so, read file to a list 
file_content = args.input_stations_file.readlines() file_content = [x.strip() for x in file_content] # query defined stations for station_name in file_content: stations.append(get_station(cfg_params, station_name)) else: # query all stations stations = get_current_stations(cfg_params) # set progress bar # with tqdm(total=len(stations), unit='stations') as pbar: # iterates over all stations for station in stations: # get station access token station_token = get_station_token(station.id.id) # get station attributes station_attributes = get_station_attributes(station_token) # verify if there is a start date if not args.start_date: # verify device mostRecentData to define start_date # if mostRecentData is empty define start_date to 365 days before today if station_attributes['mostRecentData'] == '': start_date = datetime.today() - timedelta(days=365) else:
else: start_date = args.start_date # verify if there is a end date if not args.end_date: # set today as end_date end_date = datetime.today() else: end_date = args.end_date station_data = run_crawler(start_date, end_date, station_attributes['url']) load_station_data(station_token, station_data) # pbar.set_postfix(current_station=station['stationCode'], refresh=False) # pbar.update() if __name__ == '__main__': main()
start_date = station_attributes['mostRecentData']
conditional_block
crawl_stations_data_and_update_tb.py
#!/usr/bin/env python # -*- coding: utf-8 -*- from __future__ import print_function import swagger_client from swagger_client.rest import ApiException from tb_inmet_utils import renew_token from tb_inmet_utils import get_tb_api_configuration import json import requests import urllib import yaml import ast import sys import argparse import os.path from datetime import datetime, timedelta import csv import calendar import tqdm # load configurations from YAML config file with open("config.yaml", 'r') as yamlfile: cfg_params = yaml.load(yamlfile) # get API access configuration object configuration = get_tb_api_configuration(cfg_params) # create instances of the API class device_controller_api_inst = swagger_client.DeviceControllerApi(swagger_client.ApiClient(configuration)) device_api_controller_api_inst = swagger_client.DeviceApiControllerApi(swagger_client.ApiClient(configuration)) asset_controller_api_inst = swagger_client.AssetControllerApi(swagger_client.ApiClient(configuration)) entity_relation_controller_api_inst = swagger_client.EntityRelationControllerApi(swagger_client.ApiClient(configuration)) def is_valid_file(arg_file_str): if not os.path.exists(arg_file_str): msg = "The file %s does not exist!" % arg_file_str raise argparse.ArgumentTypeError(msg) else: # return an open file handle return open(arg_file_str, 'r') def is_valid_date(arg_date_str): try: return datetime.strptime(arg_date_str, "%d-%m-%Y") except ValueError: msg = "Given date ({0}) not valid! Expected format, DD-MM-YYYY!".format(arg_date_str) raise argparse.ArgumentTypeError(msg) def create_parser(): parser = argparse.ArgumentParser( description='Starts a crawler on INMET and transfers data to ThingsBoard' ) parser.add_argument( '-d', '--input-data-path', dest='input_data_path', type=is_valid_file, required=False, default=None, help='Path to folder containing \'.html\' files with INMET stations data. If this argument is set no other ' 'argument is considered.' 
) parser.add_argument( '-i', '--input-stations-file', dest='input_stations_file', type=is_valid_file, required=False, default=None, help='File with a list of desired INMET stations (one name per row). Fetch all available stations data ' + 'if no file provided' ) parser.add_argument( '-s', '--start-date', dest='start_date', type=is_valid_date, required=False, default=None, help='Date in which data fetch will start on DD-MM-YYYY format. If no date provided, mostRecentUpdate ' + 'attribute on ThingsBoard will be considered for each station.' ) parser.add_argument( '-e', '--end-date', dest='end_date', type=is_valid_date, required=False, default=None, help='Date in which data fetch will stop on DD-MM-YYYY format. If no date provided, the day of today ' + 'will be considered for each station.' ) return parser def get_current_stations(cfg_params): relation_search_parameters = swagger_client.RelationsSearchParameters( root_id=cfg_params['tb_entities_access']['root_asset_id'], root_type='ASSET', direction='FROM', max_level=0) query = swagger_client.DeviceSearchQuery(device_types=['automatic-station'], parameters=relation_search_parameters, relation_type='Contains') query.parameters = relation_search_parameters while True: try: stations_list = device_controller_api_inst.find_by_query_using_post1(query) except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print("Exception when calling DeviceControllerApi->find_by_query_using_post1: %s\n" % e) break return stations_list def get_station(cfg_params, station_name): current_device_id = '' # first get the device id while True: try: api_response = device_controller_api_inst.get_tenant_device_using_get(station_name) current_device_id = api_response.id.id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: # TODO: create device when it is not found? ask Professor Dr. 
Goncalves print("Exception when calling DeviceControllerApi->save_device_using_post: %s\n" % e) break # second get the device from device id try: devices = device_controller_api_inst.get_devices_by_ids_using_get(current_device_id) except ApiException as e: print("Exception when calling DeviceControllerApi->get_devices_by_ids_using_get: %s\n" % e) return devices[0] def get_station_token(station_id): # get device token device_token = '' while True: try: api_response = device_controller_api_inst.get_device_credentials_by_device_id_using_get(station_id) device_token = api_response.credentials_id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print( "Exception when calling DeviceControllerApi->get_device_credentials_by_device_id_using_get: %s\n" % e) break return device_token # API version ''' def get_station_attributes(station_token): client_keys = 'url,mostRecentData' station_attributes = '' try: api_response = device_api_controller_api_inst.get_device_attributes_using_get(station_token, shared_keys=client_keys) print(api_response) except ApiException as e: print("Exception when calling DeviceApiControllerApi->get_device_attributes_using_get: %s\n" % e) return station_attributes ''' # requests version def get_station_attributes(station_token): client_keys = 'url,mostRecentData' url = 'http://'+ cfg_params['tb_api_access']['host'] + '/api/v1/' + station_token + '/attributes?clientKeys=' + client_keys r = requests.get(url) return r def set_station_attributes(station_token, attributes): # set station attributes while True: try: api_response = device_api_controller_api_inst.post_device_attributes_using_post(station_token, attributes) except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print("Exception when calling DeviceApiControllerApi->post_device_attributes_using_post: %s\n" % e) break def format_data(rawData): 
single_str = '' for line in rawData: single_str += line single_str = single_str.replace('\r\n', '') single_str = single_str.replace(' ', '') single_str = single_str.replace('\t', '') data = single_str.split('<br>') data = data[:-1] return data def run_crawler(start_date, end_date, url): # define time period and create session form = { 'dtaini': start_date.strftime("%d/%m/%Y"), 'dtafim': end_date.strftime("%d/%m/%Y"), 'aleaValue': 'NDgyOA==', 'aleaNum': '4828' } encondedForm = urllib.urlencode(form) head = { 'Content-Type': 'application/x-www-form-urlencoded' } r = requests.post(url, data=encondedForm, headers=head) # get session cookie and get data from site cookie = r.headers["Set-Cookie"] head = { 'Cookie': cookie } fixed_url = 'http://www.inmet.gov.br/sonabra/pg_downDadosCodigo_sim.php' r = requests.get(fixed_url, headers=head) formatted_data = format_data(r) return formatted_data def
(station_token, station_data): # load station data reader = csv.reader(station_data) keys = reader.next() # iterate over data collects for i, row_of_values in enumerate(reader, start = 0): current_data = dict(zip(keys, row_of_values)) most_recent_data = '' # get date from the most recent data for attribute update if i == 0: most_recent_data = current_data['data'].replace('/','-') # convert current datetime to timestamp date = current_data['data'].split('/') time_tuple_utc = (int(date[2]), int(date[1]), int(date[0]), int(current_data['hora']), 0, 0) ts_utc = int(calendar.timegm(time_tuple_utc)) * 1000 json_temp = {'unavailable_data': ''} # adjust data types for key, value in current_data.iteritems(): if key in ['hora', 'vento_vel', 'umid_max', 'umid_min', 'umid_inst']: try: json_temp[key] = int(current_data[key]) except ValueError: json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',') current_data[key] = '-' continue elif key in ['radiacao', 'precipitacao', 'vento_direcao', 'vento_rajada', 'temp_max', 'temp_min', 'temp_inst', 'pressao_max', 'pressao_min', 'pressao', 'pto_orvalho_max', 'pto_orvalho_min', 'pto_orvalho_inst']: try: json_temp[key] = float(current_data[key]) except ValueError: json_temp['unavailable_data'] = json_temp['unavailable_data'] + (key + ',') current_data[key] = '-' continue # clean last character from json unavailable_data key if json_temp['unavailable_data'] != '': json_temp['unavailable_data'] = json_temp['unavailable_data'][0:-1] # swap wind information due to problem on inmet crawled data vento_direcao <-> vento_vel wind_direction = '' wind_speed = '' if 'vento_vel' in json_temp: wind_direction = json_temp['vento_vel'] json_temp.pop('vento_vel') if 'vento_direcao' in json_temp: wind_speed = json_temp['vento_direcao'] json_temp.pop('vento_direcao') if wind_direction != '': json_temp['vento_direcao'] = wind_direction if wind_speed != '': json_temp['vento_vel'] = wind_speed # write data to thingsboard # 1 - format json 
json_data = {} json_data['values'] = json_temp json_data['ts'] = ts_utc # 2 - write data while True: try: api_response = device_api_controller_api_inst.post_telemetry_using_post(station_token, json_data) except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: print("Exception when calling DeviceApiControllerApi->post_telemetry_using_post: %s\n" % e) break # update mostRecentData attribute json_data = {} json_data = {'mostRecentData':most_recent_data} set_station_attributes(station_token, json_data) pass def walkdir(folder): # walk through each files in a directory for dirpath, dirs, files in os.walk(folder): for filename in files: if filename.endswith(".html"): yield os.path.abspath(os.path.join(dirpath, filename)) def send_data_from_file(file_path): file = open(file_path, 'r') formatted_data = format_data(file) # get station code station_code = file_path.split('.')[0].split('-')[-1] # 1 - get device id from station code current_device_id = "" while True: try: # get device id api_response = device_controller_api_inst.get_tenant_device_using_get(station_code) current_device_id = api_response.id.id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: tqdm.write("Exception when calling DeviceControllerApi->save_device_using_post: %s\n" % e) break # 2 - get device token from device id current_device_token = "" while True: try: # getDeviceCredentialsByDeviceId api_response = device_controller_api_inst.get_device_credentials_by_device_id_using_get(current_device_id) current_device_token = api_response.credentials_id except ApiException as e: if (json.loads(e.body)['message'] == 'Token has expired'): renew_token(configuration) continue else: tqdm.write( "Exception when calling DeviceControllerApi->get_device_credentials_by_device_id_using_get: %s\n" % e) break load_station_data(current_device_token, formatted_data) # function 
that iterates over all folders def iterate_over_all_files(root_path): # compute the total number of files file_counter = 0 for file_path in walkdir(root_path): file_counter += 1 # iterates over all files with tqdm(total=file_counter, unit='files') as pbar: for file_path in walkdir(root_path): send_data_from_file(file_path) pbar.set_postfix(file=file_path, refresh=False) pbar.update() def main(): ''' run_crawler(datetime.today(), datetime.today(), 'http://www.inmet.gov.br/sonabra/pg_dspDadosCodigo_sim.php?QTMwMQ==') ''' parser = create_parser() args = parser.parse_args() stations = [] # verify if there is a path with input files if args.input_data_path: file_counter = 0 for filename in walkdir(args.input_data_path): file_counter += 1 # iterates over all files with tqdm(total=file_counter, unit='files') as pbar: for filename in walkdir(args.input_data_path): send_data_from_file(filename) pbar.set_postfix(file=filename, refresh=False) pbar.update() else: # verify if there is a file with a list of stations if args.input_stations_file: # if so, read file to a list file_content = args.input_stations_file.readlines() file_content = [x.strip() for x in file_content] # query defined stations for station_name in file_content: stations.append(get_station(cfg_params, station_name)) else: # query all stations stations = get_current_stations(cfg_params) # set progress bar # with tqdm(total=len(stations), unit='stations') as pbar: # iterates over all stations for station in stations: # get station access token station_token = get_station_token(station.id.id) # get station attributes station_attributes = get_station_attributes(station_token) # verify if there is a start date if not args.start_date: # verify device mostRecentData to define start_date # if mostRecentData is empty define start_date to 365 days before today if station_attributes['mostRecentData'] == '': start_date = datetime.today() - timedelta(days=365) else: start_date = station_attributes['mostRecentData'] else: 
start_date = args.start_date # verify if there is a end date if not args.end_date: # set today as end_date end_date = datetime.today() else: end_date = args.end_date station_data = run_crawler(start_date, end_date, station_attributes['url']) load_station_data(station_token, station_data) # pbar.set_postfix(current_station=station['stationCode'], refresh=False) # pbar.update() if __name__ == '__main__': main()
load_station_data
identifier_name
virtio_constants.rs
#![allow(dead_code)] #![allow(clippy::all)] // Copied from the ixy C driver // Amended with updates from the newer Virtio spec v1.1 /*- * BSD LICENSE * * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * VirtIO Header, located in BAR 0. 
*/ pub const VIRTIO_PCI_HOST_FEATURES: u64 = 0; /* host's supported features (32bit, RO)*/ pub const VIRTIO_PCI_GUEST_FEATURES: u64 = 4; /* guest's supported features (32, RW) */ pub const VIRTIO_PCI_QUEUE_PFN: u64 = 8; /* physical address of VQ (32, RW) */ pub const VIRTIO_PCI_QUEUE_NUM: u64 = 12; /* number of ring entries (16, RO) */ pub const VIRTIO_PCI_QUEUE_SEL: u64 = 14; /* current VQ selection (16, RW) */ pub const VIRTIO_PCI_QUEUE_NOTIFY: u64 = 16; /* notify host regarding VQ (16, RW) */ pub const VIRTIO_PCI_STATUS: u64 = 18; /* device status register (8, RW) */ pub const VIRTIO_PCI_ISR: u64 = 19; /* interrupt status register, reading also clears the register (8, RO) */ /* Only if MSIX is enabled: */ pub const VIRTIO_MSI_CONFIG_VECTOR: u64 = 20; /* configuration change vector (16, RW) */ pub const VIRTIO_MSI_QUEUE_VECTOR: u64 = 22; /* vector for selected VQ notifications (16, RW) */ /* Status byte for guest to report progress. */ pub const VIRTIO_CONFIG_STATUS_RESET: u8 = 0x00; pub const VIRTIO_CONFIG_STATUS_ACK: u8 = 0x01; pub const VIRTIO_CONFIG_STATUS_DRIVER: u8 = 0x02; pub const VIRTIO_CONFIG_STATUS_DRIVER_OK: u8 = 0x04; pub const VIRTIO_CONFIG_STATUS_FEATURES_OK: u8 = 0x08; pub const VIRTIO_CONFIG_STATUS_FAILED: u8 = 0x80; /* * How many bits to shift physical queue address written to QUEUE_PFN. * 12 is historical, and due to x86 page size. */ pub const VIRTIO_PCI_QUEUE_ADDR_SHIFT: usize = 12; /* This marks a buffer as continuing via the next field. */ pub const VIRTQ_DESC_F_NEXT: u16 = 1; /* This marks a buffer as write-only (otherwise read-only). */ pub const VIRTQ_DESC_F_WRITE: u16 = 2; /* This means the buffer contains a list of buffer descriptors. 
*/ pub const VIRTQ_DESC_F_INDIRECT: u16 = 4; /* The feature bitmap for virtio net */ pub const VIRTIO_NET_F_CSUM: usize = 0; /* Host handles pkts w/ partial csum */ pub const VIRTIO_NET_F_GUEST_CSUM: usize = 1; /* Guest handles pkts w/ partial csum */ pub const VIRTIO_NET_F_MTU: usize = 3; /* Initial MTU advice. */ pub const VIRTIO_NET_F_MAC: usize = 5; /* Host has given MAC address. */ pub const VIRTIO_NET_F_GUEST_TSO4: usize = 7; /* Guest can handle TSOv4 in. */ pub const VIRTIO_NET_F_GUEST_TSO6: usize = 8; /* Guest can handle TSOv6 in. */ pub const VIRTIO_NET_F_GUEST_ECN: usize = 9; /* Guest can handle TSO[6] w/ ECN in. */ pub const VIRTIO_NET_F_GUEST_UFO: usize = 10; /* Guest can handle UFO in. */ pub const VIRTIO_NET_F_HOST_TSO4: usize = 11; /* Host can handle TSOv4 in. */ pub const VIRTIO_NET_F_HOST_TSO6: usize = 12; /* Host can handle TSOv6 in. */ pub const VIRTIO_NET_F_HOST_ECN: usize = 13; /* Host can handle TSO[6] w/ ECN in. */ pub const VIRTIO_NET_F_HOST_UFO: usize = 14; /* Host can handle UFO in. */ pub const VIRTIO_NET_F_MRG_RXBUF: usize = 15; /* Host can merge receive buffers. */
pub const VIRTIO_NET_F_STATUS: usize = 16; /* virtio_net_config.status available */ pub const VIRTIO_NET_F_CTRL_VQ: usize = 17; /* Control channel available */ pub const VIRTIO_NET_F_CTRL_RX: usize = 18; /* Control channel RX mode support */ pub const VIRTIO_NET_F_CTRL_VLAN: usize = 19; /* Control channel VLAN filtering */ pub const VIRTIO_NET_F_CTRL_RX_EXTRA: usize = 20; /* Extra RX mode control support */ pub const VIRTIO_NET_F_GUEST_ANNOUNCE: usize = 21; /* Guest can announce device on the network */ pub const VIRTIO_NET_F_MQ: usize = 22; /* Device supports Receive Flow Steering */ pub const VIRTIO_NET_F_CTRL_MAC_ADDR: usize = 23; /* Set MAC address */ /* Do we get callbacks when the ring is completely used, even if we've suppressed them? */ pub const VIRTIO_F_NOTIFY_ON_EMPTY: usize = 24; /* Can the device handle any descriptor layout? */ pub const VIRTIO_F_ANY_LAYOUT: usize = 27; /* We support indirect buffer descriptors */ pub const VIRTIO_RING_F_INDIRECT_DESC: usize = 28; pub const VIRTIO_F_VERSION_1: usize = 32; pub const VIRTIO_F_IOMMU_PLATFORM: usize = 33; /** * Control the RX mode, ie. promiscuous, allmulti, etc... * All commands require an "out" sg entry containing a 1 byte * state value, zero = disable, non-zero = enable. Commands * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature. * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA. */ pub const VIRTIO_NET_CTRL_RX: u8 = 0; pub const VIRTIO_NET_CTRL_RX_PROMISC: u8 = 0; pub const VIRTIO_NET_CTRL_RX_ALLMULTI: u8 = 1; pub const VIRTIO_NET_CTRL_RX_ALLUNI: u8 = 2; pub const VIRTIO_NET_CTRL_RX_NOMULTI: u8 = 3; pub const VIRTIO_NET_CTRL_RX_NOUNI: u8 = 4; pub const VIRTIO_NET_CTRL_RX_NOBCAST: u8 = 5; pub const VIRTIO_NET_OK: u8 = 0; pub const VIRTIO_NET_ERR: u8 = 1; pub const VIRTIO_MAX_CTRL_DATA: usize = 2048; /** * This is the first element of the scatter-gather list. If you don't * specify GSO or CSUM features, you can simply ignore the header. 
*/ #[repr(C)] pub struct virtio_net_hdr { pub flags: u8, pub gso_type: u8, pub hdr_len: u16, // Ethernet + IP + tcp/udp hdrs pub gso_size: u16, // Bytes to append to hdr_len per frame pub csum_start: u16, // Position to start checksumming from pub csum_offset: u16, // Offset after that to place checksum } pub const VIRTIO_NET_HDR_F_NEEDS_CSUM: u8 = 1; /**< Use csum_start,csum_offset*/ pub const VIRTIO_NET_HDR_F_DATA_VALID: u8 = 2; /**< Checksum is valid */ pub const VIRTIO_NET_HDR_GSO_NONE: u8 = 0; /**< Not a GSO frame */ pub const VIRTIO_NET_HDR_GSO_TCPV4: u8 = 1; /**< GSO frame, IPv4 TCP (TSO) */ pub const VIRTIO_NET_HDR_GSO_UDP: u8 = 3; /**< GSO frame, IPv4 UDP (UFO) */ pub const VIRTIO_NET_HDR_GSO_TCPV6: u8 = 4; /**< GSO frame, IPv6 TCP */ pub const VIRTIO_NET_HDR_GSO_ECN: u8 = 0x80; /**< TCP has ECN set */ /* The Host uses this in used->flags to advise the Guest: don't kick me * when you add a buffer. It's unreliable, so it's simply an * optimization. Guest will still kick if it's out of buffers. */ pub const VIRTQ_USED_F_NO_NOTIFY: u16 = 1; /* The Guest uses this in avail->flags to advise the Host: don't * interrupt me when you consume a buffer. It's unreliable, so it's * simply an optimization. */ pub const VIRTQ_AVAIL_F_NO_INTERRUPT: u16 = 1; use std::num::Wrapping; /* VirtIO ring descriptors: 16 bytes. * These can chain together via "next". */ #[repr(C)] #[derive(Default)] pub struct VirtqDesc { pub addr: usize, /* Address (guest-physical). */ pub len: u32, /* Length. */ pub flags: u16, /* The flags as indicated above. */ pub next: u16, /* We chain unused descriptors via this. */ } #[repr(C)] pub struct VirtqAvail { pub flags: u16, pub idx: Wrapping<u16>, pub ring: [u16; 0], } #[repr(C)] pub struct VirtqUsed { pub flags: u16, pub idx: Wrapping<u16>, pub ring: [VirtqUsedElem; 0], } #[repr(C)] #[derive(Clone, Default)] pub struct VirtqUsedElem { /* Index of start of used descriptor chain. 
*/ pub id: u16, pub _padding: u16, /* Total length of the descriptor chain which was written to. */ pub len: u32, } pub trait Ring { type Element; fn ring(&self) -> *const Self::Element; fn ring_mut(&mut self) -> *mut Self::Element; } impl Ring for VirtqAvail { type Element = u16; fn ring(&self) -> *const u16 { self.ring.as_ptr() } fn ring_mut(&mut self) -> *mut u16 { self.ring.as_mut_ptr() } } impl Ring for VirtqUsed { type Element = VirtqUsedElem; fn ring(&self) -> *const VirtqUsedElem { self.ring.as_ptr() } fn ring_mut(&mut self) -> *mut VirtqUsedElem { self.ring.as_mut_ptr() } } #[repr(C)] #[derive(Debug)] pub struct VirtioNetCtrl<T: VirtioNetCtrlCommand> { pub class: u8, pub command: u8, pub command_data: T, pub ack: u8, } impl<T: VirtioNetCtrlCommand> From<T> for VirtioNetCtrl<T> { fn from(command_data: T) -> VirtioNetCtrl<T> { VirtioNetCtrl { class: T::CLASS, command: T::COMMAND, command_data, ack: 0, } } } /// A specific command to be sent through the control queue (wrapped in a [`VirtioNetCtrl`]) pub trait VirtioNetCtrlCommand { const CLASS: u8; const COMMAND: u8; } #[derive(Debug)] pub struct VirtioNetCtrlPromisc(u8); impl VirtioNetCtrlCommand for VirtioNetCtrlPromisc { const CLASS: u8 = VIRTIO_NET_CTRL_RX; const COMMAND: u8 = VIRTIO_NET_CTRL_RX_PROMISC; } impl VirtioNetCtrlPromisc { pub fn new(on: bool) -> VirtioNetCtrlPromisc { VirtioNetCtrlPromisc(on as u8) } } #[cfg(test)] mod tests { use super::*; use std::mem; #[test] fn static_type_sizes() { assert_eq!(mem::size_of::<VirtioNetCtrl<VirtioNetCtrlPromisc>>(), 4); } }
random_line_split
virtio_constants.rs
#![allow(dead_code)] #![allow(clippy::all)] // Copied from the ixy C driver // Amended with updates from the newer Virtio spec v1.1 /*- * BSD LICENSE * * Copyright(c) 2010-2014 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* * VirtIO Header, located in BAR 0. 
*/ pub const VIRTIO_PCI_HOST_FEATURES: u64 = 0; /* host's supported features (32bit, RO)*/ pub const VIRTIO_PCI_GUEST_FEATURES: u64 = 4; /* guest's supported features (32, RW) */ pub const VIRTIO_PCI_QUEUE_PFN: u64 = 8; /* physical address of VQ (32, RW) */ pub const VIRTIO_PCI_QUEUE_NUM: u64 = 12; /* number of ring entries (16, RO) */ pub const VIRTIO_PCI_QUEUE_SEL: u64 = 14; /* current VQ selection (16, RW) */ pub const VIRTIO_PCI_QUEUE_NOTIFY: u64 = 16; /* notify host regarding VQ (16, RW) */ pub const VIRTIO_PCI_STATUS: u64 = 18; /* device status register (8, RW) */ pub const VIRTIO_PCI_ISR: u64 = 19; /* interrupt status register, reading also clears the register (8, RO) */ /* Only if MSIX is enabled: */ pub const VIRTIO_MSI_CONFIG_VECTOR: u64 = 20; /* configuration change vector (16, RW) */ pub const VIRTIO_MSI_QUEUE_VECTOR: u64 = 22; /* vector for selected VQ notifications (16, RW) */ /* Status byte for guest to report progress. */ pub const VIRTIO_CONFIG_STATUS_RESET: u8 = 0x00; pub const VIRTIO_CONFIG_STATUS_ACK: u8 = 0x01; pub const VIRTIO_CONFIG_STATUS_DRIVER: u8 = 0x02; pub const VIRTIO_CONFIG_STATUS_DRIVER_OK: u8 = 0x04; pub const VIRTIO_CONFIG_STATUS_FEATURES_OK: u8 = 0x08; pub const VIRTIO_CONFIG_STATUS_FAILED: u8 = 0x80; /* * How many bits to shift physical queue address written to QUEUE_PFN. * 12 is historical, and due to x86 page size. */ pub const VIRTIO_PCI_QUEUE_ADDR_SHIFT: usize = 12; /* This marks a buffer as continuing via the next field. */ pub const VIRTQ_DESC_F_NEXT: u16 = 1; /* This marks a buffer as write-only (otherwise read-only). */ pub const VIRTQ_DESC_F_WRITE: u16 = 2; /* This means the buffer contains a list of buffer descriptors. 
*/ pub const VIRTQ_DESC_F_INDIRECT: u16 = 4; /* The feature bitmap for virtio net */ pub const VIRTIO_NET_F_CSUM: usize = 0; /* Host handles pkts w/ partial csum */ pub const VIRTIO_NET_F_GUEST_CSUM: usize = 1; /* Guest handles pkts w/ partial csum */ pub const VIRTIO_NET_F_MTU: usize = 3; /* Initial MTU advice. */ pub const VIRTIO_NET_F_MAC: usize = 5; /* Host has given MAC address. */ pub const VIRTIO_NET_F_GUEST_TSO4: usize = 7; /* Guest can handle TSOv4 in. */ pub const VIRTIO_NET_F_GUEST_TSO6: usize = 8; /* Guest can handle TSOv6 in. */ pub const VIRTIO_NET_F_GUEST_ECN: usize = 9; /* Guest can handle TSO[6] w/ ECN in. */ pub const VIRTIO_NET_F_GUEST_UFO: usize = 10; /* Guest can handle UFO in. */ pub const VIRTIO_NET_F_HOST_TSO4: usize = 11; /* Host can handle TSOv4 in. */ pub const VIRTIO_NET_F_HOST_TSO6: usize = 12; /* Host can handle TSOv6 in. */ pub const VIRTIO_NET_F_HOST_ECN: usize = 13; /* Host can handle TSO[6] w/ ECN in. */ pub const VIRTIO_NET_F_HOST_UFO: usize = 14; /* Host can handle UFO in. */ pub const VIRTIO_NET_F_MRG_RXBUF: usize = 15; /* Host can merge receive buffers. */ pub const VIRTIO_NET_F_STATUS: usize = 16; /* virtio_net_config.status available */ pub const VIRTIO_NET_F_CTRL_VQ: usize = 17; /* Control channel available */ pub const VIRTIO_NET_F_CTRL_RX: usize = 18; /* Control channel RX mode support */ pub const VIRTIO_NET_F_CTRL_VLAN: usize = 19; /* Control channel VLAN filtering */ pub const VIRTIO_NET_F_CTRL_RX_EXTRA: usize = 20; /* Extra RX mode control support */ pub const VIRTIO_NET_F_GUEST_ANNOUNCE: usize = 21; /* Guest can announce device on the network */ pub const VIRTIO_NET_F_MQ: usize = 22; /* Device supports Receive Flow Steering */ pub const VIRTIO_NET_F_CTRL_MAC_ADDR: usize = 23; /* Set MAC address */ /* Do we get callbacks when the ring is completely used, even if we've suppressed them? */ pub const VIRTIO_F_NOTIFY_ON_EMPTY: usize = 24; /* Can the device handle any descriptor layout? 
*/ pub const VIRTIO_F_ANY_LAYOUT: usize = 27; /* We support indirect buffer descriptors */ pub const VIRTIO_RING_F_INDIRECT_DESC: usize = 28; pub const VIRTIO_F_VERSION_1: usize = 32; pub const VIRTIO_F_IOMMU_PLATFORM: usize = 33; /** * Control the RX mode, ie. promiscuous, allmulti, etc... * All commands require an "out" sg entry containing a 1 byte * state value, zero = disable, non-zero = enable. Commands * 0 and 1 are supported with the VIRTIO_NET_F_CTRL_RX feature. * Commands 2-5 are added with VIRTIO_NET_F_CTRL_RX_EXTRA. */ pub const VIRTIO_NET_CTRL_RX: u8 = 0; pub const VIRTIO_NET_CTRL_RX_PROMISC: u8 = 0; pub const VIRTIO_NET_CTRL_RX_ALLMULTI: u8 = 1; pub const VIRTIO_NET_CTRL_RX_ALLUNI: u8 = 2; pub const VIRTIO_NET_CTRL_RX_NOMULTI: u8 = 3; pub const VIRTIO_NET_CTRL_RX_NOUNI: u8 = 4; pub const VIRTIO_NET_CTRL_RX_NOBCAST: u8 = 5; pub const VIRTIO_NET_OK: u8 = 0; pub const VIRTIO_NET_ERR: u8 = 1; pub const VIRTIO_MAX_CTRL_DATA: usize = 2048; /** * This is the first element of the scatter-gather list. If you don't * specify GSO or CSUM features, you can simply ignore the header. 
*/ #[repr(C)] pub struct virtio_net_hdr { pub flags: u8, pub gso_type: u8, pub hdr_len: u16, // Ethernet + IP + tcp/udp hdrs pub gso_size: u16, // Bytes to append to hdr_len per frame pub csum_start: u16, // Position to start checksumming from pub csum_offset: u16, // Offset after that to place checksum } pub const VIRTIO_NET_HDR_F_NEEDS_CSUM: u8 = 1; /**< Use csum_start,csum_offset*/ pub const VIRTIO_NET_HDR_F_DATA_VALID: u8 = 2; /**< Checksum is valid */ pub const VIRTIO_NET_HDR_GSO_NONE: u8 = 0; /**< Not a GSO frame */ pub const VIRTIO_NET_HDR_GSO_TCPV4: u8 = 1; /**< GSO frame, IPv4 TCP (TSO) */ pub const VIRTIO_NET_HDR_GSO_UDP: u8 = 3; /**< GSO frame, IPv4 UDP (UFO) */ pub const VIRTIO_NET_HDR_GSO_TCPV6: u8 = 4; /**< GSO frame, IPv6 TCP */ pub const VIRTIO_NET_HDR_GSO_ECN: u8 = 0x80; /**< TCP has ECN set */ /* The Host uses this in used->flags to advise the Guest: don't kick me * when you add a buffer. It's unreliable, so it's simply an * optimization. Guest will still kick if it's out of buffers. */ pub const VIRTQ_USED_F_NO_NOTIFY: u16 = 1; /* The Guest uses this in avail->flags to advise the Host: don't * interrupt me when you consume a buffer. It's unreliable, so it's * simply an optimization. */ pub const VIRTQ_AVAIL_F_NO_INTERRUPT: u16 = 1; use std::num::Wrapping; /* VirtIO ring descriptors: 16 bytes. * These can chain together via "next". */ #[repr(C)] #[derive(Default)] pub struct VirtqDesc { pub addr: usize, /* Address (guest-physical). */ pub len: u32, /* Length. */ pub flags: u16, /* The flags as indicated above. */ pub next: u16, /* We chain unused descriptors via this. */ } #[repr(C)] pub struct VirtqAvail { pub flags: u16, pub idx: Wrapping<u16>, pub ring: [u16; 0], } #[repr(C)] pub struct VirtqUsed { pub flags: u16, pub idx: Wrapping<u16>, pub ring: [VirtqUsedElem; 0], } #[repr(C)] #[derive(Clone, Default)] pub struct VirtqUsedElem { /* Index of start of used descriptor chain. 
*/ pub id: u16, pub _padding: u16, /* Total length of the descriptor chain which was written to. */ pub len: u32, } pub trait Ring { type Element; fn ring(&self) -> *const Self::Element; fn ring_mut(&mut self) -> *mut Self::Element; } impl Ring for VirtqAvail { type Element = u16; fn ring(&self) -> *const u16 { self.ring.as_ptr() } fn ring_mut(&mut self) -> *mut u16 { self.ring.as_mut_ptr() } } impl Ring for VirtqUsed { type Element = VirtqUsedElem; fn ring(&self) -> *const VirtqUsedElem { self.ring.as_ptr() } fn ring_mut(&mut self) -> *mut VirtqUsedElem { self.ring.as_mut_ptr() } } #[repr(C)] #[derive(Debug)] pub struct VirtioNetCtrl<T: VirtioNetCtrlCommand> { pub class: u8, pub command: u8, pub command_data: T, pub ack: u8, } impl<T: VirtioNetCtrlCommand> From<T> for VirtioNetCtrl<T> { fn
(command_data: T) -> VirtioNetCtrl<T> { VirtioNetCtrl { class: T::CLASS, command: T::COMMAND, command_data, ack: 0, } } } /// A specific command to be sent through the control queue (wrapped in a [`VirtioNetCtrl`]) pub trait VirtioNetCtrlCommand { const CLASS: u8; const COMMAND: u8; } #[derive(Debug)] pub struct VirtioNetCtrlPromisc(u8); impl VirtioNetCtrlCommand for VirtioNetCtrlPromisc { const CLASS: u8 = VIRTIO_NET_CTRL_RX; const COMMAND: u8 = VIRTIO_NET_CTRL_RX_PROMISC; } impl VirtioNetCtrlPromisc { pub fn new(on: bool) -> VirtioNetCtrlPromisc { VirtioNetCtrlPromisc(on as u8) } } #[cfg(test)] mod tests { use super::*; use std::mem; #[test] fn static_type_sizes() { assert_eq!(mem::size_of::<VirtioNetCtrl<VirtioNetCtrlPromisc>>(), 4); } }
from
identifier_name
debugging_errors.py
# Debugging & Errors While you've likely gotten an error or two in your programming journey thus far (we all have!), as you start writing functions, having strong debugging skills and understanding the error messages you receive while debugging will be critical. In this chapter, we'll discuss the various types of errors you'll encounter in Python, introduce the idea of `try` and `except` while debugging and work through an example of the thought process behind debugging upon ecountering an error. <div class="alert alert-success"> <b>Debugging</b> is the process of finding and fixing errors in a computer program. </div> ## Motivating Example #1 To start thinking about debugging, let's consider an example. Here we have a function. The pieces of this function should all look familiar at this point. There's variable assignment, a for loop, some use of the addition operator (`+`), indexing, and a `return` statement. Take a look at this code and ask yourself: Will I be able to define and execute this function? In other words, is this a valid function in Python? def example_function(input_list): running_sum = 0 for item in input_list: running_sum = running_sum + item special_value = input_list[3] return running_sum + special_value Well, to answer this question you could consider the following call of the function: example_function([1, 2, 3, 4]) The above example of the function executes and returns the value 14, the sum of all the values in the input list plus the value stored in the fourth position of the input list. But, what about the following function call: example_function([1, 2, 3]) Here, we get an `IndexError`, as there is no fourth index of the input list. Python runs into an error when trying to execute the following line of the function: `special_value = input_list[3]` So, the successful execute of the function is dependent upon the input provided. 
Let's look at one more example: example_function(['s', 'h', 'a', 'n']) The following example will also error; however, this time it's a `TypeError` rather than an `IndexError`. This occurs when Python tries to execute the `running_sum = running_sum + item` line of the function. In this case, a string is attempted to be added to an integer, which is not something Python can do. Thus, another error. Being able to adapt your code so that it does what you want it to do requires that you both understand these errors *and* know how to fix it. So, let's get started understanding what each of these errors means. ## Errors Errors are enountered when the code you've tried to execute is unable to run. These interruptions can occur for a number of different reasons. We'll explore each of these now. <div class="alert alert-success"> <b>Errors</b> are problems with code definition or execution that interrupt running Python code. </div> ### Syntax Errors Syntax errors occur when the code you've written fails to follow the rules of Python. It will fail under any and all circumstances. These include `SyntaxErrors` and `IndentationErrors`. <div class="alert alert-success"> Syntax & Indentation Errors reflect code that doesn't follow Python structure, and will necessarily fail. </div> ### Syntax Errors For example, if you try to execute the following conditional statement, it will fail. This will happen regardless of the code you include within your conditional or what conditional you specify becuase a colon is missing after your `if` statement. Notice in the output that Python does it's best to point you in the right direction using a `^` to highlight where in your code Python encoungered an error. And, Python lets you know this is a `SyntaxError`, so you're clued into the fact that there's something wrong with your code's structure. 
# will produce a syntax error if True print('Yep.') ### Indentation Errors Indentation errors occur when Python was expecting something to be have a certain indentation level but instead encountered something different. Remember, Python cares about whitespace, so if you fail to adhere to what Python is expecting, you will get an `IndentationError`. # will produce a syntax error # and specifically an indentation error my_list = [1, 2] for value in my_list: print(value) In the above example, Python again gives you a readout about what it was expecting and where you appear to have gone wrong, letting you know that it encountered an `IndentationError` as it expected `print(value)` to be indented within your `for` loop. For `SyntaxError`s and `IndentationError`s, the rest of your code may very well be fine. Once you fix the structure or syntax of your code, the error will likely be resolved. ## Exceptions Unlike syntax errors, exceptions are errors that occur due to the specific code you tried to execute. In these cases, the syntax or structure of your code looks fine, but an error occurred when Python tried to execute your code, resulting in an error. <div class="alert alert-success"> Exceptions are errors that occur when a code is executed. </div> ### ZeroDivisionError A `ZeroDivisionError` occurs when you try to divide by zero. Sometimes this will be very obvious, such as if you directly try to divide by zero, as Python does not know how to divide by zero. # produces ZeroDivisionError 1 / 0 However, more likely you'll encounter a `ZeroDivisionError` when looping through a list or using a conditional. In these cases, you'll have to dig through your code and how you tried to execute that code to determine where your code tried to divide by zero. For example, in the following cell, you see code that is syntactically fine. 
However, when the loop gets to the third index in my_list, the `temp = val / (val - 4)` attempts to divide by zero, leading Python to return a `ZeroDivisionError` # produces a ZeroDivisionError running_sum = 0 my_list = [1, 2, 3, 4, 5] for val in my_list: if val % 2 == 0: temp = val / (val - 4) running_sum += temp ### NameError A `NameError` occurs when you try to access a name that Python does not know. For example, if you define a variable with the name `variable` and then try to access `varaible` (`variable` with a typo), you will receive a `NameError`. # Define a variable variable = 12 # If you typo a name, you will get a NameError varaible Whenever you see a `NameError`, consider whether you've misspelled or mistyped something. Look through your code carefully as they can somteimes be hard to spot visually. And, while it's annoying, it's helpful that Python doesn't just _guess_ that you _meant_ 'variable'....because sometimes Python would guess wrong. It's better for Python to just give us the error. Finally, you'll also get a `NamerError` if you try to use the equality operator (`==`) when you meant to use the assignment operator (`=`). Here, since `new_variable` hasn't yet been defined, when Python tries to determine if it is equl to `1`, a `NameError` is returned, as `new_variable` does not exist. # You also get a name error if you try to use the wrong operator for assignment new_variable == 1 ### IndexError Similarly, an `IndexError` occurs when you try to access an index that doesn't exist. For example, the following list has three elements, if you try to access the fourth element (index position 5), you'll recieve an `IndexError` with a note that the index is out of range: my_list = [1, 2, 3] my_list[5] Note that this applies to any collection where indexing applies, such as tuples, dictionaries, or strings. If you try to access the value for a key that does not exist in a dictionary, for example, you will again receive an Error. 
Here, it is specifically a `KeyError`. # Relatedly, 'KeyError' occurs if you ask for a dictionary key that doesn't exist my_dictionary = {'name1' : 1, 'name2' : 2} my_dictionary['name3'] ### ValueError A `ValueError` occurs when you try to use an illegal value for something. For example, if you try to make an integer out of a string, you'll receive a `ValueError`: int('cat') ### TypeError Finally, a `TypeError` occurs when you try to operate on a variable in a way that Python is unabe to interpret given its type. For example, `+` concatenates strings and adds integers. When you try to combine those two types of variables, Python is unable to determine whether it shoudl concatenate or add. As such, a `TypeError` is returned. 'a_string' + 12 ## Stack Trace The **stack trace** is a log of what Python did as it went through your code. This ets printed out if Python runs into an error. running_sum = 0 my_list = [1, 2, 3, 4, 5] for val in my_list: if val % 2 == 0: temp = val / (val - 4) #+= allows you to add the value on the right to the variable on the left # and assign it to the variable on the left running_sum += temp # equivalent to: # running_sum = running_sum + temp Sometimes these get really complex. With practice, you'll get better at interpreting these traces, but for now notice that there is often either an arrow (`---->`) or a caret (`^`) indicating at which line or at which point in your code Python encountered the error. Focusing at these points in the error message and reading the error message at the end (i.e. `ZeroDivisionError: division by zero` in the example above) is a good place to start when trying to decipher what the error means. As you're learning, do your very best to read the message and try to understand it. It can be tempting to be overwhelmed and ignore these if you don't understand the error at first glance. Spending a few seconds longer to understand it can save you a lot of time in the long run. ## Try / Except
### `try`/`except` Block The general structure of a `try`/`except` blog is as follows: ```python try: # Tries to do this code pass # pass just says is not an operation; carry on except: # If there is an error (an exception), keep going and do this instead pass ``` For example, if, we wanted to ask the user to input a number, we could do so using `input()`. The string inside the `input` function ('Please type a number: ') is what is displayed to the left of the box where the user enters their input. Whatever the user types will, in the code example below, be stored in the variable `my_num`. In the example below, if the user were to type 'shannon', the code would print `my_num is: shannon`. And, that doesn't make a ton of sense...since 'shannon' is not a number. Fortunately, we can use a `try`/`except` block to handle this and only accept the input from the user once it is, in fact, a number. # Example: we want to get an input number from the user my_num = input('Please type a number: ') print('\nmy_num is: ', my_num) To build up to this, we first need to think about the logic. We want to *first* `try` to get the `input` from the user. And, we want to typecast (meaning specify the type that input should be) that input into an integer using `int()`. However, what if we input the string 'shannon' again? As we saw above `int('shannon')` would raise an exception. # this raises an error int('shannon') This is where `try`/`except` comes in handy! We can first `try` to run the code in the `try` code block; however, *if an exception is raised* (which will happen if we type 'shannon' as input), the code in the `except` block will execute instead. # with a string as input try: int(input('Please type an integer: ')) except: print('nahhh') In the above example, we see that rather than raising a `ValueError`, instead, the code in the `except` block has executed instead, printing 'nahhh'. 
Note that if the user input a number, like 29, for example, the `except` block would never execute, since the `try` block would not have raised an exception, as we see here, where 'nahhh' is not printed: # with a number as input try: int(input('Please type an integer: ')) except: print('nahhh') In the above example, the code terminates even when the user has not followed instructions. What if we wanted to keep asking the user until they get it right? This sounds like the time for a `while` loop, where we'll loop until the user input is valid! In the example here we see that `as_for_num` is `True` to start, so our `while` loop will continue to loop until `as_for_num` becomes `False`. We see that happens inside the `try` block, but only after `my_num` stores an integer. If, in trying to get an input from the user, the code raises an exception `'Oops! That was no valid number. Try again'` will instead be printed, and the loop will execute again asking for the user's input once more. The `print()` statement at the end of this code cell will only execute once the `while` loop has terminated. ask_for_num = True while ask_for_num: try: my_num = int(input('Please enter a number: ')) ask_for_num = False except ValueError: print('Oops! That was no valid number. Try again!') print('\nmy_num is: ', my_num) ### Example: `try`/`except` In the above worked example, we saw how a `try`/`except` works, specifically seeing its functionality within a `while` loop. However, this code construct can be used in combination with various code constructs we've discussed thus far, including within functions. For example, if you were to define the function `divide()` as you see here... def divide(num1, num2): return num1 / num2 ...you would likely quickly intuit that this function would execute without issue for some input values, but raise a `ZeroDivisionError` whenever `num2` was zero. 
# executes without issue divide(2, 2) # raises an error divide(2, 0) What if we want our function to execute, even if `num2` is zero? In this case, we could include a `try`/`except` within our function. Here, we'll call our function `safe_divide`. Note that it still takes in two parameters (`num1`, `num2`); however, if `output = num1 / num2` raises an exception, `output` will store `None`. Whatever is stored in `output` after the `try`/`except` block will be returned from the function. # define a function safe_divide def safe_divide(num1, num2): try: output = num1 / num2 except ZeroDivisionError: output = None return output # same output as above safe_divide(2, 2) # no longer raises an error safe_divide(2, 0) ## Debugging Strategies When you've encountered an error of some sort in your code and you aren't sure where to go next to fix your code, the following strategies/approach could help. For example, what if you've been asked to "write code that will check whether the value stored in `my_val` is both positive and even. If it is, store the integer `1` in the variable `output`" And, you think to yourself: *How do I check whether a variable stores an even value?* You've got *some idea* of how to approach this, but you aren't quite ready to write actual python code with correct syntax. Instead you attempt **strategy #1: write some pseudocode**. Pseudocode helps structure your thinking and gets your ideas written down, but doesn't have to have correct syntax. The psedocode (here just a bunch of comments) you start with is below: # variable my_val # if my_val is positive (> 0) and even: # output stores 1 With the pseudocode above you've got something that looks a little like python, but wouldn't execute. However, the ideas are there. We can see that `if` whatever is stored in `my_val` is positive and even, `output` will store the value 1. Okay, that's a start. 
And, while you know how to determine if a variable is positive (defined as greater than zero), you aren't totally sure how to determien if a variable stores an even value. So, you turn to **strategy #2**: **add "in python" to your search on the Internet** Your search term: "How to check whether a variable stores an even value _in python_." The Internet comes through for you explaining that if you check if the remainder after dividing a number by 2 is equal to zero, that number is even...and likely even gives you some python code in the example (`val % 2 == 0`). With this, you try to incorporate it into what you've already started. # this code errors my_val = 6 if my_val > 0 and my_val % 2 = 0: output == 1 So, you've now got some code written, and you really thought it was going to be correct, but you've now got an error. So, you read the SyntaxError and you're not really sure what the issue is. So, you consider **strategy #3: adding `print()` statements**. You think through the logic of your code and you say to yourself "OK, if `my_val` is 6, then the condition `my_val > 0` should be true, because `my_val` is greater than zero and `my_val % 2 = 0` should be true, since 6 is even. If those are both true, the code within the conditional code block should execute. If that happens, we could add a `print()` statement within the conditional, knowing that it would only print() something if my logic were true!" With that thought, you add a `print()` statement and re-execute your code: # this code errors my_val = 6 if my_val > 0 and my_val % 2 = 0: print('this should print!') output == 1 You see that you still get the `SyntaxError`, which makes sense, since we didn't actually change the code yet, but you *also* notice that the `print()` statement was never executed. At this point you know the error in your code happens *before* that `print()` statement...since nothing `print`ed. 
So, you revisit your conditional and the error message, and at this point you realize that the caret (`^`) is indicating there's some issue with the statement `my_val % 2 = 0`. Suddenly it finally hits you! You wanted to check for equality (`==`), not assign (`=`). You fix your code changing the assignment operator to an equality operator. # this code errors my_val = 6 if my_val > 0 and my_val % 2 == 0: print('this should print!') output == 1 At this point, we *still* get an error *but*, you'll notice 1) it's a *different* error and 2) our `print()` statement is now encountered. We know here that the conditional is now correct! So, we read our error message and see it says "'output' is not defined." And, you look at your code and realize, ah - the same mistake in reverse. The final statement `output == 1` is a statement of assignment, storing `1` in `output`. You fix your code and re-execute: my_val = 6 if my_val > 0 and my_val % 2 == 0: print('this should print!') output = 1 Note that your `print()` statement will `print()` unless it's removed, so your last step to clean up your code now that you're done with debugging is to remove it. With that, you've accomplished your task! my_val = 6 if my_val > 0 and my_val % 2 == 0: output = 1 output ## Exercises Q1. **What type of error would each of the following return?** ```python int('six') if num > 0 print("Greater than 0") if num > 0: print("Greater than 0") ``` Q2. 
**Given the function `my_function` provided, understand the function and then debug the function so that each of the following test cases pass without producing an error**: ```python def my_function(input_1, input_2): """A long function that might error.""" if len(input_1) > 1 if input_1[0] = 0: answer = 0 elif len(input_2) == 2: answer = input_2[1] / input_1[0] elif len(input_1) == len(input_2): answer = input_1[0] + input_2[0] print(answer) ``` Test cases: ```python my_function([0, 1], [0]) my_function([0], [0, 1]) my_function([1], [1]) my_function([1], ['1']) ```
While *syntax errors* will necessarily fail, *exceptions* do not necessarily have to lead to breaking the program - they can be programmatically dealt with, using `try` and `except`. A `try`/`except` block allows you to try some code. If, in attempting to execute that code, an exeption is encountered, Python will instead execute the code in the `except` code block.
random_line_split
debugging_errors.py
# Debugging & Errors While you've likely gotten an error or two in your programming journey thus far (we all have!), as you start writing functions, having strong debugging skills and understanding the error messages you receive while debugging will be critical. In this chapter, we'll discuss the various types of errors you'll encounter in Python, introduce the idea of `try` and `except` while debugging and work through an example of the thought process behind debugging upon ecountering an error. <div class="alert alert-success"> <b>Debugging</b> is the process of finding and fixing errors in a computer program. </div> ## Motivating Example #1 To start thinking about debugging, let's consider an example. Here we have a function. The pieces of this function should all look familiar at this point. There's variable assignment, a for loop, some use of the addition operator (`+`), indexing, and a `return` statement. Take a look at this code and ask yourself: Will I be able to define and execute this function? In other words, is this a valid function in Python? def example_function(input_list):
Well, to answer this question you could consider the following call of the function: example_function([1, 2, 3, 4]) The above example of the function executes and returns the value 14, the sum of all the values in the input list plus the value stored in the fourth position of the input list. But, what about the following function call: example_function([1, 2, 3]) Here, we get an `IndexError`, as there is no fourth index of the input list. Python runs into an error when trying to execute the following line of the function: `special_value = input_list[3]` So, the successful execute of the function is dependent upon the input provided. Let's look at one more example: example_function(['s', 'h', 'a', 'n']) The following example will also error; however, this time it's a `TypeError` rather than an `IndexError`. This occurs when Python tries to execute the `running_sum = running_sum + item` line of the function. In this case, a string is attempted to be added to an integer, which is not something Python can do. Thus, another error. Being able to adapt your code so that it does what you want it to do requires that you both understand these errors *and* know how to fix it. So, let's get started understanding what each of these errors means. ## Errors Errors are enountered when the code you've tried to execute is unable to run. These interruptions can occur for a number of different reasons. We'll explore each of these now. <div class="alert alert-success"> <b>Errors</b> are problems with code definition or execution that interrupt running Python code. </div> ### Syntax Errors Syntax errors occur when the code you've written fails to follow the rules of Python. It will fail under any and all circumstances. These include `SyntaxErrors` and `IndentationErrors`. <div class="alert alert-success"> Syntax & Indentation Errors reflect code that doesn't follow Python structure, and will necessarily fail. 
</div> ### Syntax Errors For example, if you try to execute the following conditional statement, it will fail. This will happen regardless of the code you include within your conditional or what conditional you specify because a colon is missing after your `if` statement. Notice in the output that Python does its best to point you in the right direction using a `^` to highlight where in your code Python encountered an error. And, Python lets you know this is a `SyntaxError`, so you're clued into the fact that there's something wrong with your code's structure. # will produce a syntax error if True print('Yep.') ### Indentation Errors Indentation errors occur when Python was expecting something to have a certain indentation level but instead encountered something different. Remember, Python cares about whitespace, so if you fail to adhere to what Python is expecting, you will get an `IndentationError`. # will produce a syntax error # and specifically an indentation error my_list = [1, 2] for value in my_list: print(value) In the above example, Python again gives you a readout about what it was expecting and where you appear to have gone wrong, letting you know that it encountered an `IndentationError` as it expected `print(value)` to be indented within your `for` loop. For `SyntaxError`s and `IndentationError`s, the rest of your code may very well be fine. Once you fix the structure or syntax of your code, the error will likely be resolved. ## Exceptions Unlike syntax errors, exceptions are errors that occur due to the specific code you tried to execute. In these cases, the syntax or structure of your code looks fine, but an error occurred when Python tried to execute your code, resulting in an error. <div class="alert alert-success"> Exceptions are errors that occur when code is executed. </div> ### ZeroDivisionError A `ZeroDivisionError` occurs when you try to divide by zero. 
Sometimes this will be very obvious, such as if you directly try to divide by zero, as Python does not know how to divide by zero. # produces ZeroDivisionError 1 / 0 However, more likely you'll encounter a `ZeroDivisionError` when looping through a list or using a conditional. In these cases, you'll have to dig through your code and how you tried to execute that code to determine where your code tried to divide by zero. For example, in the following cell, you see code that is syntactically fine. However, when the loop gets to the third index in my_list, the `temp = val / (val - 4)` attempts to divide by zero, leading Python to return a `ZeroDivisionError` # produces a ZeroDivisionError running_sum = 0 my_list = [1, 2, 3, 4, 5] for val in my_list: if val % 2 == 0: temp = val / (val - 4) running_sum += temp ### NameError A `NameError` occurs when you try to access a name that Python does not know. For example, if you define a variable with the name `variable` and then try to access `varaible` (`variable` with a typo), you will receive a `NameError`. # Define a variable variable = 12 # If you typo a name, you will get a NameError varaible Whenever you see a `NameError`, consider whether you've misspelled or mistyped something. Look through your code carefully as they can sometimes be hard to spot visually. And, while it's annoying, it's helpful that Python doesn't just _guess_ that you _meant_ 'variable'....because sometimes Python would guess wrong. It's better for Python to just give us the error. Finally, you'll also get a `NameError` if you try to use the equality operator (`==`) when you meant to use the assignment operator (`=`). Here, since `new_variable` hasn't yet been defined, when Python tries to determine if it is equal to `1`, a `NameError` is returned, as `new_variable` does not exist. 
# You also get a name error if you try to use the wrong operator for assignment new_variable == 1 ### IndexError Similarly, an `IndexError` occurs when you try to access an index that doesn't exist. For example, the following list has three elements; if you try to access the sixth element (index position 5), you'll receive an `IndexError` with a note that the index is out of range: my_list = [1, 2, 3] my_list[5] Note that this applies to any collection where indexing applies, such as tuples, dictionaries, or strings. If you try to access the value for a key that does not exist in a dictionary, for example, you will again receive an error. Here, it is specifically a `KeyError`. # Relatedly, 'KeyError' occurs if you ask for a dictionary key that doesn't exist my_dictionary = {'name1' : 1, 'name2' : 2} my_dictionary['name3'] ### ValueError A `ValueError` occurs when you try to use an illegal value for something. For example, if you try to make an integer out of a string, you'll receive a `ValueError`: int('cat') ### TypeError Finally, a `TypeError` occurs when you try to operate on a variable in a way that Python is unable to interpret given its type. For example, `+` concatenates strings and adds integers. When you try to combine those two types of variables, Python is unable to determine whether it should concatenate or add. As such, a `TypeError` is returned. 'a_string' + 12 ## Stack Trace The **stack trace** is a log of what Python did as it went through your code. This gets printed out if Python runs into an error. running_sum = 0 my_list = [1, 2, 3, 4, 5] for val in my_list: if val % 2 == 0: temp = val / (val - 4) #+= allows you to add the value on the right to the variable on the left # and assign it to the variable on the left running_sum += temp # equivalent to: # running_sum = running_sum + temp Sometimes these get really complex. 
With practice, you'll get better at interpreting these traces, but for now notice that there is often either an arrow (`---->`) or a caret (`^`) indicating at which line or at which point in your code Python encountered the error. Focusing on these points in the error message and reading the error message at the end (i.e. `ZeroDivisionError: division by zero` in the example above) is a good place to start when trying to decipher what the error means. As you're learning, do your very best to read the message and try to understand it. It can be tempting to be overwhelmed and ignore these if you don't understand the error at first glance. Spending a few seconds longer to understand it can save you a lot of time in the long run. ## Try / Except While *syntax errors* will necessarily fail, *exceptions* do not necessarily have to lead to breaking the program - they can be programmatically dealt with, using `try` and `except`. A `try`/`except` block allows you to try some code. If, in attempting to execute that code, an exception is encountered, Python will instead execute the code in the `except` code block. ### `try`/`except` Block The general structure of a `try`/`except` block is as follows: ```python try: # Tries to do this code pass # pass is just a no-op; carry on except: # If there is an error (an exception), keep going and do this instead pass ``` For example, if we wanted to ask the user to input a number, we could do so using `input()`. The string inside the `input` function ('Please type a number: ') is what is displayed to the left of the box where the user enters their input. Whatever the user types will, in the code example below, be stored in the variable `my_num`. In the example below, if the user were to type 'shannon', the code would print `my_num is: shannon`. And, that doesn't make a ton of sense...since 'shannon' is not a number. 
Fortunately, we can use a `try`/`except` block to handle this and only accept the input from the user once it is, in fact, a number. # Example: we want to get an input number from the user my_num = input('Please type a number: ') print('\nmy_num is: ', my_num) To build up to this, we first need to think about the logic. We want to *first* `try` to get the `input` from the user. And, we want to typecast (meaning specify the type that input should be) that input into an integer using `int()`. However, what if we input the string 'shannon' again? As we saw above, `int('shannon')` would raise an exception. # this raises an error int('shannon') This is where `try`/`except` comes in handy! We can first `try` to run the code in the `try` code block; however, *if an exception is raised* (which will happen if we type 'shannon' as input), the code in the `except` block will execute instead. # with a string as input try: int(input('Please type an integer: ')) except: print('nahhh') In the above example, we see that rather than raising a `ValueError`, the code in the `except` block has executed instead, printing 'nahhh'. Note that if the user inputs a number, like 29, for example, the `except` block would never execute, since the `try` block would not have raised an exception, as we see here, where 'nahhh' is not printed: # with a number as input try: int(input('Please type an integer: ')) except: print('nahhh') In the above example, the code terminates even when the user has not followed instructions. What if we wanted to keep asking the user until they get it right? This sounds like the time for a `while` loop, where we'll loop until the user input is valid! In the example here we see that `ask_for_num` is `True` to start, so our `while` loop will continue to loop until `ask_for_num` becomes `False`. We see that happens inside the `try` block, but only after `my_num` stores an integer. If, in trying to get an input from the user, the code raises an exception `'Oops! 
That was no valid number. Try again'` will instead be printed, and the loop will execute again asking for the user's input once more. The `print()` statement at the end of this code cell will only execute once the `while` loop has terminated. ask_for_num = True while ask_for_num: try: my_num = int(input('Please enter a number: ')) ask_for_num = False except ValueError: print('Oops! That was no valid number. Try again!') print('\nmy_num is: ', my_num) ### Example: `try`/`except` In the above worked example, we saw how a `try`/`except` works, specifically seeing its functionality within a `while` loop. However, this code construct can be used in combination with various code constructs we've discussed thus far, including within functions. For example, if you were to define the function `divide()` as you see here... def divide(num1, num2): return num1 / num2 ...you would likely quickly intuit that this function would execute without issue for some input values, but raise a `ZeroDivisionError` whenever `num2` was zero. # executes without issue divide(2, 2) # raises an error divide(2, 0) What if we want our function to execute, even if `num2` is zero? In this case, we could include a `try`/`except` within our function. Here, we'll call our function `safe_divide`. Note that it still takes in two parameters (`num1`, `num2`); however, if `output = num1 / num2` raises an exception, `output` will store `None`. Whatever is stored in `output` after the `try`/`except` block will be returned from the function. # define a function safe_divide def safe_divide(num1, num2): try: output = num1 / num2 except ZeroDivisionError: output = None return output # same output as above safe_divide(2, 2) # no longer raises an error safe_divide(2, 0) ## Debugging Strategies When you've encountered an error of some sort in your code and you aren't sure where to go next to fix your code, the following strategies/approach could help. 
For example, what if you've been asked to "write code that will check whether the value stored in `my_val` is both positive and even. If it is, store the integer `1` in the variable `output`" And, you think to yourself: *How do I check whether a variable stores an even value?* You've got *some idea* of how to approach this, but you aren't quite ready to write actual Python code with correct syntax. Instead you attempt **strategy #1: write some pseudocode**. Pseudocode helps structure your thinking and gets your ideas written down, but doesn't have to have correct syntax. The pseudocode (here just a bunch of comments) you start with is below: # variable my_val # if my_val is positive (> 0) and even: # output stores 1 With the pseudocode above you've got something that looks a little like Python, but wouldn't execute. However, the ideas are there. We can see that `if` whatever is stored in `my_val` is positive and even, `output` will store the value 1. Okay, that's a start. And, while you know how to determine if a variable is positive (defined as greater than zero), you aren't totally sure how to determine if a variable stores an even value. So, you turn to **strategy #2**: **add "in python" to your search on the Internet** Your search term: "How to check whether a variable stores an even value _in python_." The Internet comes through for you explaining that if you check if the remainder after dividing a number by 2 is equal to zero, that number is even...and likely even gives you some python code in the example (`val % 2 == 0`). With this, you try to incorporate it into what you've already started. # this code errors my_val = 6 if my_val > 0 and my_val % 2 = 0: output == 1 So, you've now got some code written, and you really thought it was going to be correct, but you've now got an error. So, you read the SyntaxError and you're not really sure what the issue is. So, you consider **strategy #3: adding `print()` statements**. 
You think through the logic of your code and you say to yourself "OK, if `my_val` is 6, then the condition `my_val > 0` should be true, because `my_val` is greater than zero and `my_val % 2 = 0` should be true, since 6 is even. If those are both true, the code within the conditional code block should execute. If that happens, we could add a `print()` statement within the conditional, knowing that it would only print() something if my logic were true!" With that thought, you add a `print()` statement and re-execute your code: # this code errors my_val = 6 if my_val > 0 and my_val % 2 = 0: print('this should print!') output == 1 You see that you still get the `SyntaxError`, which makes sense, since we didn't actually change the code yet, but you *also* notice that the `print()` statement was never executed. At this point you know the error in your code happens *before* that `print()` statement...since nothing `print`ed. So, you revisit your conditional and the error message, and at this point you realize that the caret (`^`) is indicating there's some issue with the statement `my_val % 2 = 0`. Suddenly it finally hits you! You wanted to check for equality (`==`), not assign (`=`). You fix your code changing the assignment operator to an equality operator. # this code errors my_val = 6 if my_val > 0 and my_val % 2 == 0: print('this should print!') output == 1 At this point, we *still* get an error *but*, you'll notice 1) it's a *different* error and 2) our `print()` statement is now encountered. We know here that the conditional is now correct! So, we read our error message and see it says "'output' is not defined." And, you look at your code and realize, ah - the same mistake in reverse. The final statement `output == 1` is a statement of assignment, storing `1` in `output`. 
You fix your code and re-execute: my_val = 6 if my_val > 0 and my_val % 2 == 0: print('this should print!') output = 1 Note that your `print()` statement will `print()` unless it's removed, so your last step to clean up your code now that you're done with debugging is to remove it. With that, you've accomplished your task! my_val = 6 if my_val > 0 and my_val % 2 == 0: output = 1 output ## Exercises Q1. **What type of error would each of the following return?** ```python int('six') if num > 0 print("Greater than 0") if num > 0: print("Greater than 0") ``` Q2. **Given the function `my_function` provided, understand the function and then debug the function so that each of the following test cases pass without producing an error**: ```python def my_function(input_1, input_2): """A long function that might error.""" if len(input_1) > 1 if input_1[0] = 0: answer = 0 elif len(input_2) == 2: answer = input_2[1] / input_1[0] elif len(input_1) == len(input_2): answer = input_1[0] + input_2[0] print(answer) ``` Test cases: ```python my_function([0, 1], [0]) my_function([0], [0, 1]) my_function([1], [1]) my_function([1], ['1']) ```
running_sum = 0 for item in input_list: running_sum = running_sum + item special_value = input_list[3] return running_sum + special_value
conditional_block
main.rs
//! //! adapted from the rendy meshes demo //! #![cfg_attr( not(any(feature = "dx12", feature = "metal", feature = "vulkan")), allow(unused) )] // #[cfg(feature = "dx12")] // use gfx_backend_dx12::Backend; // #[cfg(feature = "metal")] // use gfx_backend_metal::Backend; // #[cfg(feature = "vulkan")] use gfx_backend_vulkan::Backend; use rand::prelude::*; use rendy::shader::SpirvReflection; use rendy_playground::crystal; use std::sync::mpsc::{channel, sync_channel, Receiver, Sender}; use { genmesh::generators::{IndexedPolygon, SharedVertex}, rand::distributions::{Distribution, Uniform}, rendy::{ command::{DrawIndexedCommand, QueueId, RenderPassEncoder}, factory::{Config, Factory}, graph::{render::*, GraphBuilder, GraphContext, NodeBuffer, NodeImage}, hal::{self, adapter::PhysicalDevice as _, device::Device as _}, init::winit::{ event::{Event, WindowEvent}, event_loop::{ControlFlow, EventLoop}, window::WindowBuilder, }, init::AnyWindowedRendy, memory::Dynamic, mesh::{Mesh, Model, PosColorNorm}, resource::{Buffer, BufferInfo, DescriptorSet, DescriptorSetLayout, Escape, Handle}, shader::{ShaderKind, SourceLanguage, SourceShaderInfo, SpirvShader}, }, std::{cmp::min, mem::size_of, time}, }; use { genmesh::Triangulate, nalgebra::Vector3, random_color::RandomColor, rendy::mesh::Position, rendy_playground::player, }; lazy_static::lazy_static! 
{ static ref VERTEX: SpirvShader = SourceShaderInfo::new( include_str!("shader.vert"), concat!(env!("CARGO_MANIFEST_DIR"), "/examples/meshes_simple/shader.vert").into(), ShaderKind::Vertex, SourceLanguage::GLSL, "main", ).precompile().unwrap(); static ref FRAGMENT: SpirvShader = SourceShaderInfo::new( include_str!("shader.frag"), concat!(env!("CARGO_MANIFEST_DIR"), "/examples/meshes_simple/shader.frag").into(), ShaderKind::Fragment, SourceLanguage::GLSL, "main", ).precompile().unwrap(); static ref SHADERS: rendy::shader::ShaderSetBuilder = rendy::shader::ShaderSetBuilder::default() .with_vertex(&*VERTEX).unwrap() .with_fragment(&*FRAGMENT).unwrap(); static ref SHADER_REFLECTION: SpirvReflection = SHADERS.reflect().unwrap(); } #[derive(Clone, Copy)] #[repr(C, align(16))] struct UniformArgs { proj: nalgebra::Matrix4<f32>, view: nalgebra::Matrix4<f32>, model: [nalgebra::Matrix4<f32>; 6], } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstanceConst { translate: nalgebra::Vector3<f32>, dir: u32, } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstance { color: nalgebra::Vector3<f32>, pad: u32, } #[derive(Debug)] struct Camera { view: nalgebra::Projective3<f32>, // proj: nalgebra::Perspective3<f32>, proj: nalgebra::Matrix4<f32>, } struct Scene<B: hal::Backend> { camera: Camera, object_mesh: Option<Mesh<B>>, per_instance_const: Vec<PerInstanceConst>, per_instance: Vec<PerInstance>, } const UNIFORM_SIZE: u64 = size_of::<UniformArgs>() as u64; const NUM_INSTANCES: u64 = 1024 * 1024; const PER_INSTANCE_CONST_SIZE: u64 = size_of::<PerInstanceConst>() as u64; const PER_INSTANCE_SIZE: u64 = size_of::<PerInstance>() as u64; const fn align_to(s: u64, align: u64) -> u64 { ((s - 1) / align + 1) * align } const fn buffer_const_size(align: u64) -> u64 { align_to(PER_INSTANCE_CONST_SIZE * NUM_INSTANCES, align) } const fn buffer_frame_size(align: u64) -> u64 { align_to(UNIFORM_SIZE + PER_INSTANCE_SIZE * NUM_INSTANCES, align) } const fn 
buffer_size(align: u64, frames: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * frames } const fn uniform_offset(index: usize, align: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * index as u64 } const fn per_instance_offset(index: usize, align: u64) -> u64 { uniform_offset(index, align) + UNIFORM_SIZE } #[derive(Debug, Default)] struct MeshRenderPipelineDesc; #[derive(Debug)] struct MeshRenderPipeline<B: hal::Backend> { align: u64, buffer: Escape<Buffer<B>>, sets: Vec<Escape<DescriptorSet<B>>>, } struct ProfileTimer { label: std::string::String, start: std::time::Instant, } impl ProfileTimer { fn start(label: &str) -> Self { ProfileTimer { label: label.into(), start: std::time::Instant::now(), } } } // impl Drop for ProfileTimer { // fn drop(&mut self) { // println!("{}: {:?}", self.label, self.start.elapsed()); // } // } impl<B> SimpleGraphicsPipelineDesc<B, Scene<B>> for MeshRenderPipelineDesc where B: hal::Backend, { type Pipeline = MeshRenderPipeline<B>; fn load_shader_set( &self, factory: &mut Factory<B>, _scene: &Scene<B>, ) -> rendy_shader::ShaderSet<B> { SHADERS.build(factory, Default::default()).unwrap() } fn vertices( &self, ) -> Vec<( Vec<hal::pso::Element<hal::format::Format>>, hal::pso::ElemStride, hal::pso::VertexInputRate, )> { return vec![ SHADER_REFLECTION .attributes(&["position"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Vertex), SHADER_REFLECTION .attributes(&["translate", "dir"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), SHADER_REFLECTION .attributes(&["color", "pad"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), ]; } fn layout(&self) -> Layout { return SHADER_REFLECTION.layout().unwrap(); } fn build<'a>( self, ctx: &GraphContext<B>, factory: &mut Factory<B>, _queue: QueueId, scene: &Scene<B>, buffers: Vec<NodeBuffer>, images: Vec<NodeImage>, set_layouts: &[Handle<DescriptorSetLayout<B>>], ) -> Result<MeshRenderPipeline<B>, 
rendy_core::hal::pso::CreationError> { assert!(buffers.is_empty()); assert!(images.is_empty()); assert_eq!(set_layouts.len(), 1); let frames = ctx.frames_in_flight as _; let align = factory .physical() .limits() .min_uniform_buffer_offset_alignment; let mut buffer = factory .create_buffer( BufferInfo { size: buffer_size(align, frames) as u64, usage: hal::buffer::Usage::UNIFORM | hal::buffer::Usage::INDIRECT | hal::buffer::Usage::VERTEX, }, Dynamic, ) .unwrap(); let mut sets = Vec::new(); for index in 0..frames { unsafe { let set = factory .create_descriptor_set(set_layouts[0].clone()) .unwrap(); factory.write_descriptor_sets(Some(hal::pso::DescriptorSetWrite { set: set.raw(), binding: 0, array_offset: 0, descriptors: Some(hal::pso::Descriptor::Buffer( buffer.raw(), Some(uniform_offset(index as usize, align)) ..Some(uniform_offset(index as usize, align) + UNIFORM_SIZE), )), })); sets.push(set); } } if !scene.per_instance_const.is_empty()
Ok(MeshRenderPipeline { align, buffer, sets, }) } } fn model_transform() -> nalgebra::Matrix4<f32> { let rot = nalgebra::UnitQuaternion::identity(); nalgebra::Similarity3::from_parts(Vector3::new(0.5, 0.5, 0.0).into(), rot, 0.5).into() } fn model_transform2() -> [nalgebra::Matrix4<f32>; 6] { let z_pos = nalgebra::UnitQuaternion::identity(); let z_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 0.0, -1.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(-1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let y_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 1.0, 0.0), &Vector3::new(0.0, 0.0, 1.0), ); let y_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, -1.0, 0.0), &Vector3::new(0.0, 0.0, -1.0), ); // let unit = 0.125; let unit = 0.125; let scale = 0.125; [ nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, unit).into(), z_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, -unit).into(), z_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(unit, 0.0, 0.0).into(), x_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(-unit, 0.0, 0.0).into(), x_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, unit, 0.0).into(), y_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, -unit, 0.0).into(), y_neg, scale) .into(), ] } impl<B> SimpleGraphicsPipeline<B, Scene<B>> for MeshRenderPipeline<B> where B: hal::Backend, { type Desc = MeshRenderPipelineDesc; fn prepare( &mut self, factory: &Factory<B>, _queue: QueueId, _set_layouts: &[Handle<DescriptorSetLayout<B>>], index: usize, scene: &Scene<B>, ) -> PrepareResult { let pt = ProfileTimer::start("prepare"); // println!("index: {}", index); // println!( // "upload uniform {}: {}", // index, // 
std::mem::size_of::<UniformArgs>() // ); unsafe { factory .upload_visible_buffer( &mut self.buffer, uniform_offset(index, self.align), &[UniformArgs { // proj: scene.camera.proj.to_homogeneous(), proj: scene.camera.proj, view: scene.camera.view.to_homogeneous(), model: model_transform2(), }], ) .unwrap() }; // { // let per_instance = &scene.per_instance[..]; // println!( // "upload dyn {}: {}", // index, // // std::mem::size_of::<PerInstance>() * scene.per_instance.len(), // std::mem::size_of_val(per_instance) // ); // } if !scene.per_instance.is_empty() { unsafe { factory .upload_visible_buffer( &mut self.buffer, per_instance_offset(index, self.align), &scene.per_instance[..], ) .unwrap() }; } PrepareResult::DrawReuse } fn draw( &mut self, layout: &B::PipelineLayout, mut encoder: RenderPassEncoder<'_, B>, index: usize, scene: &Scene<B>, ) { println!("draw"); unsafe { encoder.bind_graphics_descriptor_sets( layout, 0, Some(self.sets[index].raw()), std::iter::empty(), ); let vertex = [SHADER_REFLECTION.attributes(&["position"]).unwrap()]; scene .object_mesh .as_ref() .unwrap() .bind(0, &vertex, &mut encoder) .unwrap(); encoder.bind_vertex_buffers(1, std::iter::once((self.buffer.raw(), 0))); encoder.bind_vertex_buffers( 2, std::iter::once((self.buffer.raw(), per_instance_offset(index, self.align))), ); encoder.draw_indexed( 0..scene.object_mesh.as_ref().unwrap().len(), 0 as i32, 0..scene.per_instance.len() as u32, ) } } fn dispose(self, _factory: &mut Factory<B>, _scene: &Scene<B>) {} } fn main() { env_logger::Builder::from_default_env() .filter_module("meshes", log::LevelFilter::Trace) .init(); let mut event_loop = EventLoop::new(); let window = WindowBuilder::new() .with_inner_size((960, 640).into()) .with_title("Rendy example"); let config: Config = Default::default(); let rendy = AnyWindowedRendy::init_auto(&config, window, &event_loop).unwrap(); rendy::with_any_windowed_rendy!((rendy) use back; (mut factory, mut families, surface, window) => { let mut 
graph_builder = GraphBuilder::<Backend, Scene<Backend>>::new(); let size = window.inner_size().to_physical(window.hidpi_factor()); let window_kind = hal::image::Kind::D2(size.width as u32, size.height as u32, 1, 1); let aspect = size.width / size.height; let depth = graph_builder.create_image( window_kind, 1, hal::format::Format::D32Sfloat, Some(hal::command::ClearValue { depth_stencil: hal::command::ClearDepthStencil { depth: 1.0, stencil: 0, }, }), ); let pass = graph_builder.add_node( MeshRenderPipeline::builder() .into_subpass() .with_color_surface() .with_depth_stencil(depth) .into_pass() .with_surface( surface, hal::window::Extent2D { width: size.width as _, height: size.height as _, }, Some(hal::command::ClearValue { color: hal::command::ClearColor { float32: [0.5, 0.5, 1.0, 1.0], }, }), ), ); let bm = crystal::read_map("hidden_ramp.txt").expect("could not read file"); let mut planes = crystal::PlanesSep::new(); planes.create_planes(&bm); let planes_copy : Vec<crystal::Plane> = planes.planes_iter().cloned().collect(); let mut scene = Scene { camera: Camera { proj: nalgebra::Perspective3::new(aspect as f32, 3.1415 / 4.0, 1.0, 200.0) .to_homogeneous(), view: nalgebra::Projective3::identity() * nalgebra::Translation3::new(0.0, 0.0, 10.0), }, object_mesh: None, per_instance: vec![], per_instance_const: vec![], }; // let mut rng = rand::thread_rng(); // let col_dist = Uniform::new(0.5, 1.0); let mut rc = RandomColor::new(); rc.luminosity(random_color::Luminosity::Bright); println!("planes: {}", planes_copy.len()); for i in 0..std::cmp::min(NUM_INSTANCES as usize,planes_copy.len()) { let color = rc.to_rgb_array(); let point = planes_copy[i].cell; let dir = match planes_copy[i].dir { crystal::Dir::ZxPos => 4, crystal::Dir::ZxNeg => 5, crystal::Dir::YzPos => 2, crystal::Dir::YzNeg => 3, crystal::Dir::XyPos => 0, crystal::Dir::XyNeg => 1, }; scene.per_instance_const.push(PerInstanceConst{ translate: nalgebra::Vector3::new(point[0] as f32 * 0.25, point[1] as f32 * 
0.25, point[2] as f32 * 0.25), dir : dir, }); scene.per_instance.push(PerInstance{ color : nalgebra::Vector3::new( color[0] as f32 / 255.0, color[1] as f32 / 255.0, color[2] as f32 / 255.0, ), pad : 0, }); } let graph = graph_builder .build(&mut factory, &mut families, &scene) .unwrap(); // let icosphere = genmesh::generators::IcoSphere::subdivide(3); // let icosphere = genmesh::generators::Torus::new(1f32, 0.5f32, 32, 32); let icosphere = genmesh::generators::Plane::new(); // icosphere. let indices: Vec<_> = genmesh::Vertices::vertices(icosphere.indexed_polygon_iter().triangulate()) .map(|i| i as u32) .collect(); println!("indices: {}", indices.len()); let vertices: Vec<_> = icosphere .shared_vertex_iter() .map(|v| Position(v.pos.into())) .collect(); println!("vertices: {}", vertices.len()); for v in &vertices { println!("vert: {:?}", v); } scene.object_mesh = Some( Mesh::<Backend>::builder() .with_indices(&indices[..]) .with_vertices(&vertices[..]) .build(graph.node_queue(pass), &factory) .unwrap(), ); let started = time::Instant::now(); let mut frames = 0u64..; // let rxy = Uniform::new(-1.0, 1.0); // let rz = Uniform::new(0.0, 185.0); let mut checkpoint = started; let mut player_state = player::State::new(); let mut event_manager = player::EventManager::new(); let mut graph = Some(graph); event_loop.run(move |event, _, control_flow| { *control_flow = ControlFlow::Poll; match event { Event::WindowEvent { event, .. 
} => match event { WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit, _ => event_manager.window_event(event) }, Event::EventsCleared => { if event_manager.should_close() { *control_flow = ControlFlow::Exit; } factory.maintain(&mut families); player_state.apply_events(event_manager.input_events()); scene.camera = Camera { // proj: nalgebra::Perspective3::new(aspect as f32, 3.1415 / 4.0, 1.0, 200.0), proj: rendy_playground::math::perspective_projection( aspect as f32, 3.1415 / 4.0, 1.0, 200.0, ), view: player_state.get_view_matrix(), }; if let Some(ref mut graph) = graph { let pt = ProfileTimer::start("graph.run"); graph.run(&mut factory, &mut families, &scene); } let elapsed = checkpoint.elapsed(); if (checkpoint.elapsed() >= std::time::Duration::from_secs(5)) { checkpoint = time::Instant::now(); // let mut rng = thread_rng(); // let scene = &mut scene.rad_scene; // for i in 0..scene.planes.num_planes() { // // seriously, there is no Vec.fill? // scene.diffuse[i] = Vector3::new(1f32, 1f32, 1f32); // scene.emit[i] = Vector3::new(0.0, 0.0, 0.0); // } // let mut rc = RandomColor::new(); // rc.luminosity(random_color::Luminosity::Bright); // let num_dots = 1000; // for _ in 0..num_dots { // let i = rng.gen_range(0, scene.planes.num_planes()); // let color = rc.to_rgb_array(); // scene.emit[i] = Vector3::new(color[0] as f32 / 255.0, color[1] as f32 / 255.0,color[2] as f32 / 255.0,); // } } // { // let pt= ProfileTimer::start("rad"); // scene.rad_scene.do_rad(); // } // for i in 0..scene.rad_scene.planes.num_planes() { // scene.per_instance[i].color[0] = scene.rad_scene.rad_front.r[i]; // scene.per_instance[i].color[1] = scene.rad_scene.rad_front.g[i]; // scene.per_instance[i].color[2] = scene.rad_scene.rad_front.b[i]; // } // for pi in &mut scene.per_instance { // let color = rc.to_rgb_array(); // pi.color = nalgebra::Vector3::new( // color[0] as f32 / 255.0, // color[1] as f32 / 255.0, // color[2] as f32 / 255.0, // ); // } } _ => {} } if *control_flow == 
ControlFlow::Exit { if let Some(graph) = graph.take() { graph.dispose(&mut factory, &scene); } drop(scene.object_mesh.take()); } }); }); }
{ // println!( // "upload const: {}", // std::mem::size_of::<PerInstanceConst>() * scene.per_instance_const.len() // ); unsafe { factory .upload_visible_buffer(&mut buffer, 0, &scene.per_instance_const[..]) .expect("update const buffer failed") }; }
conditional_block
main.rs
//! //! adapted from the rendy meshes demo //! #![cfg_attr( not(any(feature = "dx12", feature = "metal", feature = "vulkan")), allow(unused) )] // #[cfg(feature = "dx12")] // use gfx_backend_dx12::Backend; // #[cfg(feature = "metal")] // use gfx_backend_metal::Backend; // #[cfg(feature = "vulkan")] use gfx_backend_vulkan::Backend; use rand::prelude::*; use rendy::shader::SpirvReflection; use rendy_playground::crystal; use std::sync::mpsc::{channel, sync_channel, Receiver, Sender}; use { genmesh::generators::{IndexedPolygon, SharedVertex}, rand::distributions::{Distribution, Uniform}, rendy::{ command::{DrawIndexedCommand, QueueId, RenderPassEncoder}, factory::{Config, Factory}, graph::{render::*, GraphBuilder, GraphContext, NodeBuffer, NodeImage}, hal::{self, adapter::PhysicalDevice as _, device::Device as _}, init::winit::{ event::{Event, WindowEvent}, event_loop::{ControlFlow, EventLoop}, window::WindowBuilder, }, init::AnyWindowedRendy, memory::Dynamic, mesh::{Mesh, Model, PosColorNorm}, resource::{Buffer, BufferInfo, DescriptorSet, DescriptorSetLayout, Escape, Handle}, shader::{ShaderKind, SourceLanguage, SourceShaderInfo, SpirvShader}, }, std::{cmp::min, mem::size_of, time}, }; use { genmesh::Triangulate, nalgebra::Vector3, random_color::RandomColor, rendy::mesh::Position, rendy_playground::player, }; lazy_static::lazy_static! 
{ static ref VERTEX: SpirvShader = SourceShaderInfo::new( include_str!("shader.vert"), concat!(env!("CARGO_MANIFEST_DIR"), "/examples/meshes_simple/shader.vert").into(), ShaderKind::Vertex, SourceLanguage::GLSL, "main", ).precompile().unwrap(); static ref FRAGMENT: SpirvShader = SourceShaderInfo::new( include_str!("shader.frag"), concat!(env!("CARGO_MANIFEST_DIR"), "/examples/meshes_simple/shader.frag").into(), ShaderKind::Fragment, SourceLanguage::GLSL, "main", ).precompile().unwrap(); static ref SHADERS: rendy::shader::ShaderSetBuilder = rendy::shader::ShaderSetBuilder::default() .with_vertex(&*VERTEX).unwrap() .with_fragment(&*FRAGMENT).unwrap(); static ref SHADER_REFLECTION: SpirvReflection = SHADERS.reflect().unwrap(); } #[derive(Clone, Copy)] #[repr(C, align(16))] struct UniformArgs { proj: nalgebra::Matrix4<f32>, view: nalgebra::Matrix4<f32>, model: [nalgebra::Matrix4<f32>; 6], } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstanceConst { translate: nalgebra::Vector3<f32>, dir: u32, } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstance { color: nalgebra::Vector3<f32>, pad: u32, } #[derive(Debug)] struct Camera { view: nalgebra::Projective3<f32>, // proj: nalgebra::Perspective3<f32>, proj: nalgebra::Matrix4<f32>, } struct Scene<B: hal::Backend> { camera: Camera, object_mesh: Option<Mesh<B>>, per_instance_const: Vec<PerInstanceConst>, per_instance: Vec<PerInstance>, } const UNIFORM_SIZE: u64 = size_of::<UniformArgs>() as u64; const NUM_INSTANCES: u64 = 1024 * 1024; const PER_INSTANCE_CONST_SIZE: u64 = size_of::<PerInstanceConst>() as u64; const PER_INSTANCE_SIZE: u64 = size_of::<PerInstance>() as u64; const fn align_to(s: u64, align: u64) -> u64 { ((s - 1) / align + 1) * align } const fn buffer_const_size(align: u64) -> u64 { align_to(PER_INSTANCE_CONST_SIZE * NUM_INSTANCES, align) } const fn buffer_frame_size(align: u64) -> u64 { align_to(UNIFORM_SIZE + PER_INSTANCE_SIZE * NUM_INSTANCES, align) } const fn 
buffer_size(align: u64, frames: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * frames } const fn uniform_offset(index: usize, align: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * index as u64 } const fn per_instance_offset(index: usize, align: u64) -> u64 { uniform_offset(index, align) + UNIFORM_SIZE } #[derive(Debug, Default)] struct MeshRenderPipelineDesc; #[derive(Debug)] struct MeshRenderPipeline<B: hal::Backend> { align: u64, buffer: Escape<Buffer<B>>, sets: Vec<Escape<DescriptorSet<B>>>, } struct ProfileTimer { label: std::string::String, start: std::time::Instant, } impl ProfileTimer { fn start(label: &str) -> Self { ProfileTimer { label: label.into(), start: std::time::Instant::now(), } } } // impl Drop for ProfileTimer { // fn drop(&mut self) { // println!("{}: {:?}", self.label, self.start.elapsed()); // } // } impl<B> SimpleGraphicsPipelineDesc<B, Scene<B>> for MeshRenderPipelineDesc where B: hal::Backend, { type Pipeline = MeshRenderPipeline<B>; fn load_shader_set( &self, factory: &mut Factory<B>, _scene: &Scene<B>, ) -> rendy_shader::ShaderSet<B> { SHADERS.build(factory, Default::default()).unwrap() } fn vertices( &self, ) -> Vec<( Vec<hal::pso::Element<hal::format::Format>>, hal::pso::ElemStride, hal::pso::VertexInputRate, )> { return vec![ SHADER_REFLECTION .attributes(&["position"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Vertex), SHADER_REFLECTION .attributes(&["translate", "dir"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), SHADER_REFLECTION .attributes(&["color", "pad"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), ]; } fn layout(&self) -> Layout { return SHADER_REFLECTION.layout().unwrap(); } fn build<'a>( self, ctx: &GraphContext<B>, factory: &mut Factory<B>, _queue: QueueId, scene: &Scene<B>, buffers: Vec<NodeBuffer>, images: Vec<NodeImage>, set_layouts: &[Handle<DescriptorSetLayout<B>>], ) -> Result<MeshRenderPipeline<B>, 
rendy_core::hal::pso::CreationError> { assert!(buffers.is_empty()); assert!(images.is_empty()); assert_eq!(set_layouts.len(), 1); let frames = ctx.frames_in_flight as _; let align = factory .physical() .limits() .min_uniform_buffer_offset_alignment; let mut buffer = factory .create_buffer( BufferInfo { size: buffer_size(align, frames) as u64, usage: hal::buffer::Usage::UNIFORM | hal::buffer::Usage::INDIRECT | hal::buffer::Usage::VERTEX, }, Dynamic, ) .unwrap(); let mut sets = Vec::new(); for index in 0..frames { unsafe { let set = factory .create_descriptor_set(set_layouts[0].clone()) .unwrap(); factory.write_descriptor_sets(Some(hal::pso::DescriptorSetWrite { set: set.raw(), binding: 0, array_offset: 0, descriptors: Some(hal::pso::Descriptor::Buffer( buffer.raw(), Some(uniform_offset(index as usize, align)) ..Some(uniform_offset(index as usize, align) + UNIFORM_SIZE), )), })); sets.push(set); } } if !scene.per_instance_const.is_empty() { // println!( // "upload const: {}", // std::mem::size_of::<PerInstanceConst>() * scene.per_instance_const.len() // ); unsafe { factory .upload_visible_buffer(&mut buffer, 0, &scene.per_instance_const[..]) .expect("update const buffer failed") }; } Ok(MeshRenderPipeline { align, buffer, sets, }) } } fn model_transform() -> nalgebra::Matrix4<f32> { let rot = nalgebra::UnitQuaternion::identity(); nalgebra::Similarity3::from_parts(Vector3::new(0.5, 0.5, 0.0).into(), rot, 0.5).into() } fn model_transform2() -> [nalgebra::Matrix4<f32>; 6] { let z_pos = nalgebra::UnitQuaternion::identity(); let z_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 0.0, -1.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(-1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let y_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 1.0, 0.0), &Vector3::new(0.0, 0.0, 
1.0), ); let y_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, -1.0, 0.0), &Vector3::new(0.0, 0.0, -1.0), ); // let unit = 0.125; let unit = 0.125; let scale = 0.125; [ nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, unit).into(), z_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, -unit).into(), z_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(unit, 0.0, 0.0).into(), x_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(-unit, 0.0, 0.0).into(), x_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, unit, 0.0).into(), y_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, -unit, 0.0).into(), y_neg, scale) .into(), ] } impl<B> SimpleGraphicsPipeline<B, Scene<B>> for MeshRenderPipeline<B> where B: hal::Backend, { type Desc = MeshRenderPipelineDesc; fn prepare( &mut self, factory: &Factory<B>, _queue: QueueId, _set_layouts: &[Handle<DescriptorSetLayout<B>>], index: usize, scene: &Scene<B>, ) -> PrepareResult { let pt = ProfileTimer::start("prepare"); // println!("index: {}", index); // println!( // "upload uniform {}: {}", // index, // std::mem::size_of::<UniformArgs>() // ); unsafe { factory .upload_visible_buffer( &mut self.buffer, uniform_offset(index, self.align), &[UniformArgs { // proj: scene.camera.proj.to_homogeneous(), proj: scene.camera.proj, view: scene.camera.view.to_homogeneous(), model: model_transform2(), }], ) .unwrap() }; // { // let per_instance = &scene.per_instance[..]; // println!( // "upload dyn {}: {}", // index, // // std::mem::size_of::<PerInstance>() * scene.per_instance.len(), // std::mem::size_of_val(per_instance) // ); // } if !scene.per_instance.is_empty() { unsafe { factory .upload_visible_buffer( &mut self.buffer, per_instance_offset(index, self.align), &scene.per_instance[..], ) .unwrap() }; } PrepareResult::DrawReuse } fn draw( &mut self, layout: &B::PipelineLayout, mut encoder: RenderPassEncoder<'_, B>, 
index: usize, scene: &Scene<B>, ) { println!("draw"); unsafe { encoder.bind_graphics_descriptor_sets( layout, 0, Some(self.sets[index].raw()), std::iter::empty(), ); let vertex = [SHADER_REFLECTION.attributes(&["position"]).unwrap()]; scene .object_mesh .as_ref() .unwrap() .bind(0, &vertex, &mut encoder) .unwrap(); encoder.bind_vertex_buffers(1, std::iter::once((self.buffer.raw(), 0))); encoder.bind_vertex_buffers( 2, std::iter::once((self.buffer.raw(), per_instance_offset(index, self.align))), ); encoder.draw_indexed( 0..scene.object_mesh.as_ref().unwrap().len(), 0 as i32, 0..scene.per_instance.len() as u32, ) } } fn dispose(self, _factory: &mut Factory<B>, _scene: &Scene<B>) {} } fn main() { env_logger::Builder::from_default_env() .filter_module("meshes", log::LevelFilter::Trace) .init(); let mut event_loop = EventLoop::new(); let window = WindowBuilder::new() .with_inner_size((960, 640).into()) .with_title("Rendy example"); let config: Config = Default::default(); let rendy = AnyWindowedRendy::init_auto(&config, window, &event_loop).unwrap(); rendy::with_any_windowed_rendy!((rendy) use back; (mut factory, mut families, surface, window) => { let mut graph_builder = GraphBuilder::<Backend, Scene<Backend>>::new(); let size = window.inner_size().to_physical(window.hidpi_factor()); let window_kind = hal::image::Kind::D2(size.width as u32, size.height as u32, 1, 1); let aspect = size.width / size.height; let depth = graph_builder.create_image( window_kind, 1, hal::format::Format::D32Sfloat, Some(hal::command::ClearValue { depth_stencil: hal::command::ClearDepthStencil { depth: 1.0, stencil: 0, }, }), ); let pass = graph_builder.add_node( MeshRenderPipeline::builder() .into_subpass() .with_color_surface() .with_depth_stencil(depth) .into_pass() .with_surface( surface, hal::window::Extent2D { width: size.width as _, height: size.height as _, }, Some(hal::command::ClearValue { color: hal::command::ClearColor { float32: [0.5, 0.5, 1.0, 1.0], }, }), ), ); let bm = 
crystal::read_map("hidden_ramp.txt").expect("could not read file");
let mut planes = crystal::PlanesSep::new(); planes.create_planes(&bm); let planes_copy : Vec<crystal::Plane> = planes.planes_iter().cloned().collect(); let mut scene = Scene { camera: Camera { proj: nalgebra::Perspective3::new(aspect as f32, 3.1415 / 4.0, 1.0, 200.0) .to_homogeneous(), view: nalgebra::Projective3::identity() * nalgebra::Translation3::new(0.0, 0.0, 10.0), }, object_mesh: None, per_instance: vec![], per_instance_const: vec![], }; // let mut rng = rand::thread_rng(); // let col_dist = Uniform::new(0.5, 1.0); let mut rc = RandomColor::new(); rc.luminosity(random_color::Luminosity::Bright); println!("planes: {}", planes_copy.len()); for i in 0..std::cmp::min(NUM_INSTANCES as usize,planes_copy.len()) { let color = rc.to_rgb_array(); let point = planes_copy[i].cell; let dir = match planes_copy[i].dir { crystal::Dir::ZxPos => 4, crystal::Dir::ZxNeg => 5, crystal::Dir::YzPos => 2, crystal::Dir::YzNeg => 3, crystal::Dir::XyPos => 0, crystal::Dir::XyNeg => 1, }; scene.per_instance_const.push(PerInstanceConst{ translate: nalgebra::Vector3::new(point[0] as f32 * 0.25, point[1] as f32 * 0.25, point[2] as f32 * 0.25), dir : dir, }); scene.per_instance.push(PerInstance{ color : nalgebra::Vector3::new( color[0] as f32 / 255.0, color[1] as f32 / 255.0, color[2] as f32 / 255.0, ), pad : 0, }); } let graph = graph_builder .build(&mut factory, &mut families, &scene) .unwrap(); // let icosphere = genmesh::generators::IcoSphere::subdivide(3); // let icosphere = genmesh::generators::Torus::new(1f32, 0.5f32, 32, 32); let icosphere = genmesh::generators::Plane::new(); // icosphere. 
let indices: Vec<_> = genmesh::Vertices::vertices(icosphere.indexed_polygon_iter().triangulate()) .map(|i| i as u32) .collect(); println!("indices: {}", indices.len()); let vertices: Vec<_> = icosphere .shared_vertex_iter() .map(|v| Position(v.pos.into())) .collect(); println!("vertices: {}", vertices.len()); for v in &vertices { println!("vert: {:?}", v); } scene.object_mesh = Some( Mesh::<Backend>::builder() .with_indices(&indices[..]) .with_vertices(&vertices[..]) .build(graph.node_queue(pass), &factory) .unwrap(), ); let started = time::Instant::now(); let mut frames = 0u64..; // let rxy = Uniform::new(-1.0, 1.0); // let rz = Uniform::new(0.0, 185.0); let mut checkpoint = started; let mut player_state = player::State::new(); let mut event_manager = player::EventManager::new(); let mut graph = Some(graph); event_loop.run(move |event, _, control_flow| { *control_flow = ControlFlow::Poll; match event { Event::WindowEvent { event, .. } => match event { WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit, _ => event_manager.window_event(event) }, Event::EventsCleared => { if event_manager.should_close() { *control_flow = ControlFlow::Exit; } factory.maintain(&mut families); player_state.apply_events(event_manager.input_events()); scene.camera = Camera { // proj: nalgebra::Perspective3::new(aspect as f32, 3.1415 / 4.0, 1.0, 200.0), proj: rendy_playground::math::perspective_projection( aspect as f32, 3.1415 / 4.0, 1.0, 200.0, ), view: player_state.get_view_matrix(), }; if let Some(ref mut graph) = graph { let pt = ProfileTimer::start("graph.run"); graph.run(&mut factory, &mut families, &scene); } let elapsed = checkpoint.elapsed(); if (checkpoint.elapsed() >= std::time::Duration::from_secs(5)) { checkpoint = time::Instant::now(); // let mut rng = thread_rng(); // let scene = &mut scene.rad_scene; // for i in 0..scene.planes.num_planes() { // // seriously, there is no Vec.fill? 
// scene.diffuse[i] = Vector3::new(1f32, 1f32, 1f32); // scene.emit[i] = Vector3::new(0.0, 0.0, 0.0); // } // let mut rc = RandomColor::new(); // rc.luminosity(random_color::Luminosity::Bright); // let num_dots = 1000; // for _ in 0..num_dots { // let i = rng.gen_range(0, scene.planes.num_planes()); // let color = rc.to_rgb_array(); // scene.emit[i] = Vector3::new(color[0] as f32 / 255.0, color[1] as f32 / 255.0,color[2] as f32 / 255.0,); // } } // { // let pt= ProfileTimer::start("rad"); // scene.rad_scene.do_rad(); // } // for i in 0..scene.rad_scene.planes.num_planes() { // scene.per_instance[i].color[0] = scene.rad_scene.rad_front.r[i]; // scene.per_instance[i].color[1] = scene.rad_scene.rad_front.g[i]; // scene.per_instance[i].color[2] = scene.rad_scene.rad_front.b[i]; // } // for pi in &mut scene.per_instance { // let color = rc.to_rgb_array(); // pi.color = nalgebra::Vector3::new( // color[0] as f32 / 255.0, // color[1] as f32 / 255.0, // color[2] as f32 / 255.0, // ); // } } _ => {} } if *control_flow == ControlFlow::Exit { if let Some(graph) = graph.take() { graph.dispose(&mut factory, &scene); } drop(scene.object_mesh.take()); } }); }); }
random_line_split
main.rs
//! //! adapted from the rendy meshes demo //! #![cfg_attr( not(any(feature = "dx12", feature = "metal", feature = "vulkan")), allow(unused) )] // #[cfg(feature = "dx12")] // use gfx_backend_dx12::Backend; // #[cfg(feature = "metal")] // use gfx_backend_metal::Backend; // #[cfg(feature = "vulkan")] use gfx_backend_vulkan::Backend; use rand::prelude::*; use rendy::shader::SpirvReflection; use rendy_playground::crystal; use std::sync::mpsc::{channel, sync_channel, Receiver, Sender}; use { genmesh::generators::{IndexedPolygon, SharedVertex}, rand::distributions::{Distribution, Uniform}, rendy::{ command::{DrawIndexedCommand, QueueId, RenderPassEncoder}, factory::{Config, Factory}, graph::{render::*, GraphBuilder, GraphContext, NodeBuffer, NodeImage}, hal::{self, adapter::PhysicalDevice as _, device::Device as _}, init::winit::{ event::{Event, WindowEvent}, event_loop::{ControlFlow, EventLoop}, window::WindowBuilder, }, init::AnyWindowedRendy, memory::Dynamic, mesh::{Mesh, Model, PosColorNorm}, resource::{Buffer, BufferInfo, DescriptorSet, DescriptorSetLayout, Escape, Handle}, shader::{ShaderKind, SourceLanguage, SourceShaderInfo, SpirvShader}, }, std::{cmp::min, mem::size_of, time}, }; use { genmesh::Triangulate, nalgebra::Vector3, random_color::RandomColor, rendy::mesh::Position, rendy_playground::player, }; lazy_static::lazy_static! 
{ static ref VERTEX: SpirvShader = SourceShaderInfo::new( include_str!("shader.vert"), concat!(env!("CARGO_MANIFEST_DIR"), "/examples/meshes_simple/shader.vert").into(), ShaderKind::Vertex, SourceLanguage::GLSL, "main", ).precompile().unwrap(); static ref FRAGMENT: SpirvShader = SourceShaderInfo::new( include_str!("shader.frag"), concat!(env!("CARGO_MANIFEST_DIR"), "/examples/meshes_simple/shader.frag").into(), ShaderKind::Fragment, SourceLanguage::GLSL, "main", ).precompile().unwrap(); static ref SHADERS: rendy::shader::ShaderSetBuilder = rendy::shader::ShaderSetBuilder::default() .with_vertex(&*VERTEX).unwrap() .with_fragment(&*FRAGMENT).unwrap(); static ref SHADER_REFLECTION: SpirvReflection = SHADERS.reflect().unwrap(); } #[derive(Clone, Copy)] #[repr(C, align(16))] struct UniformArgs { proj: nalgebra::Matrix4<f32>, view: nalgebra::Matrix4<f32>, model: [nalgebra::Matrix4<f32>; 6], } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstanceConst { translate: nalgebra::Vector3<f32>, dir: u32, } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstance { color: nalgebra::Vector3<f32>, pad: u32, } #[derive(Debug)] struct Camera { view: nalgebra::Projective3<f32>, // proj: nalgebra::Perspective3<f32>, proj: nalgebra::Matrix4<f32>, } struct Scene<B: hal::Backend> { camera: Camera, object_mesh: Option<Mesh<B>>, per_instance_const: Vec<PerInstanceConst>, per_instance: Vec<PerInstance>, } const UNIFORM_SIZE: u64 = size_of::<UniformArgs>() as u64; const NUM_INSTANCES: u64 = 1024 * 1024; const PER_INSTANCE_CONST_SIZE: u64 = size_of::<PerInstanceConst>() as u64; const PER_INSTANCE_SIZE: u64 = size_of::<PerInstance>() as u64; const fn align_to(s: u64, align: u64) -> u64
const fn buffer_const_size(align: u64) -> u64 { align_to(PER_INSTANCE_CONST_SIZE * NUM_INSTANCES, align) } const fn buffer_frame_size(align: u64) -> u64 { align_to(UNIFORM_SIZE + PER_INSTANCE_SIZE * NUM_INSTANCES, align) } const fn buffer_size(align: u64, frames: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * frames } const fn uniform_offset(index: usize, align: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * index as u64 } const fn per_instance_offset(index: usize, align: u64) -> u64 { uniform_offset(index, align) + UNIFORM_SIZE } #[derive(Debug, Default)] struct MeshRenderPipelineDesc; #[derive(Debug)] struct MeshRenderPipeline<B: hal::Backend> { align: u64, buffer: Escape<Buffer<B>>, sets: Vec<Escape<DescriptorSet<B>>>, } struct ProfileTimer { label: std::string::String, start: std::time::Instant, } impl ProfileTimer { fn start(label: &str) -> Self { ProfileTimer { label: label.into(), start: std::time::Instant::now(), } } } // impl Drop for ProfileTimer { // fn drop(&mut self) { // println!("{}: {:?}", self.label, self.start.elapsed()); // } // } impl<B> SimpleGraphicsPipelineDesc<B, Scene<B>> for MeshRenderPipelineDesc where B: hal::Backend, { type Pipeline = MeshRenderPipeline<B>; fn load_shader_set( &self, factory: &mut Factory<B>, _scene: &Scene<B>, ) -> rendy_shader::ShaderSet<B> { SHADERS.build(factory, Default::default()).unwrap() } fn vertices( &self, ) -> Vec<( Vec<hal::pso::Element<hal::format::Format>>, hal::pso::ElemStride, hal::pso::VertexInputRate, )> { return vec![ SHADER_REFLECTION .attributes(&["position"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Vertex), SHADER_REFLECTION .attributes(&["translate", "dir"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), SHADER_REFLECTION .attributes(&["color", "pad"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), ]; } fn layout(&self) -> Layout { return SHADER_REFLECTION.layout().unwrap(); } fn 
build<'a>( self, ctx: &GraphContext<B>, factory: &mut Factory<B>, _queue: QueueId, scene: &Scene<B>, buffers: Vec<NodeBuffer>, images: Vec<NodeImage>, set_layouts: &[Handle<DescriptorSetLayout<B>>], ) -> Result<MeshRenderPipeline<B>, rendy_core::hal::pso::CreationError> { assert!(buffers.is_empty()); assert!(images.is_empty()); assert_eq!(set_layouts.len(), 1); let frames = ctx.frames_in_flight as _; let align = factory .physical() .limits() .min_uniform_buffer_offset_alignment; let mut buffer = factory .create_buffer( BufferInfo { size: buffer_size(align, frames) as u64, usage: hal::buffer::Usage::UNIFORM | hal::buffer::Usage::INDIRECT | hal::buffer::Usage::VERTEX, }, Dynamic, ) .unwrap(); let mut sets = Vec::new(); for index in 0..frames { unsafe { let set = factory .create_descriptor_set(set_layouts[0].clone()) .unwrap(); factory.write_descriptor_sets(Some(hal::pso::DescriptorSetWrite { set: set.raw(), binding: 0, array_offset: 0, descriptors: Some(hal::pso::Descriptor::Buffer( buffer.raw(), Some(uniform_offset(index as usize, align)) ..Some(uniform_offset(index as usize, align) + UNIFORM_SIZE), )), })); sets.push(set); } } if !scene.per_instance_const.is_empty() { // println!( // "upload const: {}", // std::mem::size_of::<PerInstanceConst>() * scene.per_instance_const.len() // ); unsafe { factory .upload_visible_buffer(&mut buffer, 0, &scene.per_instance_const[..]) .expect("update const buffer failed") }; } Ok(MeshRenderPipeline { align, buffer, sets, }) } } fn model_transform() -> nalgebra::Matrix4<f32> { let rot = nalgebra::UnitQuaternion::identity(); nalgebra::Similarity3::from_parts(Vector3::new(0.5, 0.5, 0.0).into(), rot, 0.5).into() } fn model_transform2() -> [nalgebra::Matrix4<f32>; 6] { let z_pos = nalgebra::UnitQuaternion::identity(); let z_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 0.0, -1.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 
0.0), ); let x_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(-1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let y_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 1.0, 0.0), &Vector3::new(0.0, 0.0, 1.0), ); let y_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, -1.0, 0.0), &Vector3::new(0.0, 0.0, -1.0), ); // let unit = 0.125; let unit = 0.125; let scale = 0.125; [ nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, unit).into(), z_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, -unit).into(), z_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(unit, 0.0, 0.0).into(), x_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(-unit, 0.0, 0.0).into(), x_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, unit, 0.0).into(), y_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, -unit, 0.0).into(), y_neg, scale) .into(), ] } impl<B> SimpleGraphicsPipeline<B, Scene<B>> for MeshRenderPipeline<B> where B: hal::Backend, { type Desc = MeshRenderPipelineDesc; fn prepare( &mut self, factory: &Factory<B>, _queue: QueueId, _set_layouts: &[Handle<DescriptorSetLayout<B>>], index: usize, scene: &Scene<B>, ) -> PrepareResult { let pt = ProfileTimer::start("prepare"); // println!("index: {}", index); // println!( // "upload uniform {}: {}", // index, // std::mem::size_of::<UniformArgs>() // ); unsafe { factory .upload_visible_buffer( &mut self.buffer, uniform_offset(index, self.align), &[UniformArgs { // proj: scene.camera.proj.to_homogeneous(), proj: scene.camera.proj, view: scene.camera.view.to_homogeneous(), model: model_transform2(), }], ) .unwrap() }; // { // let per_instance = &scene.per_instance[..]; // println!( // "upload dyn {}: {}", // index, // // std::mem::size_of::<PerInstance>() * scene.per_instance.len(), // std::mem::size_of_val(per_instance) // ); // } if !scene.per_instance.is_empty() { unsafe { factory 
.upload_visible_buffer( &mut self.buffer, per_instance_offset(index, self.align), &scene.per_instance[..], ) .unwrap() }; } PrepareResult::DrawReuse } fn draw( &mut self, layout: &B::PipelineLayout, mut encoder: RenderPassEncoder<'_, B>, index: usize, scene: &Scene<B>, ) { println!("draw"); unsafe { encoder.bind_graphics_descriptor_sets( layout, 0, Some(self.sets[index].raw()), std::iter::empty(), ); let vertex = [SHADER_REFLECTION.attributes(&["position"]).unwrap()]; scene .object_mesh .as_ref() .unwrap() .bind(0, &vertex, &mut encoder) .unwrap(); encoder.bind_vertex_buffers(1, std::iter::once((self.buffer.raw(), 0))); encoder.bind_vertex_buffers( 2, std::iter::once((self.buffer.raw(), per_instance_offset(index, self.align))), ); encoder.draw_indexed( 0..scene.object_mesh.as_ref().unwrap().len(), 0 as i32, 0..scene.per_instance.len() as u32, ) } } fn dispose(self, _factory: &mut Factory<B>, _scene: &Scene<B>) {} } fn main() { env_logger::Builder::from_default_env() .filter_module("meshes", log::LevelFilter::Trace) .init(); let mut event_loop = EventLoop::new(); let window = WindowBuilder::new() .with_inner_size((960, 640).into()) .with_title("Rendy example"); let config: Config = Default::default(); let rendy = AnyWindowedRendy::init_auto(&config, window, &event_loop).unwrap(); rendy::with_any_windowed_rendy!((rendy) use back; (mut factory, mut families, surface, window) => { let mut graph_builder = GraphBuilder::<Backend, Scene<Backend>>::new(); let size = window.inner_size().to_physical(window.hidpi_factor()); let window_kind = hal::image::Kind::D2(size.width as u32, size.height as u32, 1, 1); let aspect = size.width / size.height; let depth = graph_builder.create_image( window_kind, 1, hal::format::Format::D32Sfloat, Some(hal::command::ClearValue { depth_stencil: hal::command::ClearDepthStencil { depth: 1.0, stencil: 0, }, }), ); let pass = graph_builder.add_node( MeshRenderPipeline::builder() .into_subpass() .with_color_surface() .with_depth_stencil(depth) 
.into_pass() .with_surface( surface, hal::window::Extent2D { width: size.width as _, height: size.height as _, }, Some(hal::command::ClearValue { color: hal::command::ClearColor { float32: [0.5, 0.5, 1.0, 1.0], }, }), ), ); let bm = crystal::read_map("hidden_ramp.txt").expect("could not read file"); let mut planes = crystal::PlanesSep::new(); planes.create_planes(&bm); let planes_copy : Vec<crystal::Plane> = planes.planes_iter().cloned().collect(); let mut scene = Scene { camera: Camera { proj: nalgebra::Perspective3::new(aspect as f32, 3.1415 / 4.0, 1.0, 200.0) .to_homogeneous(), view: nalgebra::Projective3::identity() * nalgebra::Translation3::new(0.0, 0.0, 10.0), }, object_mesh: None, per_instance: vec![], per_instance_const: vec![], }; // let mut rng = rand::thread_rng(); // let col_dist = Uniform::new(0.5, 1.0); let mut rc = RandomColor::new(); rc.luminosity(random_color::Luminosity::Bright); println!("planes: {}", planes_copy.len()); for i in 0..std::cmp::min(NUM_INSTANCES as usize,planes_copy.len()) { let color = rc.to_rgb_array(); let point = planes_copy[i].cell; let dir = match planes_copy[i].dir { crystal::Dir::ZxPos => 4, crystal::Dir::ZxNeg => 5, crystal::Dir::YzPos => 2, crystal::Dir::YzNeg => 3, crystal::Dir::XyPos => 0, crystal::Dir::XyNeg => 1, }; scene.per_instance_const.push(PerInstanceConst{ translate: nalgebra::Vector3::new(point[0] as f32 * 0.25, point[1] as f32 * 0.25, point[2] as f32 * 0.25), dir : dir, }); scene.per_instance.push(PerInstance{ color : nalgebra::Vector3::new( color[0] as f32 / 255.0, color[1] as f32 / 255.0, color[2] as f32 / 255.0, ), pad : 0, }); } let graph = graph_builder .build(&mut factory, &mut families, &scene) .unwrap(); // let icosphere = genmesh::generators::IcoSphere::subdivide(3); // let icosphere = genmesh::generators::Torus::new(1f32, 0.5f32, 32, 32); let icosphere = genmesh::generators::Plane::new(); // icosphere. 
let indices: Vec<_> = genmesh::Vertices::vertices(icosphere.indexed_polygon_iter().triangulate()) .map(|i| i as u32) .collect(); println!("indices: {}", indices.len()); let vertices: Vec<_> = icosphere .shared_vertex_iter() .map(|v| Position(v.pos.into())) .collect(); println!("vertices: {}", vertices.len()); for v in &vertices { println!("vert: {:?}", v); } scene.object_mesh = Some( Mesh::<Backend>::builder() .with_indices(&indices[..]) .with_vertices(&vertices[..]) .build(graph.node_queue(pass), &factory) .unwrap(), ); let started = time::Instant::now(); let mut frames = 0u64..; // let rxy = Uniform::new(-1.0, 1.0); // let rz = Uniform::new(0.0, 185.0); let mut checkpoint = started; let mut player_state = player::State::new(); let mut event_manager = player::EventManager::new(); let mut graph = Some(graph); event_loop.run(move |event, _, control_flow| { *control_flow = ControlFlow::Poll; match event { Event::WindowEvent { event, .. } => match event { WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit, _ => event_manager.window_event(event) }, Event::EventsCleared => { if event_manager.should_close() { *control_flow = ControlFlow::Exit; } factory.maintain(&mut families); player_state.apply_events(event_manager.input_events()); scene.camera = Camera { // proj: nalgebra::Perspective3::new(aspect as f32, 3.1415 / 4.0, 1.0, 200.0), proj: rendy_playground::math::perspective_projection( aspect as f32, 3.1415 / 4.0, 1.0, 200.0, ), view: player_state.get_view_matrix(), }; if let Some(ref mut graph) = graph { let pt = ProfileTimer::start("graph.run"); graph.run(&mut factory, &mut families, &scene); } let elapsed = checkpoint.elapsed(); if (checkpoint.elapsed() >= std::time::Duration::from_secs(5)) { checkpoint = time::Instant::now(); // let mut rng = thread_rng(); // let scene = &mut scene.rad_scene; // for i in 0..scene.planes.num_planes() { // // seriously, there is no Vec.fill? 
// scene.diffuse[i] = Vector3::new(1f32, 1f32, 1f32); // scene.emit[i] = Vector3::new(0.0, 0.0, 0.0); // } // let mut rc = RandomColor::new(); // rc.luminosity(random_color::Luminosity::Bright); // let num_dots = 1000; // for _ in 0..num_dots { // let i = rng.gen_range(0, scene.planes.num_planes()); // let color = rc.to_rgb_array(); // scene.emit[i] = Vector3::new(color[0] as f32 / 255.0, color[1] as f32 / 255.0,color[2] as f32 / 255.0,); // } } // { // let pt= ProfileTimer::start("rad"); // scene.rad_scene.do_rad(); // } // for i in 0..scene.rad_scene.planes.num_planes() { // scene.per_instance[i].color[0] = scene.rad_scene.rad_front.r[i]; // scene.per_instance[i].color[1] = scene.rad_scene.rad_front.g[i]; // scene.per_instance[i].color[2] = scene.rad_scene.rad_front.b[i]; // } // for pi in &mut scene.per_instance { // let color = rc.to_rgb_array(); // pi.color = nalgebra::Vector3::new( // color[0] as f32 / 255.0, // color[1] as f32 / 255.0, // color[2] as f32 / 255.0, // ); // } } _ => {} } if *control_flow == ControlFlow::Exit { if let Some(graph) = graph.take() { graph.dispose(&mut factory, &scene); } drop(scene.object_mesh.take()); } }); }); }
{ ((s - 1) / align + 1) * align }
identifier_body
main.rs
//! //! adapted from the rendy meshes demo //! #![cfg_attr( not(any(feature = "dx12", feature = "metal", feature = "vulkan")), allow(unused) )] // #[cfg(feature = "dx12")] // use gfx_backend_dx12::Backend; // #[cfg(feature = "metal")] // use gfx_backend_metal::Backend; // #[cfg(feature = "vulkan")] use gfx_backend_vulkan::Backend; use rand::prelude::*; use rendy::shader::SpirvReflection; use rendy_playground::crystal; use std::sync::mpsc::{channel, sync_channel, Receiver, Sender}; use { genmesh::generators::{IndexedPolygon, SharedVertex}, rand::distributions::{Distribution, Uniform}, rendy::{ command::{DrawIndexedCommand, QueueId, RenderPassEncoder}, factory::{Config, Factory}, graph::{render::*, GraphBuilder, GraphContext, NodeBuffer, NodeImage}, hal::{self, adapter::PhysicalDevice as _, device::Device as _}, init::winit::{ event::{Event, WindowEvent}, event_loop::{ControlFlow, EventLoop}, window::WindowBuilder, }, init::AnyWindowedRendy, memory::Dynamic, mesh::{Mesh, Model, PosColorNorm}, resource::{Buffer, BufferInfo, DescriptorSet, DescriptorSetLayout, Escape, Handle}, shader::{ShaderKind, SourceLanguage, SourceShaderInfo, SpirvShader}, }, std::{cmp::min, mem::size_of, time}, }; use { genmesh::Triangulate, nalgebra::Vector3, random_color::RandomColor, rendy::mesh::Position, rendy_playground::player, }; lazy_static::lazy_static! 
{ static ref VERTEX: SpirvShader = SourceShaderInfo::new( include_str!("shader.vert"), concat!(env!("CARGO_MANIFEST_DIR"), "/examples/meshes_simple/shader.vert").into(), ShaderKind::Vertex, SourceLanguage::GLSL, "main", ).precompile().unwrap(); static ref FRAGMENT: SpirvShader = SourceShaderInfo::new( include_str!("shader.frag"), concat!(env!("CARGO_MANIFEST_DIR"), "/examples/meshes_simple/shader.frag").into(), ShaderKind::Fragment, SourceLanguage::GLSL, "main", ).precompile().unwrap(); static ref SHADERS: rendy::shader::ShaderSetBuilder = rendy::shader::ShaderSetBuilder::default() .with_vertex(&*VERTEX).unwrap() .with_fragment(&*FRAGMENT).unwrap(); static ref SHADER_REFLECTION: SpirvReflection = SHADERS.reflect().unwrap(); } #[derive(Clone, Copy)] #[repr(C, align(16))] struct UniformArgs { proj: nalgebra::Matrix4<f32>, view: nalgebra::Matrix4<f32>, model: [nalgebra::Matrix4<f32>; 6], } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstanceConst { translate: nalgebra::Vector3<f32>, dir: u32, } #[derive(Clone, Copy, Debug)] #[repr(C, align(16))] struct PerInstance { color: nalgebra::Vector3<f32>, pad: u32, } #[derive(Debug)] struct Camera { view: nalgebra::Projective3<f32>, // proj: nalgebra::Perspective3<f32>, proj: nalgebra::Matrix4<f32>, } struct Scene<B: hal::Backend> { camera: Camera, object_mesh: Option<Mesh<B>>, per_instance_const: Vec<PerInstanceConst>, per_instance: Vec<PerInstance>, } const UNIFORM_SIZE: u64 = size_of::<UniformArgs>() as u64; const NUM_INSTANCES: u64 = 1024 * 1024; const PER_INSTANCE_CONST_SIZE: u64 = size_of::<PerInstanceConst>() as u64; const PER_INSTANCE_SIZE: u64 = size_of::<PerInstance>() as u64; const fn align_to(s: u64, align: u64) -> u64 { ((s - 1) / align + 1) * align } const fn buffer_const_size(align: u64) -> u64 { align_to(PER_INSTANCE_CONST_SIZE * NUM_INSTANCES, align) } const fn buffer_frame_size(align: u64) -> u64 { align_to(UNIFORM_SIZE + PER_INSTANCE_SIZE * NUM_INSTANCES, align) } const fn 
buffer_size(align: u64, frames: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * frames } const fn
(index: usize, align: u64) -> u64 { buffer_const_size(align) + buffer_frame_size(align) * index as u64 } const fn per_instance_offset(index: usize, align: u64) -> u64 { uniform_offset(index, align) + UNIFORM_SIZE } #[derive(Debug, Default)] struct MeshRenderPipelineDesc; #[derive(Debug)] struct MeshRenderPipeline<B: hal::Backend> { align: u64, buffer: Escape<Buffer<B>>, sets: Vec<Escape<DescriptorSet<B>>>, } struct ProfileTimer { label: std::string::String, start: std::time::Instant, } impl ProfileTimer { fn start(label: &str) -> Self { ProfileTimer { label: label.into(), start: std::time::Instant::now(), } } } // impl Drop for ProfileTimer { // fn drop(&mut self) { // println!("{}: {:?}", self.label, self.start.elapsed()); // } // } impl<B> SimpleGraphicsPipelineDesc<B, Scene<B>> for MeshRenderPipelineDesc where B: hal::Backend, { type Pipeline = MeshRenderPipeline<B>; fn load_shader_set( &self, factory: &mut Factory<B>, _scene: &Scene<B>, ) -> rendy_shader::ShaderSet<B> { SHADERS.build(factory, Default::default()).unwrap() } fn vertices( &self, ) -> Vec<( Vec<hal::pso::Element<hal::format::Format>>, hal::pso::ElemStride, hal::pso::VertexInputRate, )> { return vec![ SHADER_REFLECTION .attributes(&["position"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Vertex), SHADER_REFLECTION .attributes(&["translate", "dir"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), SHADER_REFLECTION .attributes(&["color", "pad"]) .unwrap() .gfx_vertex_input_desc(hal::pso::VertexInputRate::Instance(1)), ]; } fn layout(&self) -> Layout { return SHADER_REFLECTION.layout().unwrap(); } fn build<'a>( self, ctx: &GraphContext<B>, factory: &mut Factory<B>, _queue: QueueId, scene: &Scene<B>, buffers: Vec<NodeBuffer>, images: Vec<NodeImage>, set_layouts: &[Handle<DescriptorSetLayout<B>>], ) -> Result<MeshRenderPipeline<B>, rendy_core::hal::pso::CreationError> { assert!(buffers.is_empty()); assert!(images.is_empty()); assert_eq!(set_layouts.len(), 1); 
let frames = ctx.frames_in_flight as _; let align = factory .physical() .limits() .min_uniform_buffer_offset_alignment; let mut buffer = factory .create_buffer( BufferInfo { size: buffer_size(align, frames) as u64, usage: hal::buffer::Usage::UNIFORM | hal::buffer::Usage::INDIRECT | hal::buffer::Usage::VERTEX, }, Dynamic, ) .unwrap(); let mut sets = Vec::new(); for index in 0..frames { unsafe { let set = factory .create_descriptor_set(set_layouts[0].clone()) .unwrap(); factory.write_descriptor_sets(Some(hal::pso::DescriptorSetWrite { set: set.raw(), binding: 0, array_offset: 0, descriptors: Some(hal::pso::Descriptor::Buffer( buffer.raw(), Some(uniform_offset(index as usize, align)) ..Some(uniform_offset(index as usize, align) + UNIFORM_SIZE), )), })); sets.push(set); } } if !scene.per_instance_const.is_empty() { // println!( // "upload const: {}", // std::mem::size_of::<PerInstanceConst>() * scene.per_instance_const.len() // ); unsafe { factory .upload_visible_buffer(&mut buffer, 0, &scene.per_instance_const[..]) .expect("update const buffer failed") }; } Ok(MeshRenderPipeline { align, buffer, sets, }) } } fn model_transform() -> nalgebra::Matrix4<f32> { let rot = nalgebra::UnitQuaternion::identity(); nalgebra::Similarity3::from_parts(Vector3::new(0.5, 0.5, 0.0).into(), rot, 0.5).into() } fn model_transform2() -> [nalgebra::Matrix4<f32>; 6] { let z_pos = nalgebra::UnitQuaternion::identity(); let z_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 0.0, -1.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let x_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(-1.0, 0.0, 0.0), &Vector3::new(0.0, 1.0, 0.0), ); let y_pos = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, 1.0, 0.0), &Vector3::new(0.0, 0.0, 1.0), ); let y_neg = nalgebra::UnitQuaternion::face_towards( &Vector3::new(0.0, -1.0, 0.0), &Vector3::new(0.0, 0.0, -1.0), ); // let 
unit = 0.125; let unit = 0.125; let scale = 0.125; [ nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, unit).into(), z_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, 0.0, -unit).into(), z_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(unit, 0.0, 0.0).into(), x_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(-unit, 0.0, 0.0).into(), x_neg, scale) .into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, unit, 0.0).into(), y_pos, scale).into(), nalgebra::Similarity3::from_parts(Vector3::new(0.0, -unit, 0.0).into(), y_neg, scale) .into(), ] } impl<B> SimpleGraphicsPipeline<B, Scene<B>> for MeshRenderPipeline<B> where B: hal::Backend, { type Desc = MeshRenderPipelineDesc; fn prepare( &mut self, factory: &Factory<B>, _queue: QueueId, _set_layouts: &[Handle<DescriptorSetLayout<B>>], index: usize, scene: &Scene<B>, ) -> PrepareResult { let pt = ProfileTimer::start("prepare"); // println!("index: {}", index); // println!( // "upload uniform {}: {}", // index, // std::mem::size_of::<UniformArgs>() // ); unsafe { factory .upload_visible_buffer( &mut self.buffer, uniform_offset(index, self.align), &[UniformArgs { // proj: scene.camera.proj.to_homogeneous(), proj: scene.camera.proj, view: scene.camera.view.to_homogeneous(), model: model_transform2(), }], ) .unwrap() }; // { // let per_instance = &scene.per_instance[..]; // println!( // "upload dyn {}: {}", // index, // // std::mem::size_of::<PerInstance>() * scene.per_instance.len(), // std::mem::size_of_val(per_instance) // ); // } if !scene.per_instance.is_empty() { unsafe { factory .upload_visible_buffer( &mut self.buffer, per_instance_offset(index, self.align), &scene.per_instance[..], ) .unwrap() }; } PrepareResult::DrawReuse } fn draw( &mut self, layout: &B::PipelineLayout, mut encoder: RenderPassEncoder<'_, B>, index: usize, scene: &Scene<B>, ) { println!("draw"); unsafe { encoder.bind_graphics_descriptor_sets( layout, 0, 
Some(self.sets[index].raw()), std::iter::empty(), ); let vertex = [SHADER_REFLECTION.attributes(&["position"]).unwrap()]; scene .object_mesh .as_ref() .unwrap() .bind(0, &vertex, &mut encoder) .unwrap(); encoder.bind_vertex_buffers(1, std::iter::once((self.buffer.raw(), 0))); encoder.bind_vertex_buffers( 2, std::iter::once((self.buffer.raw(), per_instance_offset(index, self.align))), ); encoder.draw_indexed( 0..scene.object_mesh.as_ref().unwrap().len(), 0 as i32, 0..scene.per_instance.len() as u32, ) } } fn dispose(self, _factory: &mut Factory<B>, _scene: &Scene<B>) {} } fn main() { env_logger::Builder::from_default_env() .filter_module("meshes", log::LevelFilter::Trace) .init(); let mut event_loop = EventLoop::new(); let window = WindowBuilder::new() .with_inner_size((960, 640).into()) .with_title("Rendy example"); let config: Config = Default::default(); let rendy = AnyWindowedRendy::init_auto(&config, window, &event_loop).unwrap(); rendy::with_any_windowed_rendy!((rendy) use back; (mut factory, mut families, surface, window) => { let mut graph_builder = GraphBuilder::<Backend, Scene<Backend>>::new(); let size = window.inner_size().to_physical(window.hidpi_factor()); let window_kind = hal::image::Kind::D2(size.width as u32, size.height as u32, 1, 1); let aspect = size.width / size.height; let depth = graph_builder.create_image( window_kind, 1, hal::format::Format::D32Sfloat, Some(hal::command::ClearValue { depth_stencil: hal::command::ClearDepthStencil { depth: 1.0, stencil: 0, }, }), ); let pass = graph_builder.add_node( MeshRenderPipeline::builder() .into_subpass() .with_color_surface() .with_depth_stencil(depth) .into_pass() .with_surface( surface, hal::window::Extent2D { width: size.width as _, height: size.height as _, }, Some(hal::command::ClearValue { color: hal::command::ClearColor { float32: [0.5, 0.5, 1.0, 1.0], }, }), ), ); let bm = crystal::read_map("hidden_ramp.txt").expect("could not read file"); let mut planes = crystal::PlanesSep::new(); 
planes.create_planes(&bm); let planes_copy : Vec<crystal::Plane> = planes.planes_iter().cloned().collect(); let mut scene = Scene { camera: Camera { proj: nalgebra::Perspective3::new(aspect as f32, 3.1415 / 4.0, 1.0, 200.0) .to_homogeneous(), view: nalgebra::Projective3::identity() * nalgebra::Translation3::new(0.0, 0.0, 10.0), }, object_mesh: None, per_instance: vec![], per_instance_const: vec![], }; // let mut rng = rand::thread_rng(); // let col_dist = Uniform::new(0.5, 1.0); let mut rc = RandomColor::new(); rc.luminosity(random_color::Luminosity::Bright); println!("planes: {}", planes_copy.len()); for i in 0..std::cmp::min(NUM_INSTANCES as usize,planes_copy.len()) { let color = rc.to_rgb_array(); let point = planes_copy[i].cell; let dir = match planes_copy[i].dir { crystal::Dir::ZxPos => 4, crystal::Dir::ZxNeg => 5, crystal::Dir::YzPos => 2, crystal::Dir::YzNeg => 3, crystal::Dir::XyPos => 0, crystal::Dir::XyNeg => 1, }; scene.per_instance_const.push(PerInstanceConst{ translate: nalgebra::Vector3::new(point[0] as f32 * 0.25, point[1] as f32 * 0.25, point[2] as f32 * 0.25), dir : dir, }); scene.per_instance.push(PerInstance{ color : nalgebra::Vector3::new( color[0] as f32 / 255.0, color[1] as f32 / 255.0, color[2] as f32 / 255.0, ), pad : 0, }); } let graph = graph_builder .build(&mut factory, &mut families, &scene) .unwrap(); // let icosphere = genmesh::generators::IcoSphere::subdivide(3); // let icosphere = genmesh::generators::Torus::new(1f32, 0.5f32, 32, 32); let icosphere = genmesh::generators::Plane::new(); // icosphere. 
let indices: Vec<_> = genmesh::Vertices::vertices(icosphere.indexed_polygon_iter().triangulate()) .map(|i| i as u32) .collect(); println!("indices: {}", indices.len()); let vertices: Vec<_> = icosphere .shared_vertex_iter() .map(|v| Position(v.pos.into())) .collect(); println!("vertices: {}", vertices.len()); for v in &vertices { println!("vert: {:?}", v); } scene.object_mesh = Some( Mesh::<Backend>::builder() .with_indices(&indices[..]) .with_vertices(&vertices[..]) .build(graph.node_queue(pass), &factory) .unwrap(), ); let started = time::Instant::now(); let mut frames = 0u64..; // let rxy = Uniform::new(-1.0, 1.0); // let rz = Uniform::new(0.0, 185.0); let mut checkpoint = started; let mut player_state = player::State::new(); let mut event_manager = player::EventManager::new(); let mut graph = Some(graph); event_loop.run(move |event, _, control_flow| { *control_flow = ControlFlow::Poll; match event { Event::WindowEvent { event, .. } => match event { WindowEvent::CloseRequested => *control_flow = ControlFlow::Exit, _ => event_manager.window_event(event) }, Event::EventsCleared => { if event_manager.should_close() { *control_flow = ControlFlow::Exit; } factory.maintain(&mut families); player_state.apply_events(event_manager.input_events()); scene.camera = Camera { // proj: nalgebra::Perspective3::new(aspect as f32, 3.1415 / 4.0, 1.0, 200.0), proj: rendy_playground::math::perspective_projection( aspect as f32, 3.1415 / 4.0, 1.0, 200.0, ), view: player_state.get_view_matrix(), }; if let Some(ref mut graph) = graph { let pt = ProfileTimer::start("graph.run"); graph.run(&mut factory, &mut families, &scene); } let elapsed = checkpoint.elapsed(); if (checkpoint.elapsed() >= std::time::Duration::from_secs(5)) { checkpoint = time::Instant::now(); // let mut rng = thread_rng(); // let scene = &mut scene.rad_scene; // for i in 0..scene.planes.num_planes() { // // seriously, there is no Vec.fill? 
// scene.diffuse[i] = Vector3::new(1f32, 1f32, 1f32); // scene.emit[i] = Vector3::new(0.0, 0.0, 0.0); // } // let mut rc = RandomColor::new(); // rc.luminosity(random_color::Luminosity::Bright); // let num_dots = 1000; // for _ in 0..num_dots { // let i = rng.gen_range(0, scene.planes.num_planes()); // let color = rc.to_rgb_array(); // scene.emit[i] = Vector3::new(color[0] as f32 / 255.0, color[1] as f32 / 255.0,color[2] as f32 / 255.0,); // } } // { // let pt= ProfileTimer::start("rad"); // scene.rad_scene.do_rad(); // } // for i in 0..scene.rad_scene.planes.num_planes() { // scene.per_instance[i].color[0] = scene.rad_scene.rad_front.r[i]; // scene.per_instance[i].color[1] = scene.rad_scene.rad_front.g[i]; // scene.per_instance[i].color[2] = scene.rad_scene.rad_front.b[i]; // } // for pi in &mut scene.per_instance { // let color = rc.to_rgb_array(); // pi.color = nalgebra::Vector3::new( // color[0] as f32 / 255.0, // color[1] as f32 / 255.0, // color[2] as f32 / 255.0, // ); // } } _ => {} } if *control_flow == ControlFlow::Exit { if let Some(graph) = graph.take() { graph.dispose(&mut factory, &scene); } drop(scene.object_mesh.take()); } }); }); }
uniform_offset
identifier_name
catalog.rs
use crate::block::BlockType; use crate::error::*; use crate::mutator::append::Append; use crate::params::{SourceId, CATALOG_METADATA, TIMESTAMP_COLUMN}; use crate::scanner::{Scan, ScanResult}; use crate::storage::manager::PartitionGroupManager; use crate::ty::{BlockStorage, ColumnId, ColumnIndexStorageMap}; use hyena_common::collections::HashMap; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use std::default::Default; use std::iter::FromIterator; use std::path::{Path, PathBuf}; use super::column::Column; use super::partition_group::PartitionGroup; use super::{ColumnMap, PartitionGroupMap}; #[derive(Debug, Serialize, Deserialize)] pub struct Catalog<'cat> { pub(crate) colmap: ColumnMap, pub(crate) groups: PartitionGroupMap<'cat>, pub(crate) indexes: ColumnIndexStorageMap, #[serde(skip)] pub(crate) data_root: PathBuf, } impl<'cat> Catalog<'cat> { pub fn new<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); if meta.exists() { bail!("Catalog metadata already exists {:?}", meta); } let mut catalog = Catalog { colmap: Default::default(), groups: Default::default(), indexes: Default::default(), data_root: root, }; catalog.ensure_default_columns()?; Ok(catalog) } fn ensure_default_columns(&mut self) -> Result<()> { let ts_column = Column::new(BlockStorage::Memmap(BlockType::U64Dense), "timestamp"); let source_column = Column::new(BlockStorage::Memory(BlockType::I32Dense), "source_id"); let mut map = HashMap::new(); map.insert(TIMESTAMP_COLUMN, ts_column); map.insert(1, source_column); self.ensure_columns(map) } pub fn with_data<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); Catalog::deserialize(&meta, &root) } pub fn open_or_create<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { Catalog::with_data(root.as_ref()).or_else(|_| Catalog::new(root.as_ref())) } pub fn columns(&self) -> &ColumnMap 
{ &self.colmap } #[cfg(feature = "validate_append")] fn validate_append(&self, data: &Append) -> bool { let ts_len = data.ts.len(); data.data.iter().all(|(_col, fragment)| { // check fragment length for dense blocks if !fragment.is_sparse() { if ts_len != fragment.len() { error!("Dense append fragment has different length than ts"); return false; } } else { if ts_len > fragment.len() { error!("Sparse append fragment longer than ts"); return false; } if fragment.iter().any(|(idx, _)| idx >= ts_len) { error!("Sparse append fragment has index greater than ts length"); return false; } } true }) } pub fn append(&self, data: &Append) -> Result<usize> { if data.is_empty() { bail!("Provided Append contains no data"); } #[cfg(feature = "validate_append")] { if !self.validate_append(&data) { bail!("Provided Append is not consistent"); } } // dispatch to proper PartitionGroup if let Some(pg) = self.groups.get(&data.source_id) { pg.append(&self, &data) } else { bail!("No PartitionGroup found for source_id = {}", data.source_id); } } pub fn scan(&self, scan: &Scan) -> Result<ScanResult> { let all_groups = if scan.groups.is_some() { None } else { Some(self.groups.keys().cloned().collect::<Vec<_>>()) }; if scan.groups.is_some() { scan.groups.as_ref().unwrap() } else { all_groups.as_ref().unwrap() } .chunks(2) .map(|group| { group .par_iter() .filter_map(|pgid| self.groups.get(pgid)) .map(|pg| pg.scan(&self, &scan)) // todo: this would potentially be better with some short-circuiting combinator // instead // need to bench with collect_into() .reduce( || Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }, ) }) .fold(Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }) } pub fn flush(&self) -> Result<()> { // TODO: add dirty flag let meta = self.data_root.join(CATALOG_METADATA); for pg in self.groups.values() { pg.flush()? 
} Catalog::serialize(self, &meta) } /// Extend internal column map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of a column type. /// Use this feature with great caution. pub(crate) fn ensure_columns(&mut self, type_map: ColumnMap) -> Result<()> { self.colmap.extend(type_map); Ok(()) } /// Adds a column to the catalog. It verifies that catalog does not already contain: /// a) column with the given id, or /// b) column with the given name. /// This function takes all-or-nothing approach: /// either all columns are added, or no changes are applied. pub fn add_columns(&mut self, column_map: ColumnMap) -> Result<()> { for (id, column) in column_map.iter() { info!( "Adding column {}:{:?} with id {}", column.name, column.ty, id ); if self.colmap.contains_key(id) { bail!("Column Id already exists {}", *id); } if self.colmap.values().any(|col| col.name == column.name) { bail!("Column Name already exists '{}'", column.name); } } self.ensure_columns(column_map) } /// Extend internal index map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of a index type. /// Also, the index' support for a given column is not checked. /// Use this feature with great caution. pub(crate) fn ensure_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { self.indexes.extend(&*index_map); Ok(()) } /// Adds index to the catalog. Verifies that catalog does not already contain: /// a) index for a column with the given id, or /// b) index for a column with the given name. /// This function takes all-or-nothing approach: /// either all indexes are added, or no changes are applied. 
pub fn add_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { for (id, index) in index_map.iter() { let column = self .colmap .get(id) .ok_or_else(|| err_msg(format!("column not found {}", id)))?; info!( "Adding index {:?} for column {}[{}]:{:?}", index, column.name, id, column.ty ); if self.indexes.contains_key(id) { bail!("Index already exists {}", *id); } } self.ensure_indexes(index_map) } /// Fetch the first non-occupied column index /// /// todo: rethink this approach (max() every time) pub fn next_id(&self) -> usize { let default = 0; *self.colmap.keys().max().unwrap_or(&default) + 1 } /// Calculate an empty partition's capacity for given column set pub(super) fn space_for_blocks<'iter>( &self, indices: impl Iterator<Item = &'iter ColumnId>, ) -> usize { use crate::params::BLOCK_SIZE; indices .filter_map(|col_id| { if let Some(column) = self.colmap.get(col_id) { Some(BLOCK_SIZE / column.size_of()) } else { None } }) .min() // the default shouldn't ever happen, as there always should be ts block // but in case it happens, this will return 0 // which in turn will cause new partition to be used .unwrap_or_default() } pub(crate) fn ensure_group( &mut self, source_id: SourceId, ) -> Result<&mut PartitionGroup<'cat>> { let data_root = <_ as AsRef<Path>>::as_ref(&self.data_root); Ok(self.groups.entry(source_id).or_insert_with(|| { // this shouldn't fail in general let root = PartitionGroupManager::new(data_root, source_id) .with_context(|_| "Failed to create group manager") .unwrap(); let pg = PartitionGroup::new(&root, source_id) .with_context(|_| "Unable to create partition group") .unwrap(); pg.flush().unwrap(); pg })) } /// Add new partition group with given source id pub fn add_partition_group(&mut self, source_id: SourceId) -> Result<()> { let _ = self.ensure_group(source_id)?; Ok(()) } fn prepare_partition_groups<P, I>(root: P, ids: I) -> Result<PartitionGroupMap<'cat>> where P: AsRef<Path>, I: IntoIterator<Item = SourceId>, { 
ids.into_iter() .map(|source_id| { let path = PartitionGroupManager::new(&root, source_id).with_context(|_| { format!( "Unable to obtain data directory for partition group {}", source_id ) })?; let partition_group = PartitionGroup::with_data(path) .with_context(|_| format!("Unable to read partition group {:?}", source_id))?; Ok((source_id, partition_group)) }) .collect() } fn serialize<P: AsRef<Path>>(catalog: &Catalog<'cat>, meta: P) -> Result<()> { let meta = meta.as_ref(); let group_metas = Vec::from_iter(catalog.groups.keys()); let data = (catalog, group_metas); serialize!(file meta, &data) .with_context(|_| "Failed to serialize catalog metadata") .map_err(|e| e.into()) } fn deserialize<P: AsRef<Path>, R: AsRef<Path>>(meta: P, root: R) -> Result<Catalog<'cat>> {
bail!("Cannot find catalog metadata {:?}", meta); } let (mut catalog, group_metas): (Catalog, Vec<SourceId>) = deserialize!(file meta).with_context(|_| "Failed to read catalog metadata")?; catalog.groups = Catalog::prepare_partition_groups(&root, group_metas) .with_context(|_| "Failed to read partition data")?; catalog.data_root = root.as_ref().to_path_buf(); Ok(catalog) } } impl<'cat> Drop for Catalog<'cat> { fn drop(&mut self) { self.flush() .with_context(|_| "Failed to flush data during drop") .unwrap(); } } impl<'cat> AsRef<ColumnMap> for Catalog<'cat> { fn as_ref(&self) -> &ColumnMap { &self.colmap } } #[cfg(test)] mod tests { use super::*; use crate::datastore::tests::create_random_partitions; #[test] fn new() { let source_ids = [1, 5, 7]; let im_part_count = 8; let mut_part_count = 2; let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); for source_id in &source_ids { let pg = cat .ensure_group(*source_id) .with_context(|_| "Unable to retrieve partition group") .unwrap(); create_random_partitions(pg, im_part_count, mut_part_count); } } #[test] fn add_partition_group_idempotence() { let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); const PG_ID: SourceId = 10; cat.add_partition_group(PG_ID).unwrap(); cat.add_partition_group(PG_ID).unwrap(); assert_eq!(cat.groups.len(), 1); assert_eq!( cat.groups .iter() .nth(0) .expect("partition group not found") .0, &PG_ID ); } }
let meta = meta.as_ref(); if !meta.exists() {
random_line_split
catalog.rs
use crate::block::BlockType; use crate::error::*; use crate::mutator::append::Append; use crate::params::{SourceId, CATALOG_METADATA, TIMESTAMP_COLUMN}; use crate::scanner::{Scan, ScanResult}; use crate::storage::manager::PartitionGroupManager; use crate::ty::{BlockStorage, ColumnId, ColumnIndexStorageMap}; use hyena_common::collections::HashMap; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use std::default::Default; use std::iter::FromIterator; use std::path::{Path, PathBuf}; use super::column::Column; use super::partition_group::PartitionGroup; use super::{ColumnMap, PartitionGroupMap}; #[derive(Debug, Serialize, Deserialize)] pub struct Catalog<'cat> { pub(crate) colmap: ColumnMap, pub(crate) groups: PartitionGroupMap<'cat>, pub(crate) indexes: ColumnIndexStorageMap, #[serde(skip)] pub(crate) data_root: PathBuf, } impl<'cat> Catalog<'cat> { pub fn new<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); if meta.exists() { bail!("Catalog metadata already exists {:?}", meta); } let mut catalog = Catalog { colmap: Default::default(), groups: Default::default(), indexes: Default::default(), data_root: root, }; catalog.ensure_default_columns()?; Ok(catalog) } fn ensure_default_columns(&mut self) -> Result<()> { let ts_column = Column::new(BlockStorage::Memmap(BlockType::U64Dense), "timestamp"); let source_column = Column::new(BlockStorage::Memory(BlockType::I32Dense), "source_id"); let mut map = HashMap::new(); map.insert(TIMESTAMP_COLUMN, ts_column); map.insert(1, source_column); self.ensure_columns(map) } pub fn with_data<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); Catalog::deserialize(&meta, &root) } pub fn open_or_create<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { Catalog::with_data(root.as_ref()).or_else(|_| Catalog::new(root.as_ref())) } pub fn columns(&self) -> &ColumnMap 
{ &self.colmap } #[cfg(feature = "validate_append")] fn validate_append(&self, data: &Append) -> bool { let ts_len = data.ts.len(); data.data.iter().all(|(_col, fragment)| { // check fragment length for dense blocks if !fragment.is_sparse() { if ts_len != fragment.len() { error!("Dense append fragment has different length than ts"); return false; } } else { if ts_len > fragment.len() { error!("Sparse append fragment longer than ts"); return false; } if fragment.iter().any(|(idx, _)| idx >= ts_len) { error!("Sparse append fragment has index greater than ts length"); return false; } } true }) } pub fn append(&self, data: &Append) -> Result<usize> { if data.is_empty() { bail!("Provided Append contains no data"); } #[cfg(feature = "validate_append")] { if !self.validate_append(&data) { bail!("Provided Append is not consistent"); } } // dispatch to proper PartitionGroup if let Some(pg) = self.groups.get(&data.source_id) { pg.append(&self, &data) } else { bail!("No PartitionGroup found for source_id = {}", data.source_id); } } pub fn scan(&self, scan: &Scan) -> Result<ScanResult> { let all_groups = if scan.groups.is_some() { None } else { Some(self.groups.keys().cloned().collect::<Vec<_>>()) }; if scan.groups.is_some() { scan.groups.as_ref().unwrap() } else { all_groups.as_ref().unwrap() } .chunks(2) .map(|group| { group .par_iter() .filter_map(|pgid| self.groups.get(pgid)) .map(|pg| pg.scan(&self, &scan)) // todo: this would potentially be better with some short-circuiting combinator // instead // need to bench with collect_into() .reduce( || Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }, ) }) .fold(Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }) } pub fn flush(&self) -> Result<()> { // TODO: add dirty flag let meta = self.data_root.join(CATALOG_METADATA); for pg in self.groups.values() { pg.flush()? 
} Catalog::serialize(self, &meta) } /// Extend internal column map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of a column type. /// Use this feature with great caution. pub(crate) fn ensure_columns(&mut self, type_map: ColumnMap) -> Result<()> { self.colmap.extend(type_map); Ok(()) } /// Adds a column to the catalog. It verifies that catalog does not already contain: /// a) column with the given id, or /// b) column with the given name. /// This function takes all-or-nothing approach: /// either all columns are added, or no changes are applied. pub fn add_columns(&mut self, column_map: ColumnMap) -> Result<()> { for (id, column) in column_map.iter() { info!( "Adding column {}:{:?} with id {}", column.name, column.ty, id ); if self.colmap.contains_key(id) { bail!("Column Id already exists {}", *id); } if self.colmap.values().any(|col| col.name == column.name) { bail!("Column Name already exists '{}'", column.name); } } self.ensure_columns(column_map) } /// Extend internal index map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of a index type. /// Also, the index' support for a given column is not checked. /// Use this feature with great caution. pub(crate) fn ensure_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { self.indexes.extend(&*index_map); Ok(()) } /// Adds index to the catalog. Verifies that catalog does not already contain: /// a) index for a column with the given id, or /// b) index for a column with the given name. /// This function takes all-or-nothing approach: /// either all indexes are added, or no changes are applied. 
pub fn add_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { for (id, index) in index_map.iter() { let column = self .colmap .get(id) .ok_or_else(|| err_msg(format!("column not found {}", id)))?; info!( "Adding index {:?} for column {}[{}]:{:?}", index, column.name, id, column.ty ); if self.indexes.contains_key(id) { bail!("Index already exists {}", *id); } } self.ensure_indexes(index_map) } /// Fetch the first non-occupied column index /// /// todo: rethink this approach (max() every time) pub fn next_id(&self) -> usize { let default = 0; *self.colmap.keys().max().unwrap_or(&default) + 1 } /// Calculate an empty partition's capacity for given column set pub(super) fn space_for_blocks<'iter>( &self, indices: impl Iterator<Item = &'iter ColumnId>, ) -> usize { use crate::params::BLOCK_SIZE; indices .filter_map(|col_id| { if let Some(column) = self.colmap.get(col_id) { Some(BLOCK_SIZE / column.size_of()) } else { None } }) .min() // the default shouldn't ever happen, as there always should be ts block // but in case it happens, this will return 0 // which in turn will cause new partition to be used .unwrap_or_default() } pub(crate) fn ensure_group( &mut self, source_id: SourceId, ) -> Result<&mut PartitionGroup<'cat>> { let data_root = <_ as AsRef<Path>>::as_ref(&self.data_root); Ok(self.groups.entry(source_id).or_insert_with(|| { // this shouldn't fail in general let root = PartitionGroupManager::new(data_root, source_id) .with_context(|_| "Failed to create group manager") .unwrap(); let pg = PartitionGroup::new(&root, source_id) .with_context(|_| "Unable to create partition group") .unwrap(); pg.flush().unwrap(); pg })) } /// Add new partition group with given source id pub fn add_partition_group(&mut self, source_id: SourceId) -> Result<()>
fn prepare_partition_groups<P, I>(root: P, ids: I) -> Result<PartitionGroupMap<'cat>> where P: AsRef<Path>, I: IntoIterator<Item = SourceId>, { ids.into_iter() .map(|source_id| { let path = PartitionGroupManager::new(&root, source_id).with_context(|_| { format!( "Unable to obtain data directory for partition group {}", source_id ) })?; let partition_group = PartitionGroup::with_data(path) .with_context(|_| format!("Unable to read partition group {:?}", source_id))?; Ok((source_id, partition_group)) }) .collect() } fn serialize<P: AsRef<Path>>(catalog: &Catalog<'cat>, meta: P) -> Result<()> { let meta = meta.as_ref(); let group_metas = Vec::from_iter(catalog.groups.keys()); let data = (catalog, group_metas); serialize!(file meta, &data) .with_context(|_| "Failed to serialize catalog metadata") .map_err(|e| e.into()) } fn deserialize<P: AsRef<Path>, R: AsRef<Path>>(meta: P, root: R) -> Result<Catalog<'cat>> { let meta = meta.as_ref(); if !meta.exists() { bail!("Cannot find catalog metadata {:?}", meta); } let (mut catalog, group_metas): (Catalog, Vec<SourceId>) = deserialize!(file meta).with_context(|_| "Failed to read catalog metadata")?; catalog.groups = Catalog::prepare_partition_groups(&root, group_metas) .with_context(|_| "Failed to read partition data")?; catalog.data_root = root.as_ref().to_path_buf(); Ok(catalog) } } impl<'cat> Drop for Catalog<'cat> { fn drop(&mut self) { self.flush() .with_context(|_| "Failed to flush data during drop") .unwrap(); } } impl<'cat> AsRef<ColumnMap> for Catalog<'cat> { fn as_ref(&self) -> &ColumnMap { &self.colmap } } #[cfg(test)] mod tests { use super::*; use crate::datastore::tests::create_random_partitions; #[test] fn new() { let source_ids = [1, 5, 7]; let im_part_count = 8; let mut_part_count = 2; let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); for source_id in &source_ids { let pg = cat .ensure_group(*source_id) .with_context(|_| "Unable to retrieve 
partition group") .unwrap(); create_random_partitions(pg, im_part_count, mut_part_count); } } #[test] fn add_partition_group_idempotence() { let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); const PG_ID: SourceId = 10; cat.add_partition_group(PG_ID).unwrap(); cat.add_partition_group(PG_ID).unwrap(); assert_eq!(cat.groups.len(), 1); assert_eq!( cat.groups .iter() .nth(0) .expect("partition group not found") .0, &PG_ID ); } }
{ let _ = self.ensure_group(source_id)?; Ok(()) }
identifier_body
catalog.rs
use crate::block::BlockType; use crate::error::*; use crate::mutator::append::Append; use crate::params::{SourceId, CATALOG_METADATA, TIMESTAMP_COLUMN}; use crate::scanner::{Scan, ScanResult}; use crate::storage::manager::PartitionGroupManager; use crate::ty::{BlockStorage, ColumnId, ColumnIndexStorageMap}; use hyena_common::collections::HashMap; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use std::default::Default; use std::iter::FromIterator; use std::path::{Path, PathBuf}; use super::column::Column; use super::partition_group::PartitionGroup; use super::{ColumnMap, PartitionGroupMap}; #[derive(Debug, Serialize, Deserialize)] pub struct Catalog<'cat> { pub(crate) colmap: ColumnMap, pub(crate) groups: PartitionGroupMap<'cat>, pub(crate) indexes: ColumnIndexStorageMap, #[serde(skip)] pub(crate) data_root: PathBuf, } impl<'cat> Catalog<'cat> { pub fn new<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); if meta.exists() { bail!("Catalog metadata already exists {:?}", meta); } let mut catalog = Catalog { colmap: Default::default(), groups: Default::default(), indexes: Default::default(), data_root: root, }; catalog.ensure_default_columns()?; Ok(catalog) } fn
(&mut self) -> Result<()> { let ts_column = Column::new(BlockStorage::Memmap(BlockType::U64Dense), "timestamp"); let source_column = Column::new(BlockStorage::Memory(BlockType::I32Dense), "source_id"); let mut map = HashMap::new(); map.insert(TIMESTAMP_COLUMN, ts_column); map.insert(1, source_column); self.ensure_columns(map) } pub fn with_data<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); Catalog::deserialize(&meta, &root) } pub fn open_or_create<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { Catalog::with_data(root.as_ref()).or_else(|_| Catalog::new(root.as_ref())) } pub fn columns(&self) -> &ColumnMap { &self.colmap } #[cfg(feature = "validate_append")] fn validate_append(&self, data: &Append) -> bool { let ts_len = data.ts.len(); data.data.iter().all(|(_col, fragment)| { // check fragment length for dense blocks if !fragment.is_sparse() { if ts_len != fragment.len() { error!("Dense append fragment has different length than ts"); return false; } } else { if ts_len > fragment.len() { error!("Sparse append fragment longer than ts"); return false; } if fragment.iter().any(|(idx, _)| idx >= ts_len) { error!("Sparse append fragment has index greater than ts length"); return false; } } true }) } pub fn append(&self, data: &Append) -> Result<usize> { if data.is_empty() { bail!("Provided Append contains no data"); } #[cfg(feature = "validate_append")] { if !self.validate_append(&data) { bail!("Provided Append is not consistent"); } } // dispatch to proper PartitionGroup if let Some(pg) = self.groups.get(&data.source_id) { pg.append(&self, &data) } else { bail!("No PartitionGroup found for source_id = {}", data.source_id); } } pub fn scan(&self, scan: &Scan) -> Result<ScanResult> { let all_groups = if scan.groups.is_some() { None } else { Some(self.groups.keys().cloned().collect::<Vec<_>>()) }; if scan.groups.is_some() { scan.groups.as_ref().unwrap() } else { 
all_groups.as_ref().unwrap() } .chunks(2) .map(|group| { group .par_iter() .filter_map(|pgid| self.groups.get(pgid)) .map(|pg| pg.scan(&self, &scan)) // todo: this would potentially be better with some short-circuiting combinator // instead // need to bench with collect_into() .reduce( || Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }, ) }) .fold(Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }) } pub fn flush(&self) -> Result<()> { // TODO: add dirty flag let meta = self.data_root.join(CATALOG_METADATA); for pg in self.groups.values() { pg.flush()? } Catalog::serialize(self, &meta) } /// Extend internal column map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of a column type. /// Use this feature with great caution. pub(crate) fn ensure_columns(&mut self, type_map: ColumnMap) -> Result<()> { self.colmap.extend(type_map); Ok(()) } /// Adds a column to the catalog. It verifies that catalog does not already contain: /// a) column with the given id, or /// b) column with the given name. /// This function takes all-or-nothing approach: /// either all columns are added, or no changes are applied. pub fn add_columns(&mut self, column_map: ColumnMap) -> Result<()> { for (id, column) in column_map.iter() { info!( "Adding column {}:{:?} with id {}", column.name, column.ty, id ); if self.colmap.contains_key(id) { bail!("Column Id already exists {}", *id); } if self.colmap.values().any(|col| col.name == column.name) { bail!("Column Name already exists '{}'", column.name); } } self.ensure_columns(column_map) } /// Extend internal index map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of a index type. /// Also, the index' support for a given column is not checked. /// Use this feature with great caution. 
pub(crate) fn ensure_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { self.indexes.extend(&*index_map); Ok(()) } /// Adds index to the catalog. Verifies that catalog does not already contain: /// a) index for a column with the given id, or /// b) index for a column with the given name. /// This function takes all-or-nothing approach: /// either all indexes are added, or no changes are applied. pub fn add_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { for (id, index) in index_map.iter() { let column = self .colmap .get(id) .ok_or_else(|| err_msg(format!("column not found {}", id)))?; info!( "Adding index {:?} for column {}[{}]:{:?}", index, column.name, id, column.ty ); if self.indexes.contains_key(id) { bail!("Index already exists {}", *id); } } self.ensure_indexes(index_map) } /// Fetch the first non-occupied column index /// /// todo: rethink this approach (max() every time) pub fn next_id(&self) -> usize { let default = 0; *self.colmap.keys().max().unwrap_or(&default) + 1 } /// Calculate an empty partition's capacity for given column set pub(super) fn space_for_blocks<'iter>( &self, indices: impl Iterator<Item = &'iter ColumnId>, ) -> usize { use crate::params::BLOCK_SIZE; indices .filter_map(|col_id| { if let Some(column) = self.colmap.get(col_id) { Some(BLOCK_SIZE / column.size_of()) } else { None } }) .min() // the default shouldn't ever happen, as there always should be ts block // but in case it happens, this will return 0 // which in turn will cause new partition to be used .unwrap_or_default() } pub(crate) fn ensure_group( &mut self, source_id: SourceId, ) -> Result<&mut PartitionGroup<'cat>> { let data_root = <_ as AsRef<Path>>::as_ref(&self.data_root); Ok(self.groups.entry(source_id).or_insert_with(|| { // this shouldn't fail in general let root = PartitionGroupManager::new(data_root, source_id) .with_context(|_| "Failed to create group manager") .unwrap(); let pg = PartitionGroup::new(&root, source_id) 
.with_context(|_| "Unable to create partition group") .unwrap(); pg.flush().unwrap(); pg })) } /// Add new partition group with given source id pub fn add_partition_group(&mut self, source_id: SourceId) -> Result<()> { let _ = self.ensure_group(source_id)?; Ok(()) } fn prepare_partition_groups<P, I>(root: P, ids: I) -> Result<PartitionGroupMap<'cat>> where P: AsRef<Path>, I: IntoIterator<Item = SourceId>, { ids.into_iter() .map(|source_id| { let path = PartitionGroupManager::new(&root, source_id).with_context(|_| { format!( "Unable to obtain data directory for partition group {}", source_id ) })?; let partition_group = PartitionGroup::with_data(path) .with_context(|_| format!("Unable to read partition group {:?}", source_id))?; Ok((source_id, partition_group)) }) .collect() } fn serialize<P: AsRef<Path>>(catalog: &Catalog<'cat>, meta: P) -> Result<()> { let meta = meta.as_ref(); let group_metas = Vec::from_iter(catalog.groups.keys()); let data = (catalog, group_metas); serialize!(file meta, &data) .with_context(|_| "Failed to serialize catalog metadata") .map_err(|e| e.into()) } fn deserialize<P: AsRef<Path>, R: AsRef<Path>>(meta: P, root: R) -> Result<Catalog<'cat>> { let meta = meta.as_ref(); if !meta.exists() { bail!("Cannot find catalog metadata {:?}", meta); } let (mut catalog, group_metas): (Catalog, Vec<SourceId>) = deserialize!(file meta).with_context(|_| "Failed to read catalog metadata")?; catalog.groups = Catalog::prepare_partition_groups(&root, group_metas) .with_context(|_| "Failed to read partition data")?; catalog.data_root = root.as_ref().to_path_buf(); Ok(catalog) } } impl<'cat> Drop for Catalog<'cat> { fn drop(&mut self) { self.flush() .with_context(|_| "Failed to flush data during drop") .unwrap(); } } impl<'cat> AsRef<ColumnMap> for Catalog<'cat> { fn as_ref(&self) -> &ColumnMap { &self.colmap } } #[cfg(test)] mod tests { use super::*; use crate::datastore::tests::create_random_partitions; #[test] fn new() { let source_ids = [1, 5, 7]; let 
im_part_count = 8; let mut_part_count = 2; let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); for source_id in &source_ids { let pg = cat .ensure_group(*source_id) .with_context(|_| "Unable to retrieve partition group") .unwrap(); create_random_partitions(pg, im_part_count, mut_part_count); } } #[test] fn add_partition_group_idempotence() { let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); const PG_ID: SourceId = 10; cat.add_partition_group(PG_ID).unwrap(); cat.add_partition_group(PG_ID).unwrap(); assert_eq!(cat.groups.len(), 1); assert_eq!( cat.groups .iter() .nth(0) .expect("partition group not found") .0, &PG_ID ); } }
ensure_default_columns
identifier_name
catalog.rs
use crate::block::BlockType; use crate::error::*; use crate::mutator::append::Append; use crate::params::{SourceId, CATALOG_METADATA, TIMESTAMP_COLUMN}; use crate::scanner::{Scan, ScanResult}; use crate::storage::manager::PartitionGroupManager; use crate::ty::{BlockStorage, ColumnId, ColumnIndexStorageMap}; use hyena_common::collections::HashMap; use rayon::iter::{IntoParallelRefIterator, ParallelIterator}; use std::default::Default; use std::iter::FromIterator; use std::path::{Path, PathBuf}; use super::column::Column; use super::partition_group::PartitionGroup; use super::{ColumnMap, PartitionGroupMap}; #[derive(Debug, Serialize, Deserialize)] pub struct Catalog<'cat> { pub(crate) colmap: ColumnMap, pub(crate) groups: PartitionGroupMap<'cat>, pub(crate) indexes: ColumnIndexStorageMap, #[serde(skip)] pub(crate) data_root: PathBuf, } impl<'cat> Catalog<'cat> { pub fn new<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); if meta.exists() { bail!("Catalog metadata already exists {:?}", meta); } let mut catalog = Catalog { colmap: Default::default(), groups: Default::default(), indexes: Default::default(), data_root: root, }; catalog.ensure_default_columns()?; Ok(catalog) } fn ensure_default_columns(&mut self) -> Result<()> { let ts_column = Column::new(BlockStorage::Memmap(BlockType::U64Dense), "timestamp"); let source_column = Column::new(BlockStorage::Memory(BlockType::I32Dense), "source_id"); let mut map = HashMap::new(); map.insert(TIMESTAMP_COLUMN, ts_column); map.insert(1, source_column); self.ensure_columns(map) } pub fn with_data<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { let root = root.as_ref().to_path_buf(); let meta = root.join(CATALOG_METADATA); Catalog::deserialize(&meta, &root) } pub fn open_or_create<P: AsRef<Path>>(root: P) -> Result<Catalog<'cat>> { Catalog::with_data(root.as_ref()).or_else(|_| Catalog::new(root.as_ref())) } pub fn columns(&self) -> &ColumnMap 
{ &self.colmap } #[cfg(feature = "validate_append")] fn validate_append(&self, data: &Append) -> bool { let ts_len = data.ts.len(); data.data.iter().all(|(_col, fragment)| { // check fragment length for dense blocks if !fragment.is_sparse() { if ts_len != fragment.len() { error!("Dense append fragment has different length than ts"); return false; } } else { if ts_len > fragment.len() { error!("Sparse append fragment longer than ts"); return false; } if fragment.iter().any(|(idx, _)| idx >= ts_len) { error!("Sparse append fragment has index greater than ts length"); return false; } } true }) } pub fn append(&self, data: &Append) -> Result<usize> { if data.is_empty() { bail!("Provided Append contains no data"); } #[cfg(feature = "validate_append")] { if !self.validate_append(&data) { bail!("Provided Append is not consistent"); } } // dispatch to proper PartitionGroup if let Some(pg) = self.groups.get(&data.source_id) { pg.append(&self, &data) } else { bail!("No PartitionGroup found for source_id = {}", data.source_id); } } pub fn scan(&self, scan: &Scan) -> Result<ScanResult> { let all_groups = if scan.groups.is_some() { None } else { Some(self.groups.keys().cloned().collect::<Vec<_>>()) }; if scan.groups.is_some() { scan.groups.as_ref().unwrap() } else { all_groups.as_ref().unwrap() } .chunks(2) .map(|group| { group .par_iter() .filter_map(|pgid| self.groups.get(pgid)) .map(|pg| pg.scan(&self, &scan)) // todo: this would potentially be better with some short-circuiting combinator // instead // need to bench with collect_into() .reduce( || Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }, ) }) .fold(Ok(ScanResult::merge_identity()), |a, b| { let mut a = a?; let b = b?; a.merge(b)?; Ok(a) }) } pub fn flush(&self) -> Result<()> { // TODO: add dirty flag let meta = self.data_root.join(CATALOG_METADATA); for pg in self.groups.values() { pg.flush()? 
} Catalog::serialize(self, &meta) } /// Extend internal column map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of a column type. /// Use this feature with great caution. pub(crate) fn ensure_columns(&mut self, type_map: ColumnMap) -> Result<()> { self.colmap.extend(type_map); Ok(()) } /// Adds a column to the catalog. It verifies that catalog does not already contain: /// a) column with the given id, or /// b) column with the given name. /// This function takes all-or-nothing approach: /// either all columns are added, or no changes are applied. pub fn add_columns(&mut self, column_map: ColumnMap) -> Result<()> { for (id, column) in column_map.iter() { info!( "Adding column {}:{:?} with id {}", column.name, column.ty, id ); if self.colmap.contains_key(id) { bail!("Column Id already exists {}", *id); } if self.colmap.values().any(|col| col.name == column.name)
} self.ensure_columns(column_map) } /// Extend internal index map without any sanitization checks. /// /// This function uses `std::iter::Extend` internally, /// so it allows redefinition of a index type. /// Also, the index' support for a given column is not checked. /// Use this feature with great caution. pub(crate) fn ensure_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { self.indexes.extend(&*index_map); Ok(()) } /// Adds index to the catalog. Verifies that catalog does not already contain: /// a) index for a column with the given id, or /// b) index for a column with the given name. /// This function takes all-or-nothing approach: /// either all indexes are added, or no changes are applied. pub fn add_indexes(&mut self, index_map: ColumnIndexStorageMap) -> Result<()> { for (id, index) in index_map.iter() { let column = self .colmap .get(id) .ok_or_else(|| err_msg(format!("column not found {}", id)))?; info!( "Adding index {:?} for column {}[{}]:{:?}", index, column.name, id, column.ty ); if self.indexes.contains_key(id) { bail!("Index already exists {}", *id); } } self.ensure_indexes(index_map) } /// Fetch the first non-occupied column index /// /// todo: rethink this approach (max() every time) pub fn next_id(&self) -> usize { let default = 0; *self.colmap.keys().max().unwrap_or(&default) + 1 } /// Calculate an empty partition's capacity for given column set pub(super) fn space_for_blocks<'iter>( &self, indices: impl Iterator<Item = &'iter ColumnId>, ) -> usize { use crate::params::BLOCK_SIZE; indices .filter_map(|col_id| { if let Some(column) = self.colmap.get(col_id) { Some(BLOCK_SIZE / column.size_of()) } else { None } }) .min() // the default shouldn't ever happen, as there always should be ts block // but in case it happens, this will return 0 // which in turn will cause new partition to be used .unwrap_or_default() } pub(crate) fn ensure_group( &mut self, source_id: SourceId, ) -> Result<&mut PartitionGroup<'cat>> { let data_root = 
<_ as AsRef<Path>>::as_ref(&self.data_root); Ok(self.groups.entry(source_id).or_insert_with(|| { // this shouldn't fail in general let root = PartitionGroupManager::new(data_root, source_id) .with_context(|_| "Failed to create group manager") .unwrap(); let pg = PartitionGroup::new(&root, source_id) .with_context(|_| "Unable to create partition group") .unwrap(); pg.flush().unwrap(); pg })) } /// Add new partition group with given source id pub fn add_partition_group(&mut self, source_id: SourceId) -> Result<()> { let _ = self.ensure_group(source_id)?; Ok(()) } fn prepare_partition_groups<P, I>(root: P, ids: I) -> Result<PartitionGroupMap<'cat>> where P: AsRef<Path>, I: IntoIterator<Item = SourceId>, { ids.into_iter() .map(|source_id| { let path = PartitionGroupManager::new(&root, source_id).with_context(|_| { format!( "Unable to obtain data directory for partition group {}", source_id ) })?; let partition_group = PartitionGroup::with_data(path) .with_context(|_| format!("Unable to read partition group {:?}", source_id))?; Ok((source_id, partition_group)) }) .collect() } fn serialize<P: AsRef<Path>>(catalog: &Catalog<'cat>, meta: P) -> Result<()> { let meta = meta.as_ref(); let group_metas = Vec::from_iter(catalog.groups.keys()); let data = (catalog, group_metas); serialize!(file meta, &data) .with_context(|_| "Failed to serialize catalog metadata") .map_err(|e| e.into()) } fn deserialize<P: AsRef<Path>, R: AsRef<Path>>(meta: P, root: R) -> Result<Catalog<'cat>> { let meta = meta.as_ref(); if !meta.exists() { bail!("Cannot find catalog metadata {:?}", meta); } let (mut catalog, group_metas): (Catalog, Vec<SourceId>) = deserialize!(file meta).with_context(|_| "Failed to read catalog metadata")?; catalog.groups = Catalog::prepare_partition_groups(&root, group_metas) .with_context(|_| "Failed to read partition data")?; catalog.data_root = root.as_ref().to_path_buf(); Ok(catalog) } } impl<'cat> Drop for Catalog<'cat> { fn drop(&mut self) { self.flush() 
.with_context(|_| "Failed to flush data during drop") .unwrap(); } } impl<'cat> AsRef<ColumnMap> for Catalog<'cat> { fn as_ref(&self) -> &ColumnMap { &self.colmap } } #[cfg(test)] mod tests { use super::*; use crate::datastore::tests::create_random_partitions; #[test] fn new() { let source_ids = [1, 5, 7]; let im_part_count = 8; let mut_part_count = 2; let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); for source_id in &source_ids { let pg = cat .ensure_group(*source_id) .with_context(|_| "Unable to retrieve partition group") .unwrap(); create_random_partitions(pg, im_part_count, mut_part_count); } } #[test] fn add_partition_group_idempotence() { let root = tempdir!(); let mut cat = Catalog::new(&root) .with_context(|_| "Unable to create catalog") .unwrap(); const PG_ID: SourceId = 10; cat.add_partition_group(PG_ID).unwrap(); cat.add_partition_group(PG_ID).unwrap(); assert_eq!(cat.groups.len(), 1); assert_eq!( cat.groups .iter() .nth(0) .expect("partition group not found") .0, &PG_ID ); } }
{ bail!("Column Name already exists '{}'", column.name); }
conditional_block
ViewTrajectories.py
#!/usr/bin/env python from time import clock, sleep import wx from numpy import * import os from wx.lib.floatcanvas import FloatCanvas, NavCanvas #import hazmat, TAP_mod ID_ABOUT_MENU = wx.NewId() ID_EXIT_MENU = wx.NewId() ID_ZOOM_IN_MENU = wx.NewId() ID_ZOOM_OUT_MENU = wx.NewId() ID_ZOOM_TO_FIT_MENU = wx.NewId() ID_DRAWTEST_MENU = wx.NewId() ID_DRAWMAP_MENU = wx.NewId() ID_CLEAR_MENU = wx.NewId() ID_SET_FRAMERATE_MENU = wx.NewId() ID_OPEN = wx.NewId() ID_RUN_MOVIE = wx.NewId() ID_RUNONTOP_MOVIE = wx.NewId() ID_RERUN_MOVIE = wx.NewId() ID_PAUSE_BUTTON = wx.NewId() colorlist = ["BLACK", "RED", "CYAN", "GREEN", "SALMON", "VIOLET"] CurrentColor = [0] def GetColor(): color = colorlist[CurrentColor[0]] CurrentColor[0] += 1 if CurrentColor[0] > len(colorlist): CurrentColor[0] = 0 return color def EVT_NEW_FRAME_EVENT( window, function ): window.Connect( -1, -1, NEW_FRAME_EVENT, function ) class FrameEvent(wx.PyEvent): def __init__(self): wx.PyEvent.__init__(self) self.SetEventType(NEW_FRAME_EVENT) class DrawFrame(wx.Frame): def __init__(self, *args, **kwargs): wx.Frame.__init__(self, *args, **kwargs) ## Set up the MenuBar MenuBar = wx.MenuBar() file_menu = wx.Menu() file_menu.Append(ID_OPEN, "&Open map","Open a bna file") wx.EVT_MENU(self, ID_OPEN, self.Open_bna) file_menu.AppendSeparator() file_menu.Append(ID_RUN_MOVIE, "Run &Movie","Run a movie of the trajectory") wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie) file_menu.Append(ID_RUNONTOP_MOVIE, "Run On Top &Movie","Run a movie of the trajectory on top of existing") wx.EVT_MENU(self, ID_RUNONTOP_MOVIE, self.RunOnTop_Movie) file_menu.Append(ID_RERUN_MOVIE, "Re Run &Movie","Re-Run the existing movie of the trajectory") wx.EVT_MENU(self, ID_RERUN_MOVIE, self.ReRun_Movie) file_menu.AppendSeparator() file_menu.Append(ID_EXIT_MENU, "E&xit","Terminate the program") wx.EVT_MENU(self, ID_EXIT_MENU, self.OnQuit) wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie) MenuBar.Append(file_menu, "&File") view_menu = wx.Menu() 
view_menu.Append(ID_ZOOM_TO_FIT_MENU, "Zoom to &Fit","Zoom to fit the window") wx.EVT_MENU(self, ID_ZOOM_TO_FIT_MENU,self.ZoomToFit) view_menu.Append(ID_SET_FRAMERATE_MENU, "Set Frame &Rate","Set the Frame Rate for Movie playback") wx.EVT_MENU(self, ID_SET_FRAMERATE_MENU,self.SetFrameRate) MenuBar.Append(view_menu, "&View") help_menu = wx.Menu() help_menu.Append(ID_ABOUT_MENU, "&About", "More information About this program") wx.EVT_MENU(self, ID_ABOUT_MENU, self.OnAbout) MenuBar.Append(help_menu, "&Help") self.SetMenuBar(MenuBar) self.CreateStatusBar() self.SetStatusText("") wx.EVT_CLOSE(self, self.OnCloseWindow) # Add the Canvas self.NavCanvas = NavCanvas.NavCanvas(self,-1,(500,500), ProjectionFun = 'FlatEarth', Debug = 0, #BackgroundColor = "DARK SLATE BLUE") BackgroundColor = "WHITE", #UseBackground = 1, ).Canvas self.Canvas = NavCanvas.Canvas self.Canvas.NumBetweenBlits = 20 tb = self.NavCanvas.ToolBar tb.AddSeparator() RewindButton = wx.Button(tb, -1, "Rewind") tb.AddControl(RewindButton) wx.EVT_BUTTON(self, RewindButton.GetId() , self.Rewind) StopButton = wx.Button(tb, -1, "Stop") tb.AddControl(StopButton) wx.EVT_BUTTON(self, StopButton.GetId() , self.Stop) PlayButton = wx.Button(tb, -1, "Play") tb.AddControl(PlayButton) wx.EVT_BUTTON(self, PlayButton.GetId() ,self.Play) tb.Realize() self.Show(True) self.LE_movie = None self.LEsObjects = [] self.TimeStep = 0 self.FrameDelay = 10 # milliseconds self.FileDialog = wx.FileDialog(self, "Pick a file",".","","*",wx.OPEN) self.Timer = wx.PyTimer(self.ShowFrame) return None def Open_bna(self, event): dlg = self.FileDialog if dlg.ShowModal() == wx.ID_OK: filename = dlg.GetPath() self.LoadMap(filename) def LoadMap(self, filename): self.Canvas.Clear() try: shorelines = hazmat.read_bna(filename,polytype = "PolygonSet") for shoreline in shorelines: self.Canvas.AddPolygon(shoreline, LineWidth = 1, LineColor = "Black", FillColor = "Brown", FillStyle = 'Solid', Foreground = 0) self.Canvas.ZoomToBB() except: dlg = 
wx.MessageDialog(self, 'There was something wrong with the selected map file', 'View Trajectories', wx.OK | wx.ICON_ERROR) dlg.ShowModal() dlg.Destroy() def Load_Movie(self, event): import glob dlg = self.FileDialog if dlg.ShowModal() == wx.ID_OK: filename = dlg.GetPath() (self.LE_movie,(NumTimesteps,NumLEs),HeaderData,flags) = TAP_mod.ReadTrajectory(filename) wx.GetApp().Yield() return True else: return None def Run_Movie(self, event): if self.Load_Movie(None): if self.LEsObjects: self.Canvas.RemoveObjects(self.LEsObjects) self.LEsObjects = [] self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = "Black", Diameter = 1.5,Foreground = 1)) CurrentColor[0] = 1 self.ReRun_Movie(None) def RunOnTop_Movie(self, event): if self.Load_Movie(None): for object in self.LEsObjects: object.PutInBackground() self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = GetColor(), Diameter = 1.5,Foreground = 1) ) self.ReRun_Movie(None) def ReRun_Movie(self, event): if not self.LE_movie: self.Run_Movie(None) else: self.Play(None) ## def UpdateThread(self): ## try: ## while hasattr(self, 'event') and not self.event.isSet(): ## wx.PostEvent(self, FrameEvent()) ## self.event.wait(self.FrameDelay) ## except wx.PyDeadObjectError: # BUG: we were destroyed ## return def Running(self): """Returns true if the animation is running""" return self.Timer.IsRunning() def Play(self,event): """Start the animation""" if not self.Running(): if self.LE_movie: #self.event.clear() #thread = threading.Thread(target = self.UpdateThread) #thread.start() self.Timer.Start(self.FrameDelay) else: self.Run_Movie(None) def Stop(self,event): self.Timer.Stop() def ShowFrame(self): if self.TimeStep < len(self.LE_movie): self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie))) # this sets the data for the next frame self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep]) self.Canvas.Draw() self.TimeStep += 1 wx.GetApp().Yield(True) else: self.Timer.Stop() def 
Rewind(self,event): self.TimeStep = 0 if self.LE_movie: self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep]) self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie))) self.Canvas.Draw() def OnAbout(self, event): dlg = wx.MessageDialog(self, "This is a small program to demonstrate\n" "the use of the FloatCanvas\n", "About Me", wx.OK | wx.ICON_INFORMATION) dlg.ShowModal() dlg.Destroy() def ZoomToFit(self,event): self.Canvas.ZoomToBB() def Clear(self,event = None): self.Canvas.Clear() self.Canvas.Draw() def OnQuit(self,event): self.Close(True) def OnCloseWindow(self, event): self.Destroy() def RunMovie(self,event = None): import RandomArray start = clock() shift = RandomArray.randint(0,0,(2,)) NumFrames = 50 for i in range(NumFrames): points = self.LEs.Points shift = RandomArray.randint(-5,5,(2,)) points += shift self.LEs.SetPoints(points) self.Canvas.Draw() print "running the movie took %f seconds to disply %i frames"%((clock() - start),NumFrames) def SetFrameRate(self,event): dlg = wx.TextEntryDialog(self, 'Please set the time between frames in milliseconds', 'ViewTrajectories', "%i"%self.FrameDelay) dlg.SetValue("%i"%self.FrameDelay) if dlg.ShowModal() == wx.ID_OK: try: self.FrameDelay = int(dlg.GetValue()) except: pass
Any bugs, comments, feedback, questions, and especially code are welcome: -Chris Barker Chris.Barker@noaa.gov """ def OnInit(self): frame = DrawFrame(None, title="Trajectory Viewer", size=(700,700)) self.SetTopWindow(frame) return True if __name__ == "__main__": app = TrajectoryViewer(0) app.MainLoop()
dlg.Destroy() class TrajectoryViewer(wx.App): """
random_line_split
ViewTrajectories.py
#!/usr/bin/env python from time import clock, sleep import wx from numpy import * import os from wx.lib.floatcanvas import FloatCanvas, NavCanvas #import hazmat, TAP_mod ID_ABOUT_MENU = wx.NewId() ID_EXIT_MENU = wx.NewId() ID_ZOOM_IN_MENU = wx.NewId() ID_ZOOM_OUT_MENU = wx.NewId() ID_ZOOM_TO_FIT_MENU = wx.NewId() ID_DRAWTEST_MENU = wx.NewId() ID_DRAWMAP_MENU = wx.NewId() ID_CLEAR_MENU = wx.NewId() ID_SET_FRAMERATE_MENU = wx.NewId() ID_OPEN = wx.NewId() ID_RUN_MOVIE = wx.NewId() ID_RUNONTOP_MOVIE = wx.NewId() ID_RERUN_MOVIE = wx.NewId() ID_PAUSE_BUTTON = wx.NewId() colorlist = ["BLACK", "RED", "CYAN", "GREEN", "SALMON", "VIOLET"] CurrentColor = [0] def GetColor(): color = colorlist[CurrentColor[0]] CurrentColor[0] += 1 if CurrentColor[0] > len(colorlist): CurrentColor[0] = 0 return color def EVT_NEW_FRAME_EVENT( window, function ): window.Connect( -1, -1, NEW_FRAME_EVENT, function ) class FrameEvent(wx.PyEvent): def __init__(self): wx.PyEvent.__init__(self) self.SetEventType(NEW_FRAME_EVENT) class DrawFrame(wx.Frame): def __init__(self, *args, **kwargs): wx.Frame.__init__(self, *args, **kwargs) ## Set up the MenuBar MenuBar = wx.MenuBar() file_menu = wx.Menu() file_menu.Append(ID_OPEN, "&Open map","Open a bna file") wx.EVT_MENU(self, ID_OPEN, self.Open_bna) file_menu.AppendSeparator() file_menu.Append(ID_RUN_MOVIE, "Run &Movie","Run a movie of the trajectory") wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie) file_menu.Append(ID_RUNONTOP_MOVIE, "Run On Top &Movie","Run a movie of the trajectory on top of existing") wx.EVT_MENU(self, ID_RUNONTOP_MOVIE, self.RunOnTop_Movie) file_menu.Append(ID_RERUN_MOVIE, "Re Run &Movie","Re-Run the existing movie of the trajectory") wx.EVT_MENU(self, ID_RERUN_MOVIE, self.ReRun_Movie) file_menu.AppendSeparator() file_menu.Append(ID_EXIT_MENU, "E&xit","Terminate the program") wx.EVT_MENU(self, ID_EXIT_MENU, self.OnQuit) wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie) MenuBar.Append(file_menu, "&File") view_menu = wx.Menu() 
view_menu.Append(ID_ZOOM_TO_FIT_MENU, "Zoom to &Fit","Zoom to fit the window") wx.EVT_MENU(self, ID_ZOOM_TO_FIT_MENU,self.ZoomToFit) view_menu.Append(ID_SET_FRAMERATE_MENU, "Set Frame &Rate","Set the Frame Rate for Movie playback") wx.EVT_MENU(self, ID_SET_FRAMERATE_MENU,self.SetFrameRate) MenuBar.Append(view_menu, "&View") help_menu = wx.Menu() help_menu.Append(ID_ABOUT_MENU, "&About", "More information About this program") wx.EVT_MENU(self, ID_ABOUT_MENU, self.OnAbout) MenuBar.Append(help_menu, "&Help") self.SetMenuBar(MenuBar) self.CreateStatusBar() self.SetStatusText("") wx.EVT_CLOSE(self, self.OnCloseWindow) # Add the Canvas self.NavCanvas = NavCanvas.NavCanvas(self,-1,(500,500), ProjectionFun = 'FlatEarth', Debug = 0, #BackgroundColor = "DARK SLATE BLUE") BackgroundColor = "WHITE", #UseBackground = 1, ).Canvas self.Canvas = NavCanvas.Canvas self.Canvas.NumBetweenBlits = 20 tb = self.NavCanvas.ToolBar tb.AddSeparator() RewindButton = wx.Button(tb, -1, "Rewind") tb.AddControl(RewindButton) wx.EVT_BUTTON(self, RewindButton.GetId() , self.Rewind) StopButton = wx.Button(tb, -1, "Stop") tb.AddControl(StopButton) wx.EVT_BUTTON(self, StopButton.GetId() , self.Stop) PlayButton = wx.Button(tb, -1, "Play") tb.AddControl(PlayButton) wx.EVT_BUTTON(self, PlayButton.GetId() ,self.Play) tb.Realize() self.Show(True) self.LE_movie = None self.LEsObjects = [] self.TimeStep = 0 self.FrameDelay = 10 # milliseconds self.FileDialog = wx.FileDialog(self, "Pick a file",".","","*",wx.OPEN) self.Timer = wx.PyTimer(self.ShowFrame) return None def Open_bna(self, event): dlg = self.FileDialog if dlg.ShowModal() == wx.ID_OK: filename = dlg.GetPath() self.LoadMap(filename) def LoadMap(self, filename): self.Canvas.Clear() try: shorelines = hazmat.read_bna(filename,polytype = "PolygonSet") for shoreline in shorelines: self.Canvas.AddPolygon(shoreline, LineWidth = 1, LineColor = "Black", FillColor = "Brown", FillStyle = 'Solid', Foreground = 0) self.Canvas.ZoomToBB() except: dlg = 
wx.MessageDialog(self, 'There was something wrong with the selected map file', 'View Trajectories', wx.OK | wx.ICON_ERROR) dlg.ShowModal() dlg.Destroy() def Load_Movie(self, event): import glob dlg = self.FileDialog if dlg.ShowModal() == wx.ID_OK: filename = dlg.GetPath() (self.LE_movie,(NumTimesteps,NumLEs),HeaderData,flags) = TAP_mod.ReadTrajectory(filename) wx.GetApp().Yield() return True else: return None def Run_Movie(self, event): if self.Load_Movie(None): if self.LEsObjects: self.Canvas.RemoveObjects(self.LEsObjects) self.LEsObjects = [] self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = "Black", Diameter = 1.5,Foreground = 1)) CurrentColor[0] = 1 self.ReRun_Movie(None) def RunOnTop_Movie(self, event): if self.Load_Movie(None): for object in self.LEsObjects: object.PutInBackground() self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = GetColor(), Diameter = 1.5,Foreground = 1) ) self.ReRun_Movie(None) def ReRun_Movie(self, event): if not self.LE_movie: self.Run_Movie(None) else: self.Play(None) ## def UpdateThread(self): ## try: ## while hasattr(self, 'event') and not self.event.isSet(): ## wx.PostEvent(self, FrameEvent()) ## self.event.wait(self.FrameDelay) ## except wx.PyDeadObjectError: # BUG: we were destroyed ## return def Running(self): """Returns true if the animation is running""" return self.Timer.IsRunning() def Play(self,event): """Start the animation""" if not self.Running(): if self.LE_movie: #self.event.clear() #thread = threading.Thread(target = self.UpdateThread) #thread.start() self.Timer.Start(self.FrameDelay) else: self.Run_Movie(None) def Stop(self,event): self.Timer.Stop() def ShowFrame(self): if self.TimeStep < len(self.LE_movie): self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie))) # this sets the data for the next frame self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep]) self.Canvas.Draw() self.TimeStep += 1 wx.GetApp().Yield(True) else: self.Timer.Stop() def 
Rewind(self,event): self.TimeStep = 0 if self.LE_movie: self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep]) self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie))) self.Canvas.Draw() def OnAbout(self, event): dlg = wx.MessageDialog(self, "This is a small program to demonstrate\n" "the use of the FloatCanvas\n", "About Me", wx.OK | wx.ICON_INFORMATION) dlg.ShowModal() dlg.Destroy() def ZoomToFit(self,event): self.Canvas.ZoomToBB() def Clear(self,event = None): self.Canvas.Clear() self.Canvas.Draw() def
(self,event): self.Close(True) def OnCloseWindow(self, event): self.Destroy() def RunMovie(self,event = None): import RandomArray start = clock() shift = RandomArray.randint(0,0,(2,)) NumFrames = 50 for i in range(NumFrames): points = self.LEs.Points shift = RandomArray.randint(-5,5,(2,)) points += shift self.LEs.SetPoints(points) self.Canvas.Draw() print "running the movie took %f seconds to disply %i frames"%((clock() - start),NumFrames) def SetFrameRate(self,event): dlg = wx.TextEntryDialog(self, 'Please set the time between frames in milliseconds', 'ViewTrajectories', "%i"%self.FrameDelay) dlg.SetValue("%i"%self.FrameDelay) if dlg.ShowModal() == wx.ID_OK: try: self.FrameDelay = int(dlg.GetValue()) except: pass dlg.Destroy() class TrajectoryViewer(wx.App): """ Any bugs, comments, feedback, questions, and especially code are welcome: -Chris Barker Chris.Barker@noaa.gov """ def OnInit(self): frame = DrawFrame(None, title="Trajectory Viewer", size=(700,700)) self.SetTopWindow(frame) return True if __name__ == "__main__": app = TrajectoryViewer(0) app.MainLoop()
OnQuit
identifier_name
ViewTrajectories.py
#!/usr/bin/env python from time import clock, sleep import wx from numpy import * import os from wx.lib.floatcanvas import FloatCanvas, NavCanvas #import hazmat, TAP_mod ID_ABOUT_MENU = wx.NewId() ID_EXIT_MENU = wx.NewId() ID_ZOOM_IN_MENU = wx.NewId() ID_ZOOM_OUT_MENU = wx.NewId() ID_ZOOM_TO_FIT_MENU = wx.NewId() ID_DRAWTEST_MENU = wx.NewId() ID_DRAWMAP_MENU = wx.NewId() ID_CLEAR_MENU = wx.NewId() ID_SET_FRAMERATE_MENU = wx.NewId() ID_OPEN = wx.NewId() ID_RUN_MOVIE = wx.NewId() ID_RUNONTOP_MOVIE = wx.NewId() ID_RERUN_MOVIE = wx.NewId() ID_PAUSE_BUTTON = wx.NewId() colorlist = ["BLACK", "RED", "CYAN", "GREEN", "SALMON", "VIOLET"] CurrentColor = [0] def GetColor(): color = colorlist[CurrentColor[0]] CurrentColor[0] += 1 if CurrentColor[0] > len(colorlist): CurrentColor[0] = 0 return color def EVT_NEW_FRAME_EVENT( window, function ): window.Connect( -1, -1, NEW_FRAME_EVENT, function ) class FrameEvent(wx.PyEvent): def __init__(self): wx.PyEvent.__init__(self) self.SetEventType(NEW_FRAME_EVENT) class DrawFrame(wx.Frame): def __init__(self, *args, **kwargs): wx.Frame.__init__(self, *args, **kwargs) ## Set up the MenuBar MenuBar = wx.MenuBar() file_menu = wx.Menu() file_menu.Append(ID_OPEN, "&Open map","Open a bna file") wx.EVT_MENU(self, ID_OPEN, self.Open_bna) file_menu.AppendSeparator() file_menu.Append(ID_RUN_MOVIE, "Run &Movie","Run a movie of the trajectory") wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie) file_menu.Append(ID_RUNONTOP_MOVIE, "Run On Top &Movie","Run a movie of the trajectory on top of existing") wx.EVT_MENU(self, ID_RUNONTOP_MOVIE, self.RunOnTop_Movie) file_menu.Append(ID_RERUN_MOVIE, "Re Run &Movie","Re-Run the existing movie of the trajectory") wx.EVT_MENU(self, ID_RERUN_MOVIE, self.ReRun_Movie) file_menu.AppendSeparator() file_menu.Append(ID_EXIT_MENU, "E&xit","Terminate the program") wx.EVT_MENU(self, ID_EXIT_MENU, self.OnQuit) wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie) MenuBar.Append(file_menu, "&File") view_menu = wx.Menu() 
view_menu.Append(ID_ZOOM_TO_FIT_MENU, "Zoom to &Fit","Zoom to fit the window") wx.EVT_MENU(self, ID_ZOOM_TO_FIT_MENU,self.ZoomToFit) view_menu.Append(ID_SET_FRAMERATE_MENU, "Set Frame &Rate","Set the Frame Rate for Movie playback") wx.EVT_MENU(self, ID_SET_FRAMERATE_MENU,self.SetFrameRate) MenuBar.Append(view_menu, "&View") help_menu = wx.Menu() help_menu.Append(ID_ABOUT_MENU, "&About", "More information About this program") wx.EVT_MENU(self, ID_ABOUT_MENU, self.OnAbout) MenuBar.Append(help_menu, "&Help") self.SetMenuBar(MenuBar) self.CreateStatusBar() self.SetStatusText("") wx.EVT_CLOSE(self, self.OnCloseWindow) # Add the Canvas self.NavCanvas = NavCanvas.NavCanvas(self,-1,(500,500), ProjectionFun = 'FlatEarth', Debug = 0, #BackgroundColor = "DARK SLATE BLUE") BackgroundColor = "WHITE", #UseBackground = 1, ).Canvas self.Canvas = NavCanvas.Canvas self.Canvas.NumBetweenBlits = 20 tb = self.NavCanvas.ToolBar tb.AddSeparator() RewindButton = wx.Button(tb, -1, "Rewind") tb.AddControl(RewindButton) wx.EVT_BUTTON(self, RewindButton.GetId() , self.Rewind) StopButton = wx.Button(tb, -1, "Stop") tb.AddControl(StopButton) wx.EVT_BUTTON(self, StopButton.GetId() , self.Stop) PlayButton = wx.Button(tb, -1, "Play") tb.AddControl(PlayButton) wx.EVT_BUTTON(self, PlayButton.GetId() ,self.Play) tb.Realize() self.Show(True) self.LE_movie = None self.LEsObjects = [] self.TimeStep = 0 self.FrameDelay = 10 # milliseconds self.FileDialog = wx.FileDialog(self, "Pick a file",".","","*",wx.OPEN) self.Timer = wx.PyTimer(self.ShowFrame) return None def Open_bna(self, event): dlg = self.FileDialog if dlg.ShowModal() == wx.ID_OK: filename = dlg.GetPath() self.LoadMap(filename) def LoadMap(self, filename): self.Canvas.Clear() try: shorelines = hazmat.read_bna(filename,polytype = "PolygonSet") for shoreline in shorelines: self.Canvas.AddPolygon(shoreline, LineWidth = 1, LineColor = "Black", FillColor = "Brown", FillStyle = 'Solid', Foreground = 0) self.Canvas.ZoomToBB() except: dlg = 
wx.MessageDialog(self, 'There was something wrong with the selected map file', 'View Trajectories', wx.OK | wx.ICON_ERROR) dlg.ShowModal() dlg.Destroy() def Load_Movie(self, event): import glob dlg = self.FileDialog if dlg.ShowModal() == wx.ID_OK: filename = dlg.GetPath() (self.LE_movie,(NumTimesteps,NumLEs),HeaderData,flags) = TAP_mod.ReadTrajectory(filename) wx.GetApp().Yield() return True else: return None def Run_Movie(self, event): if self.Load_Movie(None): if self.LEsObjects: self.Canvas.RemoveObjects(self.LEsObjects) self.LEsObjects = [] self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = "Black", Diameter = 1.5,Foreground = 1)) CurrentColor[0] = 1 self.ReRun_Movie(None) def RunOnTop_Movie(self, event): if self.Load_Movie(None): for object in self.LEsObjects: object.PutInBackground() self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = GetColor(), Diameter = 1.5,Foreground = 1) ) self.ReRun_Movie(None) def ReRun_Movie(self, event): if not self.LE_movie: self.Run_Movie(None) else: self.Play(None) ## def UpdateThread(self): ## try: ## while hasattr(self, 'event') and not self.event.isSet(): ## wx.PostEvent(self, FrameEvent()) ## self.event.wait(self.FrameDelay) ## except wx.PyDeadObjectError: # BUG: we were destroyed ## return def Running(self): """Returns true if the animation is running""" return self.Timer.IsRunning() def Play(self,event): """Start the animation""" if not self.Running(): if self.LE_movie: #self.event.clear() #thread = threading.Thread(target = self.UpdateThread) #thread.start() self.Timer.Start(self.FrameDelay) else: self.Run_Movie(None) def Stop(self,event): self.Timer.Stop() def ShowFrame(self): if self.TimeStep < len(self.LE_movie): self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie))) # this sets the data for the next frame self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep]) self.Canvas.Draw() self.TimeStep += 1 wx.GetApp().Yield(True) else: self.Timer.Stop() def 
Rewind(self,event): self.TimeStep = 0 if self.LE_movie: self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep]) self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie))) self.Canvas.Draw() def OnAbout(self, event): dlg = wx.MessageDialog(self, "This is a small program to demonstrate\n" "the use of the FloatCanvas\n", "About Me", wx.OK | wx.ICON_INFORMATION) dlg.ShowModal() dlg.Destroy() def ZoomToFit(self,event): self.Canvas.ZoomToBB() def Clear(self,event = None): self.Canvas.Clear() self.Canvas.Draw() def OnQuit(self,event): self.Close(True) def OnCloseWindow(self, event): self.Destroy() def RunMovie(self,event = None): import RandomArray start = clock() shift = RandomArray.randint(0,0,(2,)) NumFrames = 50 for i in range(NumFrames):
print "running the movie took %f seconds to disply %i frames"%((clock() - start),NumFrames) def SetFrameRate(self,event): dlg = wx.TextEntryDialog(self, 'Please set the time between frames in milliseconds', 'ViewTrajectories', "%i"%self.FrameDelay) dlg.SetValue("%i"%self.FrameDelay) if dlg.ShowModal() == wx.ID_OK: try: self.FrameDelay = int(dlg.GetValue()) except: pass dlg.Destroy() class TrajectoryViewer(wx.App): """ Any bugs, comments, feedback, questions, and especially code are welcome: -Chris Barker Chris.Barker@noaa.gov """ def OnInit(self): frame = DrawFrame(None, title="Trajectory Viewer", size=(700,700)) self.SetTopWindow(frame) return True if __name__ == "__main__": app = TrajectoryViewer(0) app.MainLoop()
points = self.LEs.Points shift = RandomArray.randint(-5,5,(2,)) points += shift self.LEs.SetPoints(points) self.Canvas.Draw()
conditional_block
ViewTrajectories.py
#!/usr/bin/env python from time import clock, sleep import wx from numpy import * import os from wx.lib.floatcanvas import FloatCanvas, NavCanvas #import hazmat, TAP_mod ID_ABOUT_MENU = wx.NewId() ID_EXIT_MENU = wx.NewId() ID_ZOOM_IN_MENU = wx.NewId() ID_ZOOM_OUT_MENU = wx.NewId() ID_ZOOM_TO_FIT_MENU = wx.NewId() ID_DRAWTEST_MENU = wx.NewId() ID_DRAWMAP_MENU = wx.NewId() ID_CLEAR_MENU = wx.NewId() ID_SET_FRAMERATE_MENU = wx.NewId() ID_OPEN = wx.NewId() ID_RUN_MOVIE = wx.NewId() ID_RUNONTOP_MOVIE = wx.NewId() ID_RERUN_MOVIE = wx.NewId() ID_PAUSE_BUTTON = wx.NewId() colorlist = ["BLACK", "RED", "CYAN", "GREEN", "SALMON", "VIOLET"] CurrentColor = [0] def GetColor(): color = colorlist[CurrentColor[0]] CurrentColor[0] += 1 if CurrentColor[0] > len(colorlist): CurrentColor[0] = 0 return color def EVT_NEW_FRAME_EVENT( window, function ): window.Connect( -1, -1, NEW_FRAME_EVENT, function ) class FrameEvent(wx.PyEvent): def __init__(self): wx.PyEvent.__init__(self) self.SetEventType(NEW_FRAME_EVENT) class DrawFrame(wx.Frame): def __init__(self, *args, **kwargs): wx.Frame.__init__(self, *args, **kwargs) ## Set up the MenuBar MenuBar = wx.MenuBar() file_menu = wx.Menu() file_menu.Append(ID_OPEN, "&Open map","Open a bna file") wx.EVT_MENU(self, ID_OPEN, self.Open_bna) file_menu.AppendSeparator() file_menu.Append(ID_RUN_MOVIE, "Run &Movie","Run a movie of the trajectory") wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie) file_menu.Append(ID_RUNONTOP_MOVIE, "Run On Top &Movie","Run a movie of the trajectory on top of existing") wx.EVT_MENU(self, ID_RUNONTOP_MOVIE, self.RunOnTop_Movie) file_menu.Append(ID_RERUN_MOVIE, "Re Run &Movie","Re-Run the existing movie of the trajectory") wx.EVT_MENU(self, ID_RERUN_MOVIE, self.ReRun_Movie) file_menu.AppendSeparator() file_menu.Append(ID_EXIT_MENU, "E&xit","Terminate the program") wx.EVT_MENU(self, ID_EXIT_MENU, self.OnQuit) wx.EVT_MENU(self, ID_RUN_MOVIE, self.Run_Movie) MenuBar.Append(file_menu, "&File") view_menu = wx.Menu() 
view_menu.Append(ID_ZOOM_TO_FIT_MENU, "Zoom to &Fit","Zoom to fit the window") wx.EVT_MENU(self, ID_ZOOM_TO_FIT_MENU,self.ZoomToFit) view_menu.Append(ID_SET_FRAMERATE_MENU, "Set Frame &Rate","Set the Frame Rate for Movie playback") wx.EVT_MENU(self, ID_SET_FRAMERATE_MENU,self.SetFrameRate) MenuBar.Append(view_menu, "&View") help_menu = wx.Menu() help_menu.Append(ID_ABOUT_MENU, "&About", "More information About this program") wx.EVT_MENU(self, ID_ABOUT_MENU, self.OnAbout) MenuBar.Append(help_menu, "&Help") self.SetMenuBar(MenuBar) self.CreateStatusBar() self.SetStatusText("") wx.EVT_CLOSE(self, self.OnCloseWindow) # Add the Canvas self.NavCanvas = NavCanvas.NavCanvas(self,-1,(500,500), ProjectionFun = 'FlatEarth', Debug = 0, #BackgroundColor = "DARK SLATE BLUE") BackgroundColor = "WHITE", #UseBackground = 1, ).Canvas self.Canvas = NavCanvas.Canvas self.Canvas.NumBetweenBlits = 20 tb = self.NavCanvas.ToolBar tb.AddSeparator() RewindButton = wx.Button(tb, -1, "Rewind") tb.AddControl(RewindButton) wx.EVT_BUTTON(self, RewindButton.GetId() , self.Rewind) StopButton = wx.Button(tb, -1, "Stop") tb.AddControl(StopButton) wx.EVT_BUTTON(self, StopButton.GetId() , self.Stop) PlayButton = wx.Button(tb, -1, "Play") tb.AddControl(PlayButton) wx.EVT_BUTTON(self, PlayButton.GetId() ,self.Play) tb.Realize() self.Show(True) self.LE_movie = None self.LEsObjects = [] self.TimeStep = 0 self.FrameDelay = 10 # milliseconds self.FileDialog = wx.FileDialog(self, "Pick a file",".","","*",wx.OPEN) self.Timer = wx.PyTimer(self.ShowFrame) return None def Open_bna(self, event): dlg = self.FileDialog if dlg.ShowModal() == wx.ID_OK: filename = dlg.GetPath() self.LoadMap(filename) def LoadMap(self, filename): self.Canvas.Clear() try: shorelines = hazmat.read_bna(filename,polytype = "PolygonSet") for shoreline in shorelines: self.Canvas.AddPolygon(shoreline, LineWidth = 1, LineColor = "Black", FillColor = "Brown", FillStyle = 'Solid', Foreground = 0) self.Canvas.ZoomToBB() except: dlg = 
wx.MessageDialog(self, 'There was something wrong with the selected map file', 'View Trajectories', wx.OK | wx.ICON_ERROR) dlg.ShowModal() dlg.Destroy() def Load_Movie(self, event): import glob dlg = self.FileDialog if dlg.ShowModal() == wx.ID_OK: filename = dlg.GetPath() (self.LE_movie,(NumTimesteps,NumLEs),HeaderData,flags) = TAP_mod.ReadTrajectory(filename) wx.GetApp().Yield() return True else: return None def Run_Movie(self, event): if self.Load_Movie(None): if self.LEsObjects: self.Canvas.RemoveObjects(self.LEsObjects) self.LEsObjects = [] self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = "Black", Diameter = 1.5,Foreground = 1)) CurrentColor[0] = 1 self.ReRun_Movie(None) def RunOnTop_Movie(self, event): if self.Load_Movie(None): for object in self.LEsObjects: object.PutInBackground() self.LEsObjects.append(self.Canvas.AddPointSet(self.LE_movie[0], Color = GetColor(), Diameter = 1.5,Foreground = 1) ) self.ReRun_Movie(None) def ReRun_Movie(self, event): if not self.LE_movie: self.Run_Movie(None) else: self.Play(None) ## def UpdateThread(self): ## try: ## while hasattr(self, 'event') and not self.event.isSet(): ## wx.PostEvent(self, FrameEvent()) ## self.event.wait(self.FrameDelay) ## except wx.PyDeadObjectError: # BUG: we were destroyed ## return def Running(self): """Returns true if the animation is running""" return self.Timer.IsRunning() def Play(self,event): """Start the animation""" if not self.Running(): if self.LE_movie: #self.event.clear() #thread = threading.Thread(target = self.UpdateThread) #thread.start() self.Timer.Start(self.FrameDelay) else: self.Run_Movie(None) def Stop(self,event): self.Timer.Stop() def ShowFrame(self): if self.TimeStep < len(self.LE_movie): self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie))) # this sets the data for the next frame self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep]) self.Canvas.Draw() self.TimeStep += 1 wx.GetApp().Yield(True) else: self.Timer.Stop() def 
Rewind(self,event): self.TimeStep = 0 if self.LE_movie: self.LEsObjects[-1].SetPoints(self.LE_movie[self.TimeStep]) self.SetStatusText("Timestep # %i of %i"%(self.TimeStep+1,len(self.LE_movie))) self.Canvas.Draw() def OnAbout(self, event): dlg = wx.MessageDialog(self, "This is a small program to demonstrate\n" "the use of the FloatCanvas\n", "About Me", wx.OK | wx.ICON_INFORMATION) dlg.ShowModal() dlg.Destroy() def ZoomToFit(self,event): self.Canvas.ZoomToBB() def Clear(self,event = None): self.Canvas.Clear() self.Canvas.Draw() def OnQuit(self,event): self.Close(True) def OnCloseWindow(self, event): self.Destroy() def RunMovie(self,event = None): import RandomArray start = clock() shift = RandomArray.randint(0,0,(2,)) NumFrames = 50 for i in range(NumFrames): points = self.LEs.Points shift = RandomArray.randint(-5,5,(2,)) points += shift self.LEs.SetPoints(points) self.Canvas.Draw() print "running the movie took %f seconds to disply %i frames"%((clock() - start),NumFrames) def SetFrameRate(self,event): dlg = wx.TextEntryDialog(self, 'Please set the time between frames in milliseconds', 'ViewTrajectories', "%i"%self.FrameDelay) dlg.SetValue("%i"%self.FrameDelay) if dlg.ShowModal() == wx.ID_OK: try: self.FrameDelay = int(dlg.GetValue()) except: pass dlg.Destroy() class TrajectoryViewer(wx.App): """ Any bugs, comments, feedback, questions, and especially code are welcome: -Chris Barker Chris.Barker@noaa.gov """ def OnInit(self):
if __name__ == "__main__": app = TrajectoryViewer(0) app.MainLoop()
frame = DrawFrame(None, title="Trajectory Viewer", size=(700,700)) self.SetTopWindow(frame) return True
identifier_body
cluster_instances.js
$(document).ready(function () { //turn to inline mode $.fn.editable.defaults.mode = 'inline'; $("input[type='radio'][name='slaveof_type']").bind("change", function () { slaveOfTypeChange(); }); // 加载table数据 $('#redisServerTable').bootstrapTable(); // 开启监控数据 setInterval(monitorStatus, 20000, true); $('.modal').on('show.bs.modal', centerModals); $(window).on('resize', centerModals); changeContentHeader(clusterDescription, 'NoSQL', 'Redis集群详情'); }); function responseHandler(data) { // 遍历数组并排序 var result = new Array(); for (i in data) { // 找到下一个master节点 if (data[i] && data[i].role == 'MASTER') { result.push(data[i]); // 找到该master节点的所有slave节点 for (j in data) { if (i != j && data[j] && data[j].slaveof == data[i].ip + ":" + data[i].port) { result.push(data[j]); data[j] = null; } } data.splice(i, 1, null); } } // 如果遍历了一次还有剩余,说明该节点目前状态不明确 for (i in data) { if (data[i]) { result.push(data[i]); } } return result; } function slaveOfTypeChange() { var type = $("input[type='radio'][name='slaveof_type']:checked").val(); if (type == "OTHER_INST") { $("#masterInfoDiv").show(); } else { $("#masterInfoDiv").hide(); $("#masterHost").val(''); $("#masterPort").val(''); $("#masterPassword").val(''); } } function cacheSizeFormatter(value) { return value + 'GB'; } function instanceRoleFormatter(value, row, index, field) { if ("MASTER" == value) { return "<b>主节点</b>"; } if ("SLAVE" == value) { if (row.slaveof) { return "└"; } } return ""; } function instanceStatusFormatter(value) { if ("STARTED" == value) { return "运行中"; } if ("STOPPED" == value) { return "已停止"; } return "未运行"; } function instanceTypeFormatter(value) { if (value.indexOf("SENTINEL") != -1) { return "sentinel节点"; } else { return "redis节点"; } } function dateTimeFormatter(value) { return new Date(value).format("yyyy-MM-dd HH:mm:ss"); } // 存储当前行的样式,用于区分每组主从 var classIndex = 2; function rowStyle(row, index) { var classes = ['active', 'success', 'info', 'warning', 'danger']; if (row.status == "IDLE") { return { classes: 'info' }; 
} if (row.status == "STARTED") { if (row.role == 'MASTER') { // success info两个来回切换 classIndex = 3 - classIndex; } return { classes: classes[classIndex] }; } if (row.status == "STOPPED") { return { classes: 'danger' }; } return {}; } function monitorStatus() { var table = $('#redisServerTable'); var data = table.bootstrapTable('getData'); if (data.length > 0) { $.ajax({ type: "PATCH", url: "/clusters/" + data[0].clusterId + "/instances", complete: function(XMLHttpRequest, textStatus) { switch(XMLHttpRequest.status) { case 205: table.bootstrapTable('refresh'); break; default: break; } } }); } } function startRedisServer() { sendSelectedServerInfo('startup', '启动'); } function stopRedisServer() { sendSelectedServerInfo('shutdown', '停止'); } function sendSelectedServerInfo(command, commandTip) { if (!confirm("您确认进行\"" + commandTip + "\"操作吗?")) { return; } var table = $('#redisServerTable'); var hasError = false; var servers = $.map(table.bootstrapTable('getSelections'), fun
confirm("您确认进行删除吗?")) { return; } if (!checkWhetherAllowedRemove(servers)) { return; } table.bootstrapTable('showLoading'); $.ajax({ type: "POST", url: "redis/cluster/delClusterNodes", data: JSON.stringify(servers), dataType: "json", contentType: "application/json", success: function (response) { table.bootstrapTable('hideLoading'); table.bootstrapTable('refresh', {data: response}); } }); } function checkWhetherAllowedRemove(servers) { var result = false; $.ajax({ type: "POST", url: "redis/cluster/getNodesInfo", data: JSON.stringify(servers), dataType: "json", contentType: "application/json", async: false, success: function (data) { if (!data || data.length == 0) { alert("未找到匹配的节点,请重试或检查主机状态!"); return; } var msg = ""; for (var i = 0; i < data.length; i++) { node = data[i]; if (node.slotRanges.length) { msg += "[" + node.ip + ":" + node.port + "],"; } } if (msg != "") { msg = msg.substring(0, msg.length - 1) + "仍有slot分布,请先迁移slot!"; alert(msg); } else { result = true; } }, error: function (e) { alert("检测主机状态失败!"); } }); return result; } /** * 主从切换 */ function failover() { var table = $('#redisServerTable'); var servers = table.bootstrapTable('getSelections'); if (servers.length != 1) { alert("您只能选取一个节点执行此操作!"); return; } if (servers[0].role != 'SLAVE') { alert("此操作只能在slave节点上进行,请重新选取slave节点!"); return; } if (!confirm("确定执行主从切换操作?")) { return; } blockUI("正在执行操作"); $.ajax({ type: "PATCH", url: "/failover/instances/" + servers[0].id, success: function (response) { table.bootstrapTable('refresh'); }, error: function (e) { alert(e.responseJSON.data); }, complete: function(e) { unblockUI(); } }); } function openSlaveOfModal() { var selectedRows = $('#redisServerTable').bootstrapTable('getSelections'); if (selectedRows.length != 1) { alert("请选中一项进行修改!"); return; } var row = selectedRows[0]; if (row.redisVersion == 'redis-sentinel') { alert("该节点为sentinel节点,无法配置主从关系!"); return; } var allRows = $('#redisServerTable').bootstrapTable('getData'); var isWithSentinel = false; for 
(i = 0; i < allRows.length; i++) { if (allRows[i].redisVersion == 'redis-sentinel') { isWithSentinel = true; break; } } if (isWithSentinel == true) { if (!confirm("带sentinel监控的集群sentinel可能会自主恢复为配置前的主从状态,建议手工移除或更新sentinel节点,请确认知晓该情况!")) { return; } } $('#slaveOf_serverInstanceId').val(row.id); $('#slaveOf_clusterId').val(row.clusterId); $('#slaveInfo').val(row.ip + ":" + row.port); $('#slaveOfModal').modal('show'); } function configSlaveOf() { var confirmMsg = "您确认按照如下配置调整主从关系吗?" + "\r" + "从节点:" + $('#slaveInfo').val() + "\r"; var type = $("input[type='radio'][name='slaveof_type']:checked").val(); if (type == 'OTHER_INST') { if ("" == $("#masterHost").val() || "" == $("#masterPort").val()) { alert("请输入正确的主节点HOST、PORT信息"); return; } var masterURI = $('#masterHost').val() + ":" + $('#masterPort').val(); if (masterURI == $('#slaveInfo').val()) { alert("主从节点信息相同,请调整主节点信息"); return; } confirmMsg += "主节点:" + masterURI + "\r" + "主节点访问密码:" + $('#masterPassword').val(); } else { confirmMsg += "主节点: NO ONE"; } if (!confirm(confirmMsg)) { return; } var slaveOfInfo = {}; var type; if (type == 'OTHER_INST') { slaveOfInfo.ip = $("#masterHost").val(); slaveOfInfo.port = $("#masterPort").val(); slaveOfInfo.password = $("#masterPassword").val(); type = "PATCH"; } else { type = "DELETE"; } // blockUI first blockUI("正在保存配置"); var table = $('#redisServerTable'); table.bootstrapTable('showLoading'); $.ajax({ type: type, url: "/replication/instances/" + $("#slaveOf_serverInstanceId").val(), data: JSON.stringify(slaveOfInfo), contentType: 'application/json', success: function (response) { alert("配置主从关系成功!"); table.bootstrapTable('refresh'); }, error: function (e) { alert("配置主从关系失败!"); }, complete: function (e) { table.bootstrapTable('hideLoading'); unblockUI(); } }); $('#slaveOfModal').modal('hide'); } function centerModals() { $('.modal').each(function (i) { var $clone = $(this).clone().css('display', 'block').appendTo('body'); var top = Math.round(($clone.height() - 
$clone.find('.modal-content').height()) / 2); top = top > 0 ? top : 0; $clone.remove(); $(this).find('.modal-content').css("margin-top", top); }); } /** * 更新table中data相关的数据,脏刷新 * @param data */ function updateRows(data) { // 调整顺序 data = responseHandler(data); var table = $('#redisServerTable'); var allRows = table.bootstrapTable('getData'); // 如果是全量更新,则直接调用load方法 if (data.length == allRows.length) { table.bootstrapTable('load', data); return; } // 局部更新,遍历局部修改点 for (var i = 0; i < data.length; i++) { for (var j = 0; j < allRows.length; j++) { // 找到对应行 if (allRows[j].id == data[i].id) { // 若role或slaveof发生了变化,则需要重新排序 var targetIndex = j; if (data[i].role != allRows[j].role || data[i].slaveof != allRows[j]['slaveof']) { if (data[i].role == 'MASTER') { // master 插入到当前位置的下过一个master之前 for (; targetIndex < allRows.length; targetIndex++) { if (allRows[targetIndex].role == 'MASTER') { targetIndex--; break; } } } else { // slave 插入到所属master的位置 for (var p = 0; p < allRows.length; p++) { if (data[i].slaveof == allRows[p]['ip'] + ':' + allRows[p]['port']) { targetIndex = p + 1; break; } } } } var rowData = flatRow(data[i]); if (targetIndex == j) { table.bootstrapTable('updateRow', { index: j, row: rowData }); } else { // 删除旧行 table.bootstrapTable('remove', { field: 'serverInstanceId', values: [data[i].serverInstanceId] }); // 插入新行 if (j < targetIndex) { targetIndex--; } table.bootstrapTable('insertRow', { index: targetIndex, row: rowData }); } break; } } } } /** * 将立体的json修改成扁平的 * @param row * @returns {*} */ function flatRow(row) { row["serverInfo.serverId"] = row.serverInfo.serverId; row["serverInfo.serverType"] = row.serverInfo.serverType; row["serverInfo.ip"] = row.serverInfo.ip; row["serverInfo.host"] = row.serverInfo.host; row["serverInfo.env"] = row.serverInfo.env; row["serverInfo.idc"] = row.serverInfo.idc; row["serverInfo.totalMemory"] = row.serverInfo.totalMemory; row["serverInfo.freeMemory"] = row.serverInfo.freeMemory; row["redisClusterInfo.clusterId"] = 
row.redisClusterInfo.clusterId; return row; } /** * 将扁平的json恢复成立体的 * @param row * @returns {*} */ function hierarchicalRow(row) { row.serverInfo = { "serverId": row["serverInfo.serverId"], "serverType": row["serverInfo.serverType"], "ip": row["serverInfo.ip"], "host": row["serverInfo.host"], "env": row["serverInfo.env"], "idc": row["serverInfo.idc"], "totalMemory": row["serverInfo.totalMemory"], "freeMemory": row["serverInfo.freeMemory"], }; row.redisClusterInfo = { "clusterId": row["redisClusterInfo.clusterId"] }; return row; } window.nodeIpEvents = { 'click .edit': function (e, value, row, index) { // 防止冒泡,避免触发表格的行选中事件 e.stopPropagation(); loadPage('/nodes/' + row["nodeId"] + '/instances/page'); } }; function instanceLinkFormatter(value, row) { var description = ""; if (row["nodeDescription"]) { description = '<i class="glyphicon glyphicon-flag pull-right text-red" data-toggle="tooltip" data-original-title="' + row["nodeDescription"] + '"></i>'; } return '<a class="edit ml10" href="javascript:void(0);">' + value + description + '</a>'; }
ction (row) { if (row["status"].toLowerCase().indexOf(command) != -1) { hasError = true; return; } return row.id; }); if (hasError) { alert("无法重复操作已开启或停止的服务器!"); return; } if (servers.length < 1) { alert("请至少选择一项!"); return; } blockUI("正在执行操作"); $.ajax({ type: "PATCH", url: "/" + command + "/instances", data: JSON.stringify(servers), contentType: "application/json", success: function () { table.bootstrapTable('refresh'); unblockUI(); } }); } function delClusterNodes() { var table = $('#redisServerTable'); var servers = $.map(table.bootstrapTable('getSelections'), function (row) { return hierarchicalRow(row); }); if (servers.length < 1) { alert("请至少选择一项!"); return; } ; if (!
identifier_body
cluster_instances.js
$(document).ready(function () { //turn to inline mode $.fn.editable.defaults.mode = 'inline'; $("input[type='radio'][name='slaveof_type']").bind("change", function () { slaveOfTypeChange(); }); // 加载table数据 $('#redisServerTable').bootstrapTable(); // 开启监控数据 setInterval(monitorStatus, 20000, true); $('.modal').on('show.bs.modal', centerModals); $(window).on('resize', centerModals); changeContentHeader(clusterDescription, 'NoSQL', 'Redis集群详情'); }); function responseHandler(data) { // 遍历数组并排序 var result = new Array(); for (i in data) { // 找到下一个master节点 if (data[i] && data[i].role == 'MASTER') { result.push(data[i]); // 找到该master节点的所有slave节点 for (j in data) { if (i != j && data[j] && data[j].slaveof == data[i].ip + ":" + data[i].port) { result.push(data[j]); data[j] = null; } } data.splice(i, 1, null); } } // 如果遍历了一次还有剩余,说明该节点目前状态不明确 for (i in data) { if (data[i]) { result.push(data[i]); } } return result; } function slaveOfTypeChange() { var type = $("input[type='radio'][name='slaveof_type']:checked").val(); if (type == "OTHER_INST") { $("#masterInfoDiv").show(); } else { $("#masterInfoDiv").hide(); $("#masterHost").val(''); $("#masterPort").val(''); $("#masterPassword").val(''); } } function cacheSizeFormatter(value) { return value + 'GB'; } function instanceRoleFormatter(value, row, index, field) { if ("MASTER" == value) { return "<b>主节点</b>"; } if ("SLAVE" == value) { if (row.slaveof) { return "└"; } } return ""; } function instanceStatusFormatter(value) { if ("STARTED" == value) { return "运行中"; } if ("STOPPED" == value) { return "已停止"; } return "未运行"; } function instanceTypeFormatter(value) { if (value.indexOf("SENTINEL") != -1) { return "sentinel节点"; } else { return "redis节点"; } } function dateTimeFormatter(value) { return new Date(value).format("yyyy-MM-dd HH:mm:ss"); } // 存储当前行的样式,用于区分每组主从 var classIndex = 2; function rowStyle(row, index) { var classes = ['active', 'success', 'info', 'warning', 'danger']; if (row.status == "IDLE") { return { classes: 'info' }; 
} if (row.status == "STARTED") { if (row.role == 'MASTER') { // success info两个来回切换 classIndex = 3 - classIndex; } return { classes: classes[classIndex] }; } if (row.status == "STOPPED") { return { classes: 'danger' }; } return {}; } function monitorStatus() { var table = $('#redisServerTable'); var data = table.bootstrapTable('getData'); if (data.length > 0) { $.ajax({ type: "PATCH", url: "/clusters/" + data[0].clusterId + "/instances", complete: function(XMLHttpRequest, textStatus) { switch(XMLHttpRequest.status) { case 205: table.bootstrapTable('refresh'); break; default: break; } } }); } } function startRedisServer() { sendSelectedServerInfo('startup', '启动'); } function stopRedisServer() { sendSelectedServerInfo('shutdown', '停止'); } function sendSelectedServerInfo(command, commandTip) { if (!confirm("您确认进行\"" + commandTip + "\"操作吗?")) { return; } var table = $('#redisServerTable'); var hasError = false; var servers = $.map(table.bootstrapTable('getSelections'), function (row) { if (row["status"].toLowerCase().indexOf(command) != -1) { hasError = true; return; } return row.id; }); if (hasError) { alert("无法重复操作已开启或停止的服务器!"); return; } if (servers.length < 1) { alert("请至少选择一项!"); return; } blockUI("正在执行操作"); $.ajax({ type: "PATCH", url: "/" + command + "/instances", data: JSON.stringify(servers), contentType: "application/json", success: function () { table.bootstrapTable('refresh'); unblockUI(); } }); } function delClusterNodes() { var table = $('#redisServerTable'); var servers = $.map(table.bootstrapTable('getSelections'), function (row) { return hierarchicalRow(row); }); if (servers.length < 1) { alert("请至少选择一项!"); return; } ; if (!confirm("您确认进行删除吗?")) { return; } if (!checkWhetherAllowedRemove(servers)) { return; } table.bootstrapTable('showLoading'); $.ajax({ type: "POST", url: "redis/cluster/delClusterNodes", data: JSON.stringify(servers), dataType: "json", contentType: "application/json", success: function (response) { table.bootstrapTable('hideLoading'); 
table.bootstrapTable('refresh', {data: response}); } }); } function checkWhetherAllowedRemove(servers) { var result = false; $.ajax({ type: "POST", url: "redis/cluster/getNodesInfo",
success: function (data) { if (!data || data.length == 0) { alert("未找到匹配的节点,请重试或检查主机状态!"); return; } var msg = ""; for (var i = 0; i < data.length; i++) { node = data[i]; if (node.slotRanges.length) { msg += "[" + node.ip + ":" + node.port + "],"; } } if (msg != "") { msg = msg.substring(0, msg.length - 1) + "仍有slot分布,请先迁移slot!"; alert(msg); } else { result = true; } }, error: function (e) { alert("检测主机状态失败!"); } }); return result; } /** * 主从切换 */ function failover() { var table = $('#redisServerTable'); var servers = table.bootstrapTable('getSelections'); if (servers.length != 1) { alert("您只能选取一个节点执行此操作!"); return; } if (servers[0].role != 'SLAVE') { alert("此操作只能在slave节点上进行,请重新选取slave节点!"); return; } if (!confirm("确定执行主从切换操作?")) { return; } blockUI("正在执行操作"); $.ajax({ type: "PATCH", url: "/failover/instances/" + servers[0].id, success: function (response) { table.bootstrapTable('refresh'); }, error: function (e) { alert(e.responseJSON.data); }, complete: function(e) { unblockUI(); } }); } function openSlaveOfModal() { var selectedRows = $('#redisServerTable').bootstrapTable('getSelections'); if (selectedRows.length != 1) { alert("请选中一项进行修改!"); return; } var row = selectedRows[0]; if (row.redisVersion == 'redis-sentinel') { alert("该节点为sentinel节点,无法配置主从关系!"); return; } var allRows = $('#redisServerTable').bootstrapTable('getData'); var isWithSentinel = false; for (i = 0; i < allRows.length; i++) { if (allRows[i].redisVersion == 'redis-sentinel') { isWithSentinel = true; break; } } if (isWithSentinel == true) { if (!confirm("带sentinel监控的集群sentinel可能会自主恢复为配置前的主从状态,建议手工移除或更新sentinel节点,请确认知晓该情况!")) { return; } } $('#slaveOf_serverInstanceId').val(row.id); $('#slaveOf_clusterId').val(row.clusterId); $('#slaveInfo').val(row.ip + ":" + row.port); $('#slaveOfModal').modal('show'); } function configSlaveOf() { var confirmMsg = "您确认按照如下配置调整主从关系吗?" 
+ "\r" + "从节点:" + $('#slaveInfo').val() + "\r"; var type = $("input[type='radio'][name='slaveof_type']:checked").val(); if (type == 'OTHER_INST') { if ("" == $("#masterHost").val() || "" == $("#masterPort").val()) { alert("请输入正确的主节点HOST、PORT信息"); return; } var masterURI = $('#masterHost').val() + ":" + $('#masterPort').val(); if (masterURI == $('#slaveInfo').val()) { alert("主从节点信息相同,请调整主节点信息"); return; } confirmMsg += "主节点:" + masterURI + "\r" + "主节点访问密码:" + $('#masterPassword').val(); } else { confirmMsg += "主节点: NO ONE"; } if (!confirm(confirmMsg)) { return; } var slaveOfInfo = {}; var type; if (type == 'OTHER_INST') { slaveOfInfo.ip = $("#masterHost").val(); slaveOfInfo.port = $("#masterPort").val(); slaveOfInfo.password = $("#masterPassword").val(); type = "PATCH"; } else { type = "DELETE"; } // blockUI first blockUI("正在保存配置"); var table = $('#redisServerTable'); table.bootstrapTable('showLoading'); $.ajax({ type: type, url: "/replication/instances/" + $("#slaveOf_serverInstanceId").val(), data: JSON.stringify(slaveOfInfo), contentType: 'application/json', success: function (response) { alert("配置主从关系成功!"); table.bootstrapTable('refresh'); }, error: function (e) { alert("配置主从关系失败!"); }, complete: function (e) { table.bootstrapTable('hideLoading'); unblockUI(); } }); $('#slaveOfModal').modal('hide'); } function centerModals() { $('.modal').each(function (i) { var $clone = $(this).clone().css('display', 'block').appendTo('body'); var top = Math.round(($clone.height() - $clone.find('.modal-content').height()) / 2); top = top > 0 ? 
top : 0; $clone.remove(); $(this).find('.modal-content').css("margin-top", top); }); } /** * 更新table中data相关的数据,脏刷新 * @param data */ function updateRows(data) { // 调整顺序 data = responseHandler(data); var table = $('#redisServerTable'); var allRows = table.bootstrapTable('getData'); // 如果是全量更新,则直接调用load方法 if (data.length == allRows.length) { table.bootstrapTable('load', data); return; } // 局部更新,遍历局部修改点 for (var i = 0; i < data.length; i++) { for (var j = 0; j < allRows.length; j++) { // 找到对应行 if (allRows[j].id == data[i].id) { // 若role或slaveof发生了变化,则需要重新排序 var targetIndex = j; if (data[i].role != allRows[j].role || data[i].slaveof != allRows[j]['slaveof']) { if (data[i].role == 'MASTER') { // master 插入到当前位置的下过一个master之前 for (; targetIndex < allRows.length; targetIndex++) { if (allRows[targetIndex].role == 'MASTER') { targetIndex--; break; } } } else { // slave 插入到所属master的位置 for (var p = 0; p < allRows.length; p++) { if (data[i].slaveof == allRows[p]['ip'] + ':' + allRows[p]['port']) { targetIndex = p + 1; break; } } } } var rowData = flatRow(data[i]); if (targetIndex == j) { table.bootstrapTable('updateRow', { index: j, row: rowData }); } else { // 删除旧行 table.bootstrapTable('remove', { field: 'serverInstanceId', values: [data[i].serverInstanceId] }); // 插入新行 if (j < targetIndex) { targetIndex--; } table.bootstrapTable('insertRow', { index: targetIndex, row: rowData }); } break; } } } } /** * 将立体的json修改成扁平的 * @param row * @returns {*} */ function flatRow(row) { row["serverInfo.serverId"] = row.serverInfo.serverId; row["serverInfo.serverType"] = row.serverInfo.serverType; row["serverInfo.ip"] = row.serverInfo.ip; row["serverInfo.host"] = row.serverInfo.host; row["serverInfo.env"] = row.serverInfo.env; row["serverInfo.idc"] = row.serverInfo.idc; row["serverInfo.totalMemory"] = row.serverInfo.totalMemory; row["serverInfo.freeMemory"] = row.serverInfo.freeMemory; row["redisClusterInfo.clusterId"] = row.redisClusterInfo.clusterId; return row; } /** * 将扁平的json恢复成立体的 * 
@param row * @returns {*} */ function hierarchicalRow(row) { row.serverInfo = { "serverId": row["serverInfo.serverId"], "serverType": row["serverInfo.serverType"], "ip": row["serverInfo.ip"], "host": row["serverInfo.host"], "env": row["serverInfo.env"], "idc": row["serverInfo.idc"], "totalMemory": row["serverInfo.totalMemory"], "freeMemory": row["serverInfo.freeMemory"], }; row.redisClusterInfo = { "clusterId": row["redisClusterInfo.clusterId"] }; return row; } window.nodeIpEvents = { 'click .edit': function (e, value, row, index) { // 防止冒泡,避免触发表格的行选中事件 e.stopPropagation(); loadPage('/nodes/' + row["nodeId"] + '/instances/page'); } }; function instanceLinkFormatter(value, row) { var description = ""; if (row["nodeDescription"]) { description = '<i class="glyphicon glyphicon-flag pull-right text-red" data-toggle="tooltip" data-original-title="' + row["nodeDescription"] + '"></i>'; } return '<a class="edit ml10" href="javascript:void(0);">' + value + description + '</a>'; }
data: JSON.stringify(servers), dataType: "json", contentType: "application/json", async: false,
random_line_split
cluster_instances.js
$(document).ready(function () { //turn to inline mode $.fn.editable.defaults.mode = 'inline'; $("input[type='radio'][name='slaveof_type']").bind("change", function () { slaveOfTypeChange(); }); // 加载table数据 $('#redisServerTable').bootstrapTable(); // 开启监控数据 setInterval(monitorStatus, 20000, true); $('.modal').on('show.bs.modal', centerModals); $(window).on('resize', centerModals); changeContentHeader(clusterDescription, 'NoSQL', 'Redis集群详情'); }); function responseHandler(data) { // 遍历数组并排序 var result = new Array(); for (i in data) { // 找到下一个master节点 if (data[i] && data[i].role == 'MASTER') { result.push(data[i]); // 找到该master节点的所有slave节点 for (j in data) { if (i != j && data[j] && data[j].slaveof == data[i].ip + ":" + data[i].port) { result.push(data[j]); data[j] = null; } } data.splice(i, 1, null); } } // 如果遍历了一次还有剩余,说明该节点目前状态不明确 for (i in data) { if (data[i]) { result.push(data[i]); } } return result; } function slaveOfTypeChange() { var type = $("input[type='radio'][name='slaveof_type']:checked").val(); if (type == "OTHER_INST") { $("#masterInfoDiv").show(); } else { $("#masterInfoDiv").hide(); $("#masterHost").val(''); $("#masterPort").val(''); $("#masterPassword").val(''); } } function cacheSizeFormatter(value) { return value + 'GB'; } function instanceRoleFormatter(value, row, index, field) { if ("MASTER" == value) { return "<b>主节点</b>"; } if ("SLAVE" == value) { if (row.slaveof) { return "└"; } } return ""; } function instanceStatusFormatter(value) { if ("STARTED" == value) { return "运行中"; } if ("STOPPED" == value) { return "已停止"; } return "未运行"; } function instanceTypeFormatter(value) { if (value.indexOf("SENTINEL") != -1) { return "sentinel节点"; } else { return "redis节点"; } } function dateTimeFormatter(value) { return new Date(value).format("yyyy-MM-dd HH:mm:ss"); } // 存储当前行的样式,用于区分每组主从 var classIndex = 2; function rowStyle(row, index) { var classes = ['active', 'success', 'info', 'warning', 'danger']; if (row.status == "IDLE") { return { classes: 'info' }; 
} if (row.status == "STARTED") { if (row.role == 'MASTER') { // success info两个来回切换 classIndex = 3 - classIndex; } return { classes: classes[classIndex] }; } if (row.status == "STOPPED") { return { classes: 'danger' }; } return {}; } function monitorStatus() { var table = $('#redisServerTable'); var data = table.bootstrapTable('getData'); if (data.length > 0) { $.ajax({ type: "PATCH", url: "/clusters/" + data[0].clusterId + "/instances", complete: function(XMLHttpRequest, textStatus) { switch(XMLHttpRequest.status) { case 205: table.bootstrapTable('refresh'); break; default: break; } } }); } } function startRedisServer() { sendSelectedServerInfo('startup', '启动'); } function stopRedisServer() { sendSelectedServerInfo('shutdown', '停止'); } function sendSelectedServerInfo(command, commandTip) { if (!confirm("您确认进行\"" + commandTip + "\"操作吗?")) { return; } var table = $('#redisServerTable'); var hasError = false; var servers = $.map(table.bootstrapTable('getSelections'), function (row) { if (row["status"].toLowerCase().indexOf(command) != -1) { hasError = true; return; } return row.id; }); if (hasError) { alert("无法重复操作已开启或停止的服务器!"); return; } if (servers.length < 1) { alert("请至少选择一项!"); return; } blockUI("正在执行操作"); $.ajax({ type: "PATCH", url: "/" + command + "/instances", data: JSON.stringify(servers), contentType: "application/json", success: function () { table.bootstrapTable('refresh'); unblockUI(); } }); } function delClusterNodes() { var table = $('#redisServerTable'); var servers = $.map(table.bootstrapTable('getSelections'), function (row) { return hierarchicalRow(row); }); if (servers.length < 1) { alert("请至少选择一项!"); return; } ; if (!confirm("您确认进行删除吗?")) { return; } if (!checkWhetherAllowedRemove(servers)) { return; } table.bootstrapTable('showLoading'); $.ajax({ type: "POST", url: "redis/cluster/delClusterNodes", data: JSON.stringify(servers), dataType: "json", contentType: "application/json", success: function (response) { table.bootstrapTable('hideLoading'); 
table.bootstrapTable('refresh', {data: response}); } }); } function checkWhetherAllowedRemove(servers) { var result = false; $.ajax({ type: "POST", url: "redis/cluster/getNodesInfo", data: JSON.stringify(servers), dataType: "json", contentType: "application/json", async: false, success: function (data) { if (!data
alert("未找到匹配的节点,请重试或检查主机状态!"); return; } var msg = ""; for (var i = 0; i < data.length; i++) { node = data[i]; if (node.slotRanges.length) { msg += "[" + node.ip + ":" + node.port + "],"; } } if (msg != "") { msg = msg.substring(0, msg.length - 1) + "仍有slot分布,请先迁移slot!"; alert(msg); } else { result = true; } }, error: function (e) { alert("检测主机状态失败!"); } }); return result; } /** * 主从切换 */ function failover() { var table = $('#redisServerTable'); var servers = table.bootstrapTable('getSelections'); if (servers.length != 1) { alert("您只能选取一个节点执行此操作!"); return; } if (servers[0].role != 'SLAVE') { alert("此操作只能在slave节点上进行,请重新选取slave节点!"); return; } if (!confirm("确定执行主从切换操作?")) { return; } blockUI("正在执行操作"); $.ajax({ type: "PATCH", url: "/failover/instances/" + servers[0].id, success: function (response) { table.bootstrapTable('refresh'); }, error: function (e) { alert(e.responseJSON.data); }, complete: function(e) { unblockUI(); } }); } function openSlaveOfModal() { var selectedRows = $('#redisServerTable').bootstrapTable('getSelections'); if (selectedRows.length != 1) { alert("请选中一项进行修改!"); return; } var row = selectedRows[0]; if (row.redisVersion == 'redis-sentinel') { alert("该节点为sentinel节点,无法配置主从关系!"); return; } var allRows = $('#redisServerTable').bootstrapTable('getData'); var isWithSentinel = false; for (i = 0; i < allRows.length; i++) { if (allRows[i].redisVersion == 'redis-sentinel') { isWithSentinel = true; break; } } if (isWithSentinel == true) { if (!confirm("带sentinel监控的集群sentinel可能会自主恢复为配置前的主从状态,建议手工移除或更新sentinel节点,请确认知晓该情况!")) { return; } } $('#slaveOf_serverInstanceId').val(row.id); $('#slaveOf_clusterId').val(row.clusterId); $('#slaveInfo').val(row.ip + ":" + row.port); $('#slaveOfModal').modal('show'); } function configSlaveOf() { var confirmMsg = "您确认按照如下配置调整主从关系吗?" 
+ "\r" + "从节点:" + $('#slaveInfo').val() + "\r"; var type = $("input[type='radio'][name='slaveof_type']:checked").val(); if (type == 'OTHER_INST') { if ("" == $("#masterHost").val() || "" == $("#masterPort").val()) { alert("请输入正确的主节点HOST、PORT信息"); return; } var masterURI = $('#masterHost').val() + ":" + $('#masterPort').val(); if (masterURI == $('#slaveInfo').val()) { alert("主从节点信息相同,请调整主节点信息"); return; } confirmMsg += "主节点:" + masterURI + "\r" + "主节点访问密码:" + $('#masterPassword').val(); } else { confirmMsg += "主节点: NO ONE"; } if (!confirm(confirmMsg)) { return; } var slaveOfInfo = {}; var type; if (type == 'OTHER_INST') { slaveOfInfo.ip = $("#masterHost").val(); slaveOfInfo.port = $("#masterPort").val(); slaveOfInfo.password = $("#masterPassword").val(); type = "PATCH"; } else { type = "DELETE"; } // blockUI first blockUI("正在保存配置"); var table = $('#redisServerTable'); table.bootstrapTable('showLoading'); $.ajax({ type: type, url: "/replication/instances/" + $("#slaveOf_serverInstanceId").val(), data: JSON.stringify(slaveOfInfo), contentType: 'application/json', success: function (response) { alert("配置主从关系成功!"); table.bootstrapTable('refresh'); }, error: function (e) { alert("配置主从关系失败!"); }, complete: function (e) { table.bootstrapTable('hideLoading'); unblockUI(); } }); $('#slaveOfModal').modal('hide'); } function centerModals() { $('.modal').each(function (i) { var $clone = $(this).clone().css('display', 'block').appendTo('body'); var top = Math.round(($clone.height() - $clone.find('.modal-content').height()) / 2); top = top > 0 ? 
top : 0; $clone.remove(); $(this).find('.modal-content').css("margin-top", top); }); } /** * 更新table中data相关的数据,脏刷新 * @param data */ function updateRows(data) { // 调整顺序 data = responseHandler(data); var table = $('#redisServerTable'); var allRows = table.bootstrapTable('getData'); // 如果是全量更新,则直接调用load方法 if (data.length == allRows.length) { table.bootstrapTable('load', data); return; } // 局部更新,遍历局部修改点 for (var i = 0; i < data.length; i++) { for (var j = 0; j < allRows.length; j++) { // 找到对应行 if (allRows[j].id == data[i].id) { // 若role或slaveof发生了变化,则需要重新排序 var targetIndex = j; if (data[i].role != allRows[j].role || data[i].slaveof != allRows[j]['slaveof']) { if (data[i].role == 'MASTER') { // master 插入到当前位置的下过一个master之前 for (; targetIndex < allRows.length; targetIndex++) { if (allRows[targetIndex].role == 'MASTER') { targetIndex--; break; } } } else { // slave 插入到所属master的位置 for (var p = 0; p < allRows.length; p++) { if (data[i].slaveof == allRows[p]['ip'] + ':' + allRows[p]['port']) { targetIndex = p + 1; break; } } } } var rowData = flatRow(data[i]); if (targetIndex == j) { table.bootstrapTable('updateRow', { index: j, row: rowData }); } else { // 删除旧行 table.bootstrapTable('remove', { field: 'serverInstanceId', values: [data[i].serverInstanceId] }); // 插入新行 if (j < targetIndex) { targetIndex--; } table.bootstrapTable('insertRow', { index: targetIndex, row: rowData }); } break; } } } } /** * 将立体的json修改成扁平的 * @param row * @returns {*} */ function flatRow(row) { row["serverInfo.serverId"] = row.serverInfo.serverId; row["serverInfo.serverType"] = row.serverInfo.serverType; row["serverInfo.ip"] = row.serverInfo.ip; row["serverInfo.host"] = row.serverInfo.host; row["serverInfo.env"] = row.serverInfo.env; row["serverInfo.idc"] = row.serverInfo.idc; row["serverInfo.totalMemory"] = row.serverInfo.totalMemory; row["serverInfo.freeMemory"] = row.serverInfo.freeMemory; row["redisClusterInfo.clusterId"] = row.redisClusterInfo.clusterId; return row; } /** * 将扁平的json恢复成立体的 * 
@param row * @returns {*} */ function hierarchicalRow(row) { row.serverInfo = { "serverId": row["serverInfo.serverId"], "serverType": row["serverInfo.serverType"], "ip": row["serverInfo.ip"], "host": row["serverInfo.host"], "env": row["serverInfo.env"], "idc": row["serverInfo.idc"], "totalMemory": row["serverInfo.totalMemory"], "freeMemory": row["serverInfo.freeMemory"], }; row.redisClusterInfo = { "clusterId": row["redisClusterInfo.clusterId"] }; return row; } window.nodeIpEvents = { 'click .edit': function (e, value, row, index) { // 防止冒泡,避免触发表格的行选中事件 e.stopPropagation(); loadPage('/nodes/' + row["nodeId"] + '/instances/page'); } }; function instanceLinkFormatter(value, row) { var description = ""; if (row["nodeDescription"]) { description = '<i class="glyphicon glyphicon-flag pull-right text-red" data-toggle="tooltip" data-original-title="' + row["nodeDescription"] + '"></i>'; } return '<a class="edit ml10" href="javascript:void(0);">' + value + description + '</a>'; }
|| data.length == 0) {
identifier_name
cluster_instances.js
$(document).ready(function () { //turn to inline mode $.fn.editable.defaults.mode = 'inline'; $("input[type='radio'][name='slaveof_type']").bind("change", function () { slaveOfTypeChange(); }); // 加载table数据 $('#redisServerTable').bootstrapTable(); // 开启监控数据 setInterval(monitorStatus, 20000, true); $('.modal').on('show.bs.modal', centerModals); $(window).on('resize', centerModals); changeContentHeader(clusterDescription, 'NoSQL', 'Redis集群详情'); }); function responseHandler(data) { // 遍历数组并排序 var result = new Array(); for (i in data) { // 找到下一个master节点 if (data[i] && data[i].role == 'MASTER') { result.push(data[i]); // 找到该master节点的所有slave节点 for (j in data) { if (i != j && data[j] && data[j].slaveof == data[i].ip + ":" + data[i].port) { result.push(data[j]); data[j] = null; } } data.splice(i, 1, null); } } // 如果遍历了一次还有剩余,说明该节点目前状态不明确 for (i in data) { if (data[i]) { result.push(data[i]); } } return result; } function slaveOfTypeChange() { var type = $("input[type='radio'][name='slaveof_type']:checked").val(); if (type == "OTHER_INST") { $("#masterInfoDiv").show(); } else { $("#masterInfoDiv").hide(); $("#masterHost").val(''); $("#masterPort").val(''); $("#master
) { if ("MASTER" == value) { return "<b>主节点</b>"; } if ("SLAVE" == value) { if (row.slaveof) { return "└"; } } return ""; } function instanceStatusFormatter(value) { if ("STARTED" == value) { return "运行中"; } if ("STOPPED" == value) { return "已停止"; } return "未运行"; } function instanceTypeFormatter(value) { if (value.indexOf("SENTINEL") != -1) { return "sentinel节点"; } else { return "redis节点"; } } function dateTimeFormatter(value) { return new Date(value).format("yyyy-MM-dd HH:mm:ss"); } // 存储当前行的样式,用于区分每组主从 var classIndex = 2; function rowStyle(row, index) { var classes = ['active', 'success', 'info', 'warning', 'danger']; if (row.status == "IDLE") { return { classes: 'info' }; } if (row.status == "STARTED") { if (row.role == 'MASTER') { // success info两个来回切换 classIndex = 3 - classIndex; } return { classes: classes[classIndex] }; } if (row.status == "STOPPED") { return { classes: 'danger' }; } return {}; } function monitorStatus() { var table = $('#redisServerTable'); var data = table.bootstrapTable('getData'); if (data.length > 0) { $.ajax({ type: "PATCH", url: "/clusters/" + data[0].clusterId + "/instances", complete: function(XMLHttpRequest, textStatus) { switch(XMLHttpRequest.status) { case 205: table.bootstrapTable('refresh'); break; default: break; } } }); } } function startRedisServer() { sendSelectedServerInfo('startup', '启动'); } function stopRedisServer() { sendSelectedServerInfo('shutdown', '停止'); } function sendSelectedServerInfo(command, commandTip) { if (!confirm("您确认进行\"" + commandTip + "\"操作吗?")) { return; } var table = $('#redisServerTable'); var hasError = false; var servers = $.map(table.bootstrapTable('getSelections'), function (row) { if (row["status"].toLowerCase().indexOf(command) != -1) { hasError = true; return; } return row.id; }); if (hasError) { alert("无法重复操作已开启或停止的服务器!"); return; } if (servers.length < 1) { alert("请至少选择一项!"); return; } blockUI("正在执行操作"); $.ajax({ type: "PATCH", url: "/" + command + "/instances", data: 
JSON.stringify(servers), contentType: "application/json", success: function () { table.bootstrapTable('refresh'); unblockUI(); } }); } function delClusterNodes() { var table = $('#redisServerTable'); var servers = $.map(table.bootstrapTable('getSelections'), function (row) { return hierarchicalRow(row); }); if (servers.length < 1) { alert("请至少选择一项!"); return; } ; if (!confirm("您确认进行删除吗?")) { return; } if (!checkWhetherAllowedRemove(servers)) { return; } table.bootstrapTable('showLoading'); $.ajax({ type: "POST", url: "redis/cluster/delClusterNodes", data: JSON.stringify(servers), dataType: "json", contentType: "application/json", success: function (response) { table.bootstrapTable('hideLoading'); table.bootstrapTable('refresh', {data: response}); } }); } function checkWhetherAllowedRemove(servers) { var result = false; $.ajax({ type: "POST", url: "redis/cluster/getNodesInfo", data: JSON.stringify(servers), dataType: "json", contentType: "application/json", async: false, success: function (data) { if (!data || data.length == 0) { alert("未找到匹配的节点,请重试或检查主机状态!"); return; } var msg = ""; for (var i = 0; i < data.length; i++) { node = data[i]; if (node.slotRanges.length) { msg += "[" + node.ip + ":" + node.port + "],"; } } if (msg != "") { msg = msg.substring(0, msg.length - 1) + "仍有slot分布,请先迁移slot!"; alert(msg); } else { result = true; } }, error: function (e) { alert("检测主机状态失败!"); } }); return result; } /** * 主从切换 */ function failover() { var table = $('#redisServerTable'); var servers = table.bootstrapTable('getSelections'); if (servers.length != 1) { alert("您只能选取一个节点执行此操作!"); return; } if (servers[0].role != 'SLAVE') { alert("此操作只能在slave节点上进行,请重新选取slave节点!"); return; } if (!confirm("确定执行主从切换操作?")) { return; } blockUI("正在执行操作"); $.ajax({ type: "PATCH", url: "/failover/instances/" + servers[0].id, success: function (response) { table.bootstrapTable('refresh'); }, error: function (e) { alert(e.responseJSON.data); }, complete: function(e) { unblockUI(); } }); } function 
openSlaveOfModal() { var selectedRows = $('#redisServerTable').bootstrapTable('getSelections'); if (selectedRows.length != 1) { alert("请选中一项进行修改!"); return; } var row = selectedRows[0]; if (row.redisVersion == 'redis-sentinel') { alert("该节点为sentinel节点,无法配置主从关系!"); return; } var allRows = $('#redisServerTable').bootstrapTable('getData'); var isWithSentinel = false; for (i = 0; i < allRows.length; i++) { if (allRows[i].redisVersion == 'redis-sentinel') { isWithSentinel = true; break; } } if (isWithSentinel == true) { if (!confirm("带sentinel监控的集群sentinel可能会自主恢复为配置前的主从状态,建议手工移除或更新sentinel节点,请确认知晓该情况!")) { return; } } $('#slaveOf_serverInstanceId').val(row.id); $('#slaveOf_clusterId').val(row.clusterId); $('#slaveInfo').val(row.ip + ":" + row.port); $('#slaveOfModal').modal('show'); } function configSlaveOf() { var confirmMsg = "您确认按照如下配置调整主从关系吗?" + "\r" + "从节点:" + $('#slaveInfo').val() + "\r"; var type = $("input[type='radio'][name='slaveof_type']:checked").val(); if (type == 'OTHER_INST') { if ("" == $("#masterHost").val() || "" == $("#masterPort").val()) { alert("请输入正确的主节点HOST、PORT信息"); return; } var masterURI = $('#masterHost').val() + ":" + $('#masterPort').val(); if (masterURI == $('#slaveInfo').val()) { alert("主从节点信息相同,请调整主节点信息"); return; } confirmMsg += "主节点:" + masterURI + "\r" + "主节点访问密码:" + $('#masterPassword').val(); } else { confirmMsg += "主节点: NO ONE"; } if (!confirm(confirmMsg)) { return; } var slaveOfInfo = {}; var type; if (type == 'OTHER_INST') { slaveOfInfo.ip = $("#masterHost").val(); slaveOfInfo.port = $("#masterPort").val(); slaveOfInfo.password = $("#masterPassword").val(); type = "PATCH"; } else { type = "DELETE"; } // blockUI first blockUI("正在保存配置"); var table = $('#redisServerTable'); table.bootstrapTable('showLoading'); $.ajax({ type: type, url: "/replication/instances/" + $("#slaveOf_serverInstanceId").val(), data: JSON.stringify(slaveOfInfo), contentType: 'application/json', success: function (response) { alert("配置主从关系成功!"); 
table.bootstrapTable('refresh'); }, error: function (e) { alert("配置主从关系失败!"); }, complete: function (e) { table.bootstrapTable('hideLoading'); unblockUI(); } }); $('#slaveOfModal').modal('hide'); } function centerModals() { $('.modal').each(function (i) { var $clone = $(this).clone().css('display', 'block').appendTo('body'); var top = Math.round(($clone.height() - $clone.find('.modal-content').height()) / 2); top = top > 0 ? top : 0; $clone.remove(); $(this).find('.modal-content').css("margin-top", top); }); } /** * 更新table中data相关的数据,脏刷新 * @param data */ function updateRows(data) { // 调整顺序 data = responseHandler(data); var table = $('#redisServerTable'); var allRows = table.bootstrapTable('getData'); // 如果是全量更新,则直接调用load方法 if (data.length == allRows.length) { table.bootstrapTable('load', data); return; } // 局部更新,遍历局部修改点 for (var i = 0; i < data.length; i++) { for (var j = 0; j < allRows.length; j++) { // 找到对应行 if (allRows[j].id == data[i].id) { // 若role或slaveof发生了变化,则需要重新排序 var targetIndex = j; if (data[i].role != allRows[j].role || data[i].slaveof != allRows[j]['slaveof']) { if (data[i].role == 'MASTER') { // master 插入到当前位置的下过一个master之前 for (; targetIndex < allRows.length; targetIndex++) { if (allRows[targetIndex].role == 'MASTER') { targetIndex--; break; } } } else { // slave 插入到所属master的位置 for (var p = 0; p < allRows.length; p++) { if (data[i].slaveof == allRows[p]['ip'] + ':' + allRows[p]['port']) { targetIndex = p + 1; break; } } } } var rowData = flatRow(data[i]); if (targetIndex == j) { table.bootstrapTable('updateRow', { index: j, row: rowData }); } else { // 删除旧行 table.bootstrapTable('remove', { field: 'serverInstanceId', values: [data[i].serverInstanceId] }); // 插入新行 if (j < targetIndex) { targetIndex--; } table.bootstrapTable('insertRow', { index: targetIndex, row: rowData }); } break; } } } } /** * 将立体的json修改成扁平的 * @param row * @returns {*} */ function flatRow(row) { row["serverInfo.serverId"] = row.serverInfo.serverId; row["serverInfo.serverType"] = 
row.serverInfo.serverType; row["serverInfo.ip"] = row.serverInfo.ip; row["serverInfo.host"] = row.serverInfo.host; row["serverInfo.env"] = row.serverInfo.env; row["serverInfo.idc"] = row.serverInfo.idc; row["serverInfo.totalMemory"] = row.serverInfo.totalMemory; row["serverInfo.freeMemory"] = row.serverInfo.freeMemory; row["redisClusterInfo.clusterId"] = row.redisClusterInfo.clusterId; return row; } /** * 将扁平的json恢复成立体的 * @param row * @returns {*} */ function hierarchicalRow(row) { row.serverInfo = { "serverId": row["serverInfo.serverId"], "serverType": row["serverInfo.serverType"], "ip": row["serverInfo.ip"], "host": row["serverInfo.host"], "env": row["serverInfo.env"], "idc": row["serverInfo.idc"], "totalMemory": row["serverInfo.totalMemory"], "freeMemory": row["serverInfo.freeMemory"], }; row.redisClusterInfo = { "clusterId": row["redisClusterInfo.clusterId"] }; return row; } window.nodeIpEvents = { 'click .edit': function (e, value, row, index) { // 防止冒泡,避免触发表格的行选中事件 e.stopPropagation(); loadPage('/nodes/' + row["nodeId"] + '/instances/page'); } }; function instanceLinkFormatter(value, row) { var description = ""; if (row["nodeDescription"]) { description = '<i class="glyphicon glyphicon-flag pull-right text-red" data-toggle="tooltip" data-original-title="' + row["nodeDescription"] + '"></i>'; } return '<a class="edit ml10" href="javascript:void(0);">' + value + description + '</a>'; }
Password").val(''); } } function cacheSizeFormatter(value) { return value + 'GB'; } function instanceRoleFormatter(value, row, index, field
conditional_block
debug-trace-player.ts
import { DebugTrace, SlotInfo } from '../debug-trace/debug-trace'; // The TraceOp enum must stay in sync with SkSL::SkVMTraceInfo::Op. enum TraceOp { Line = 0, Var = 1, Enter = 2, Exit = 3, Scope = 4, } // The NumberKind enum must stay in sync with SkSL::Type::NumberKind. enum NumberKind { Float = 0, Signed = 1, Unsigned = 2, Boolean = 3, Nonnumeric = 4, } // Trace data comes in from the JSON as a number[]. We unpack it into a TraceInfo for ease of use. type TraceInfo = { op: TraceOp; data: number[]; }; type StackFrame = { // A FunctionInfo from trace.functions. func: number; // The current line number within the function. line: number; // Any variable slots which have been touched in this function. displayMask: boolean[]; }; type Slot = { // The current raw value held in this slot (as a 32-bit integer, not bit-punned). value: number; // The scope depth associated with this slot (as indicated by trace_scope). scope: number; // When was the variable in this slot most recently written? (as a cursor position) writeTime: number; }; export type VariableData = { // A SlotInfo from trace.slots. slotIndex: number; // Has this slot been written-to since the last step call? dirty: boolean; // The current value held in this slot (properly bit-punned/cast to the expected type) value: number | boolean; }; export class DebugTracePlayer { private trace: DebugTrace | null = null; // The position of the read head within the trace array. private cursor: number = 0; // Tracks the current scope depth (as indicated by trace_scope). private scope: number = 0; // Tracks assignments into our data slots. private slots: Slot[] = []; // Tracks the trace stack (as indicated by trace_enter and trace_exit). private stack: StackFrame[] = []; // the execution stack // Tracks which line numbers are reached by the trace, and the number of times it's reached. private lineNumbers: Map<number, number> = new Map(); // Tracks all the data slots which have been touched during the current step. 
private dirtyMask: boolean[] = []; // Tracks all the data slots which hold function return values. private returnValues: boolean[] = []; // Tracks line numbers that have breakpoints set on them. private breakpointLines: Set<number> = new Set(); /** Throws an error if a precondition is not met. Indicates a logic bug or invalid trace. */ private check(result: boolean): void { if (!result) { throw new Error('check failed'); } } /** Copies trace info from the JSON number array into a TraceInfo struct. */ private getTraceInfo(position: number): TraceInfo { this.check(position < this.trace!.trace.length); this.check(this.trace!.trace[position][0] in TraceOp); const info: TraceInfo = { op: this.trace!.trace[position][0] as TraceOp, data: this.trace!.trace[position].slice(1), }; return info; } /** Resets playback to the start of the trace. Breakpoints are not cleared. */ public reset(trace: DebugTrace | null): void { const nslots = trace?.slots?.length ?? 0; const globalStackFrame: StackFrame = { func: -1, line: -1, displayMask: Array<boolean>(nslots).map(() => false), }; this.trace = trace; this.cursor = 0; this.slots = []; this.stack = [globalStackFrame]; this.dirtyMask = Array<boolean>(nslots).map(() => false); this.returnValues = Array<boolean>(nslots).map(() => false); if (trace !== null) { this.slots = trace.slots.map( (): Slot => ({ value: 0, scope: Infinity, writeTime: 0, }) ); this.returnValues = trace.slots.map( (slotInfo: SlotInfo): boolean => (slotInfo.retval ?? -1) >= 0 ); // Build a map holding the number of times each line is reached. this.lineNumbers.clear(); trace.trace.forEach((_, traceIdx: number) => { const info: TraceInfo = this.getTraceInfo(traceIdx); if (info.op === TraceOp.Line) { const lineNumber = info.data[0]; const lineCount = this.lineNumbers.get(lineNumber) ?? 0; this.lineNumbers.set(lineNumber, lineCount + 1); } }); } } /** Advances the simulation to the next Line op. 
*/ public step(): void { this.tidyState(); while (!this.traceHasCompleted()) { if (this.execute(this.cursor++)) { break; } } } /** * Advances the simulation to the next Line op, skipping past matched Enter/Exit pairs. * Breakpoints will also stop the simulation even if we haven't reached an Exit. */ public stepOver(): void { this.tidyState(); const initialStackDepth = this.stack.length; while (!this.traceHasCompleted()) { const canEscapeFromThisStackDepth = this.stack.length <= initialStackDepth; if (this.execute(this.cursor++)) { if (canEscapeFromThisStackDepth || this.atBreakpoint()) { break; } } } } /** * Advances the simulation until we exit from the current stack frame. * Breakpoints will also stop the simulation even if we haven't left the stack frame. */ public stepOut(): void { this.tidyState(); const initialStackDepth = this.stack.length; while (!this.traceHasCompleted()) { if (this.execute(this.cursor++)) { const hasEscapedFromInitialStackDepth = this.stack.length < initialStackDepth; if (hasEscapedFromInitialStackDepth || this.atBreakpoint()) { break; } } } } /** Advances the simulation until we hit a breakpoint, or the trace completes. */ public run(): void { this.tidyState(); while (!this.traceHasCompleted()) { if (this.execute(this.cursor++)) { if (this.atBreakpoint()) { break; } } } } /** * Cleans up temporary state between steps, such as the dirty mask and function return values. */ private tidyState(): void { this.dirtyMask.fill(false); const stackTop = this.stack[this.stack.length - 1]; this.returnValues.forEach((_, slotIdx: number) => { stackTop.displayMask[slotIdx] &&= !this.returnValues[slotIdx]; }); } /** Returns true if we have reached the end of the trace. */ public traceHasCompleted(): boolean { return this.trace == null || this.cursor >= this.trace.trace.length; } /** Reports the position of the cursor "read head" within the array of trace instructions. 
*/ public getCursor(): number { return this.cursor; } /** Returns true if the current line has a breakpoint set on it. */ public
(): boolean { return this.breakpointLines.has(this.getCurrentLine()); } /** Replaces all current breakpoints with a new set of them. */ public setBreakpoints(breakpointLines: Set<number>): void { this.breakpointLines = breakpointLines; } /** Returns the current set of lines which have a breakpoint. */ public getBreakpoints(): Set<number> { return this.breakpointLines; } /** Adds a breakpoint to a line (if one doesn't exist). */ public addBreakpoint(line: number): void { this.breakpointLines.add(line); } /** Removes a breakpoint from a line (if one exists). */ public removeBreakpoint(line: number): void { this.breakpointLines.delete(line); } /** Retrieves the current line. */ public getCurrentLine(): number { this.check(this.stack.length > 0); return this.stack[this.stack.length - 1].line; } /** Retrieves the current line for a given stack frame. */ public getCurrentLineInStackFrame(stackFrameIndex: number): number { // The first entry on the stack is the "global" frame before we enter main, so offset our index // by one to account for it. ++stackFrameIndex; this.check(stackFrameIndex > 0); this.check(stackFrameIndex < this.stack.length); return this.stack[stackFrameIndex].line; } /** * Returns every line number reached inside this debug trace, along with the remaining number of * times that this trace will reach it. e.g. {100, 2} means line 100 will be reached twice. */ public getLineNumbersReached(): Map<number, number> { return this.lineNumbers; } /** Returns the call stack as an array of FunctionInfo indices. */ public getCallStack(): number[] { this.check(this.stack.length > 0); return this.stack.slice(1).map((frame: StackFrame) => frame.func); } /** Returns the size of the call stack. */ public getStackDepth(): number { this.check(this.stack.length > 0); return this.stack.length - 1; } /** Returns a slot's component as a variable-name suffix, e.g. ".x" or "[2][2]". 
*/ public getSlotComponentSuffix(slotIndex: number): string { const slot: SlotInfo = this.trace!.slots[slotIndex]; if (slot.rows > 1) { return `[${Math.floor(slot.index / slot.rows)}][${ slot.index % slot.rows }]`; } if (slot.columns > 1) { switch (slot.index) { case 0: return '.x'; case 1: return '.y'; case 2: return '.z'; case 3: return '.w'; default: return '[???]'; } } return ''; } /** Bit-casts a value for a given slot into a double, honoring the slot's NumberKind. */ private interpretValueBits( slotIdx: number, valueBits: number ): number | boolean { const bitArray: Int32Array = new Int32Array(1); bitArray[0] = valueBits; switch (this.trace!.slots[slotIdx].kind) { case NumberKind.Float: return new Float32Array(bitArray.buffer)[0]; case NumberKind.Unsigned: return new Uint32Array(bitArray.buffer)[0]; case NumberKind.Boolean: return valueBits !== 0; case NumberKind.Signed: return valueBits; default: return valueBits; } } /** Returns a vector of the indices and values of each slot that is enabled in `bits`. */ private getVariablesForDisplayMask(displayMask: boolean[]): VariableData[] { this.check(displayMask.length === this.slots.length); let vars: VariableData[] = []; displayMask.forEach((_, slot: number) => { if (displayMask[slot]) { const varData: VariableData = { slotIndex: slot, dirty: this.dirtyMask[slot], value: this.interpretValueBits(slot, this.slots[slot].value), }; vars.push(varData); } }); // Order the variable list so that the most recently-written variables are shown at the top. vars = vars.sort((a: VariableData, b: VariableData) => { // Order by descending write-time. const delta = this.slots[b.slotIndex].writeTime - this.slots[a.slotIndex].writeTime; if (delta !== 0) { return delta; } // If write times match, order by ascending slot index (preserving the existing order). return a.slotIndex - b.slotIndex; }); return vars; } /** Returns the variables in a given stack frame. 
*/ public getLocalVariables(stackFrameIndex: number): VariableData[] { // The first entry on the stack is the "global" frame before we enter main, so offset our index // by one to account for it. ++stackFrameIndex; this.check(stackFrameIndex > 0); this.check(stackFrameIndex < this.stack.length); return this.getVariablesForDisplayMask( this.stack[stackFrameIndex].displayMask ); } /** Returns the variables at global scope. */ public getGlobalVariables(): VariableData[] { if (this.stack.length < 1) { return []; } return this.getVariablesForDisplayMask(this.stack[0].displayMask); } /** Updates fWriteTime for the entire variable at a given slot. */ private updateVariableWriteTime(slotIdx: number, cursor: number): void { // The slotIdx could point to any slot within a variable. // We want to update the write time on EVERY slot associated with this variable. // The SlotInfo's groupIndex gives us enough information to find the affected range. const changedSlot = this.trace!.slots[slotIdx]; slotIdx -= changedSlot.groupIdx!; this.check(slotIdx >= 0); this.check(slotIdx < this.trace!.slots.length); for (;;) { this.slots[slotIdx++].writeTime = cursor; // Stop if we've reached the final slot. if (slotIdx >= this.trace!.slots.length) { break; } // Each separate variable-group starts with a groupIndex of 0; stop when we detect this. if (this.trace!.slots[slotIdx].groupIdx! == 0) { break; } } } /** * Executes the trace op at the passed-in cursor position. Returns true if we've reached a line * or exit trace op, which indicate a stopping point. */ private execute(position: number): boolean { const trace = this.getTraceInfo(position); this.check(this.stack.length > 0); const stackTop: StackFrame = this.stack[this.stack.length - 1]; switch (trace.op) { case TraceOp.Line: { // data: line number, (unused) const lineNumber = trace.data[0]; const lineCount = this.lineNumbers.get(lineNumber) ?? 
0; this.check(lineNumber >= 0); this.check(lineNumber < this.trace!.source.length); this.check(lineCount > 0); stackTop.line = lineNumber; this.lineNumbers.set(lineNumber, lineCount - 1); return true; } case TraceOp.Var: { // data: slot, value const slotIdx = trace.data[0]; const value = trace.data[1]; this.check(slotIdx >= 0); this.check(slotIdx < this.slots.length); this.slots[slotIdx].value = value; this.slots[slotIdx].scope = Math.min( this.slots[slotIdx].scope, this.scope ); this.updateVariableWriteTime(slotIdx, position); if ((this.trace!.slots[slotIdx].retval ?? -1) < 0) { // Normal variables are associated with the current function. stackTop.displayMask[slotIdx] = true; } else { // Return values are associated with the parent function (since the current function // is exiting and we won't see them there). this.check(this.stack.length > 1); this.stack[this.stack.length - 2].displayMask[slotIdx] = true; } this.dirtyMask[slotIdx] = true; break; } case TraceOp.Enter: { // data: function index, (unused) const fnIdx = trace.data[0]; this.check(fnIdx >= 0); this.check(fnIdx < this.trace!.functions.length); const enteredStackFrame: StackFrame = { func: fnIdx, line: -1, displayMask: Array<boolean>(this.slots.length).fill(false), }; this.stack.push(enteredStackFrame); break; } case TraceOp.Exit: { // data: function index, (unused) const fnIdx = trace.data[0]; this.check(stackTop.func === fnIdx); this.stack.pop(); return true; } case TraceOp.Scope: { // data: scope delta, (unused) const scopeDelta = trace.data[0]; this.scope += scopeDelta; if (scopeDelta < 0) { // If the scope is being reduced, discard variables that are now out of scope. this.slots.forEach((_, slotIdx: number) => { if (this.scope < this.slots[slotIdx].scope) { this.slots[slotIdx].scope = Infinity; stackTop.displayMask[slotIdx] = false; } }); } break; } default: { throw new Error('unrecognized trace instruction'); } } return false; } }
atBreakpoint
identifier_name
debug-trace-player.ts
import { DebugTrace, SlotInfo } from '../debug-trace/debug-trace'; // The TraceOp enum must stay in sync with SkSL::SkVMTraceInfo::Op. enum TraceOp { Line = 0, Var = 1, Enter = 2, Exit = 3, Scope = 4, } // The NumberKind enum must stay in sync with SkSL::Type::NumberKind. enum NumberKind { Float = 0, Signed = 1, Unsigned = 2, Boolean = 3, Nonnumeric = 4, } // Trace data comes in from the JSON as a number[]. We unpack it into a TraceInfo for ease of use. type TraceInfo = { op: TraceOp; data: number[]; }; type StackFrame = { // A FunctionInfo from trace.functions. func: number; // The current line number within the function. line: number; // Any variable slots which have been touched in this function. displayMask: boolean[]; }; type Slot = { // The current raw value held in this slot (as a 32-bit integer, not bit-punned). value: number; // The scope depth associated with this slot (as indicated by trace_scope). scope: number; // When was the variable in this slot most recently written? (as a cursor position) writeTime: number; }; export type VariableData = { // A SlotInfo from trace.slots. slotIndex: number; // Has this slot been written-to since the last step call? dirty: boolean; // The current value held in this slot (properly bit-punned/cast to the expected type) value: number | boolean; }; export class DebugTracePlayer { private trace: DebugTrace | null = null; // The position of the read head within the trace array. private cursor: number = 0; // Tracks the current scope depth (as indicated by trace_scope). private scope: number = 0; // Tracks assignments into our data slots. private slots: Slot[] = []; // Tracks the trace stack (as indicated by trace_enter and trace_exit). private stack: StackFrame[] = []; // the execution stack // Tracks which line numbers are reached by the trace, and the number of times it's reached. private lineNumbers: Map<number, number> = new Map(); // Tracks all the data slots which have been touched during the current step. 
private dirtyMask: boolean[] = []; // Tracks all the data slots which hold function return values. private returnValues: boolean[] = []; // Tracks line numbers that have breakpoints set on them. private breakpointLines: Set<number> = new Set(); /** Throws an error if a precondition is not met. Indicates a logic bug or invalid trace. */ private check(result: boolean): void { if (!result) { throw new Error('check failed'); } } /** Copies trace info from the JSON number array into a TraceInfo struct. */ private getTraceInfo(position: number): TraceInfo { this.check(position < this.trace!.trace.length); this.check(this.trace!.trace[position][0] in TraceOp); const info: TraceInfo = { op: this.trace!.trace[position][0] as TraceOp, data: this.trace!.trace[position].slice(1), }; return info; } /** Resets playback to the start of the trace. Breakpoints are not cleared. */ public reset(trace: DebugTrace | null): void { const nslots = trace?.slots?.length ?? 0; const globalStackFrame: StackFrame = { func: -1, line: -1, displayMask: Array<boolean>(nslots).map(() => false), }; this.trace = trace; this.cursor = 0; this.slots = []; this.stack = [globalStackFrame]; this.dirtyMask = Array<boolean>(nslots).map(() => false); this.returnValues = Array<boolean>(nslots).map(() => false); if (trace !== null) { this.slots = trace.slots.map( (): Slot => ({ value: 0, scope: Infinity, writeTime: 0, }) ); this.returnValues = trace.slots.map( (slotInfo: SlotInfo): boolean => (slotInfo.retval ?? -1) >= 0 ); // Build a map holding the number of times each line is reached. this.lineNumbers.clear(); trace.trace.forEach((_, traceIdx: number) => { const info: TraceInfo = this.getTraceInfo(traceIdx); if (info.op === TraceOp.Line) { const lineNumber = info.data[0]; const lineCount = this.lineNumbers.get(lineNumber) ?? 0; this.lineNumbers.set(lineNumber, lineCount + 1); } }); } } /** Advances the simulation to the next Line op. 
*/ public step(): void { this.tidyState(); while (!this.traceHasCompleted()) { if (this.execute(this.cursor++)) { break; } } } /** * Advances the simulation to the next Line op, skipping past matched Enter/Exit pairs. * Breakpoints will also stop the simulation even if we haven't reached an Exit. */ public stepOver(): void { this.tidyState(); const initialStackDepth = this.stack.length; while (!this.traceHasCompleted()) { const canEscapeFromThisStackDepth = this.stack.length <= initialStackDepth; if (this.execute(this.cursor++)) { if (canEscapeFromThisStackDepth || this.atBreakpoint()) { break; } } } } /** * Advances the simulation until we exit from the current stack frame. * Breakpoints will also stop the simulation even if we haven't left the stack frame. */ public stepOut(): void { this.tidyState(); const initialStackDepth = this.stack.length; while (!this.traceHasCompleted()) { if (this.execute(this.cursor++)) { const hasEscapedFromInitialStackDepth = this.stack.length < initialStackDepth; if (hasEscapedFromInitialStackDepth || this.atBreakpoint()) { break; } } } } /** Advances the simulation until we hit a breakpoint, or the trace completes. */ public run(): void { this.tidyState(); while (!this.traceHasCompleted()) { if (this.execute(this.cursor++)) { if (this.atBreakpoint()) { break; } } } } /** * Cleans up temporary state between steps, such as the dirty mask and function return values. */ private tidyState(): void { this.dirtyMask.fill(false); const stackTop = this.stack[this.stack.length - 1]; this.returnValues.forEach((_, slotIdx: number) => { stackTop.displayMask[slotIdx] &&= !this.returnValues[slotIdx]; }); } /** Returns true if we have reached the end of the trace. */ public traceHasCompleted(): boolean { return this.trace == null || this.cursor >= this.trace.trace.length; } /** Reports the position of the cursor "read head" within the array of trace instructions. 
*/ public getCursor(): number { return this.cursor; } /** Returns true if the current line has a breakpoint set on it. */ public atBreakpoint(): boolean { return this.breakpointLines.has(this.getCurrentLine()); } /** Replaces all current breakpoints with a new set of them. */ public setBreakpoints(breakpointLines: Set<number>): void { this.breakpointLines = breakpointLines; } /** Returns the current set of lines which have a breakpoint. */ public getBreakpoints(): Set<number> { return this.breakpointLines; } /** Adds a breakpoint to a line (if one doesn't exist). */ public addBreakpoint(line: number): void { this.breakpointLines.add(line); } /** Removes a breakpoint from a line (if one exists). */ public removeBreakpoint(line: number): void { this.breakpointLines.delete(line); } /** Retrieves the current line. */ public getCurrentLine(): number { this.check(this.stack.length > 0); return this.stack[this.stack.length - 1].line; } /** Retrieves the current line for a given stack frame. */ public getCurrentLineInStackFrame(stackFrameIndex: number): number { // The first entry on the stack is the "global" frame before we enter main, so offset our index // by one to account for it. ++stackFrameIndex; this.check(stackFrameIndex > 0); this.check(stackFrameIndex < this.stack.length); return this.stack[stackFrameIndex].line; } /** * Returns every line number reached inside this debug trace, along with the remaining number of * times that this trace will reach it. e.g. {100, 2} means line 100 will be reached twice. */ public getLineNumbersReached(): Map<number, number> { return this.lineNumbers; } /** Returns the call stack as an array of FunctionInfo indices. */ public getCallStack(): number[] { this.check(this.stack.length > 0); return this.stack.slice(1).map((frame: StackFrame) => frame.func); } /** Returns the size of the call stack. */ public getStackDepth(): number
/** Returns a slot's component as a variable-name suffix, e.g. ".x" or "[2][2]". */ public getSlotComponentSuffix(slotIndex: number): string { const slot: SlotInfo = this.trace!.slots[slotIndex]; if (slot.rows > 1) { return `[${Math.floor(slot.index / slot.rows)}][${ slot.index % slot.rows }]`; } if (slot.columns > 1) { switch (slot.index) { case 0: return '.x'; case 1: return '.y'; case 2: return '.z'; case 3: return '.w'; default: return '[???]'; } } return ''; } /** Bit-casts a value for a given slot into a double, honoring the slot's NumberKind. */ private interpretValueBits( slotIdx: number, valueBits: number ): number | boolean { const bitArray: Int32Array = new Int32Array(1); bitArray[0] = valueBits; switch (this.trace!.slots[slotIdx].kind) { case NumberKind.Float: return new Float32Array(bitArray.buffer)[0]; case NumberKind.Unsigned: return new Uint32Array(bitArray.buffer)[0]; case NumberKind.Boolean: return valueBits !== 0; case NumberKind.Signed: return valueBits; default: return valueBits; } } /** Returns a vector of the indices and values of each slot that is enabled in `bits`. */ private getVariablesForDisplayMask(displayMask: boolean[]): VariableData[] { this.check(displayMask.length === this.slots.length); let vars: VariableData[] = []; displayMask.forEach((_, slot: number) => { if (displayMask[slot]) { const varData: VariableData = { slotIndex: slot, dirty: this.dirtyMask[slot], value: this.interpretValueBits(slot, this.slots[slot].value), }; vars.push(varData); } }); // Order the variable list so that the most recently-written variables are shown at the top. vars = vars.sort((a: VariableData, b: VariableData) => { // Order by descending write-time. const delta = this.slots[b.slotIndex].writeTime - this.slots[a.slotIndex].writeTime; if (delta !== 0) { return delta; } // If write times match, order by ascending slot index (preserving the existing order). 
return a.slotIndex - b.slotIndex; }); return vars; } /** Returns the variables in a given stack frame. */ public getLocalVariables(stackFrameIndex: number): VariableData[] { // The first entry on the stack is the "global" frame before we enter main, so offset our index // by one to account for it. ++stackFrameIndex; this.check(stackFrameIndex > 0); this.check(stackFrameIndex < this.stack.length); return this.getVariablesForDisplayMask( this.stack[stackFrameIndex].displayMask ); } /** Returns the variables at global scope. */ public getGlobalVariables(): VariableData[] { if (this.stack.length < 1) { return []; } return this.getVariablesForDisplayMask(this.stack[0].displayMask); } /** Updates fWriteTime for the entire variable at a given slot. */ private updateVariableWriteTime(slotIdx: number, cursor: number): void { // The slotIdx could point to any slot within a variable. // We want to update the write time on EVERY slot associated with this variable. // The SlotInfo's groupIndex gives us enough information to find the affected range. const changedSlot = this.trace!.slots[slotIdx]; slotIdx -= changedSlot.groupIdx!; this.check(slotIdx >= 0); this.check(slotIdx < this.trace!.slots.length); for (;;) { this.slots[slotIdx++].writeTime = cursor; // Stop if we've reached the final slot. if (slotIdx >= this.trace!.slots.length) { break; } // Each separate variable-group starts with a groupIndex of 0; stop when we detect this. if (this.trace!.slots[slotIdx].groupIdx! == 0) { break; } } } /** * Executes the trace op at the passed-in cursor position. Returns true if we've reached a line * or exit trace op, which indicate a stopping point. 
*/ private execute(position: number): boolean { const trace = this.getTraceInfo(position); this.check(this.stack.length > 0); const stackTop: StackFrame = this.stack[this.stack.length - 1]; switch (trace.op) { case TraceOp.Line: { // data: line number, (unused) const lineNumber = trace.data[0]; const lineCount = this.lineNumbers.get(lineNumber) ?? 0; this.check(lineNumber >= 0); this.check(lineNumber < this.trace!.source.length); this.check(lineCount > 0); stackTop.line = lineNumber; this.lineNumbers.set(lineNumber, lineCount - 1); return true; } case TraceOp.Var: { // data: slot, value const slotIdx = trace.data[0]; const value = trace.data[1]; this.check(slotIdx >= 0); this.check(slotIdx < this.slots.length); this.slots[slotIdx].value = value; this.slots[slotIdx].scope = Math.min( this.slots[slotIdx].scope, this.scope ); this.updateVariableWriteTime(slotIdx, position); if ((this.trace!.slots[slotIdx].retval ?? -1) < 0) { // Normal variables are associated with the current function. stackTop.displayMask[slotIdx] = true; } else { // Return values are associated with the parent function (since the current function // is exiting and we won't see them there). 
this.check(this.stack.length > 1); this.stack[this.stack.length - 2].displayMask[slotIdx] = true; } this.dirtyMask[slotIdx] = true; break; } case TraceOp.Enter: { // data: function index, (unused) const fnIdx = trace.data[0]; this.check(fnIdx >= 0); this.check(fnIdx < this.trace!.functions.length); const enteredStackFrame: StackFrame = { func: fnIdx, line: -1, displayMask: Array<boolean>(this.slots.length).fill(false), }; this.stack.push(enteredStackFrame); break; } case TraceOp.Exit: { // data: function index, (unused) const fnIdx = trace.data[0]; this.check(stackTop.func === fnIdx); this.stack.pop(); return true; } case TraceOp.Scope: { // data: scope delta, (unused) const scopeDelta = trace.data[0]; this.scope += scopeDelta; if (scopeDelta < 0) { // If the scope is being reduced, discard variables that are now out of scope. this.slots.forEach((_, slotIdx: number) => { if (this.scope < this.slots[slotIdx].scope) { this.slots[slotIdx].scope = Infinity; stackTop.displayMask[slotIdx] = false; } }); } break; } default: { throw new Error('unrecognized trace instruction'); } } return false; } }
{ this.check(this.stack.length > 0); return this.stack.length - 1; }
identifier_body
debug-trace-player.ts
import { DebugTrace, SlotInfo } from '../debug-trace/debug-trace'; // The TraceOp enum must stay in sync with SkSL::SkVMTraceInfo::Op. enum TraceOp { Line = 0, Var = 1, Enter = 2, Exit = 3, Scope = 4, } // The NumberKind enum must stay in sync with SkSL::Type::NumberKind. enum NumberKind { Float = 0, Signed = 1, Unsigned = 2, Boolean = 3, Nonnumeric = 4, } // Trace data comes in from the JSON as a number[]. We unpack it into a TraceInfo for ease of use. type TraceInfo = { op: TraceOp; data: number[]; }; type StackFrame = { // A FunctionInfo from trace.functions. func: number; // The current line number within the function. line: number; // Any variable slots which have been touched in this function. displayMask: boolean[]; }; type Slot = { // The current raw value held in this slot (as a 32-bit integer, not bit-punned). value: number; // The scope depth associated with this slot (as indicated by trace_scope). scope: number; // When was the variable in this slot most recently written? (as a cursor position) writeTime: number; }; export type VariableData = { // A SlotInfo from trace.slots. slotIndex: number; // Has this slot been written-to since the last step call? dirty: boolean; // The current value held in this slot (properly bit-punned/cast to the expected type) value: number | boolean; }; export class DebugTracePlayer { private trace: DebugTrace | null = null; // The position of the read head within the trace array. private cursor: number = 0; // Tracks the current scope depth (as indicated by trace_scope). private scope: number = 0; // Tracks assignments into our data slots. private slots: Slot[] = []; // Tracks the trace stack (as indicated by trace_enter and trace_exit). private stack: StackFrame[] = []; // the execution stack // Tracks which line numbers are reached by the trace, and the number of times it's reached. private lineNumbers: Map<number, number> = new Map(); // Tracks all the data slots which have been touched during the current step. 
private dirtyMask: boolean[] = []; // Tracks all the data slots which hold function return values. private returnValues: boolean[] = []; // Tracks line numbers that have breakpoints set on them. private breakpointLines: Set<number> = new Set(); /** Throws an error if a precondition is not met. Indicates a logic bug or invalid trace. */ private check(result: boolean): void { if (!result) { throw new Error('check failed'); } } /** Copies trace info from the JSON number array into a TraceInfo struct. */ private getTraceInfo(position: number): TraceInfo { this.check(position < this.trace!.trace.length); this.check(this.trace!.trace[position][0] in TraceOp); const info: TraceInfo = { op: this.trace!.trace[position][0] as TraceOp, data: this.trace!.trace[position].slice(1), }; return info; } /** Resets playback to the start of the trace. Breakpoints are not cleared. */ public reset(trace: DebugTrace | null): void { const nslots = trace?.slots?.length ?? 0; const globalStackFrame: StackFrame = { func: -1, line: -1, displayMask: Array<boolean>(nslots).map(() => false), }; this.trace = trace; this.cursor = 0; this.slots = []; this.stack = [globalStackFrame]; this.dirtyMask = Array<boolean>(nslots).map(() => false); this.returnValues = Array<boolean>(nslots).map(() => false); if (trace !== null) { this.slots = trace.slots.map( (): Slot => ({ value: 0, scope: Infinity, writeTime: 0, }) ); this.returnValues = trace.slots.map( (slotInfo: SlotInfo): boolean => (slotInfo.retval ?? -1) >= 0 ); // Build a map holding the number of times each line is reached. this.lineNumbers.clear(); trace.trace.forEach((_, traceIdx: number) => { const info: TraceInfo = this.getTraceInfo(traceIdx); if (info.op === TraceOp.Line) { const lineNumber = info.data[0]; const lineCount = this.lineNumbers.get(lineNumber) ?? 0; this.lineNumbers.set(lineNumber, lineCount + 1); } }); } } /** Advances the simulation to the next Line op. 
*/ public step(): void { this.tidyState(); while (!this.traceHasCompleted()) { if (this.execute(this.cursor++)) { break; } } } /** * Advances the simulation to the next Line op, skipping past matched Enter/Exit pairs. * Breakpoints will also stop the simulation even if we haven't reached an Exit. */ public stepOver(): void { this.tidyState(); const initialStackDepth = this.stack.length; while (!this.traceHasCompleted()) { const canEscapeFromThisStackDepth = this.stack.length <= initialStackDepth; if (this.execute(this.cursor++)) { if (canEscapeFromThisStackDepth || this.atBreakpoint()) { break; } } } } /** * Advances the simulation until we exit from the current stack frame. * Breakpoints will also stop the simulation even if we haven't left the stack frame. */ public stepOut(): void { this.tidyState(); const initialStackDepth = this.stack.length; while (!this.traceHasCompleted()) { if (this.execute(this.cursor++)) { const hasEscapedFromInitialStackDepth = this.stack.length < initialStackDepth; if (hasEscapedFromInitialStackDepth || this.atBreakpoint()) { break; } } } } /** Advances the simulation until we hit a breakpoint, or the trace completes. */ public run(): void { this.tidyState(); while (!this.traceHasCompleted()) { if (this.execute(this.cursor++)) { if (this.atBreakpoint()) { break; } } } } /** * Cleans up temporary state between steps, such as the dirty mask and function return values. */ private tidyState(): void { this.dirtyMask.fill(false); const stackTop = this.stack[this.stack.length - 1]; this.returnValues.forEach((_, slotIdx: number) => { stackTop.displayMask[slotIdx] &&= !this.returnValues[slotIdx]; }); } /** Returns true if we have reached the end of the trace. */ public traceHasCompleted(): boolean { return this.trace == null || this.cursor >= this.trace.trace.length; } /** Reports the position of the cursor "read head" within the array of trace instructions. 
*/ public getCursor(): number { return this.cursor; } /** Returns true if the current line has a breakpoint set on it. */ public atBreakpoint(): boolean { return this.breakpointLines.has(this.getCurrentLine()); } /** Replaces all current breakpoints with a new set of them. */ public setBreakpoints(breakpointLines: Set<number>): void { this.breakpointLines = breakpointLines; } /** Returns the current set of lines which have a breakpoint. */ public getBreakpoints(): Set<number> { return this.breakpointLines; } /** Adds a breakpoint to a line (if one doesn't exist). */ public addBreakpoint(line: number): void { this.breakpointLines.add(line); } /** Removes a breakpoint from a line (if one exists). */ public removeBreakpoint(line: number): void { this.breakpointLines.delete(line); } /** Retrieves the current line. */ public getCurrentLine(): number { this.check(this.stack.length > 0); return this.stack[this.stack.length - 1].line; } /** Retrieves the current line for a given stack frame. */ public getCurrentLineInStackFrame(stackFrameIndex: number): number { // The first entry on the stack is the "global" frame before we enter main, so offset our index // by one to account for it. ++stackFrameIndex; this.check(stackFrameIndex > 0); this.check(stackFrameIndex < this.stack.length); return this.stack[stackFrameIndex].line; } /** * Returns every line number reached inside this debug trace, along with the remaining number of * times that this trace will reach it. e.g. {100, 2} means line 100 will be reached twice. */ public getLineNumbersReached(): Map<number, number> { return this.lineNumbers; } /** Returns the call stack as an array of FunctionInfo indices. */ public getCallStack(): number[] { this.check(this.stack.length > 0); return this.stack.slice(1).map((frame: StackFrame) => frame.func); } /** Returns the size of the call stack. 
*/ public getStackDepth(): number { this.check(this.stack.length > 0); return this.stack.length - 1; } /** Returns a slot's component as a variable-name suffix, e.g. ".x" or "[2][2]". */ public getSlotComponentSuffix(slotIndex: number): string { const slot: SlotInfo = this.trace!.slots[slotIndex]; if (slot.rows > 1) { return `[${Math.floor(slot.index / slot.rows)}][${ slot.index % slot.rows }]`; } if (slot.columns > 1) { switch (slot.index) { case 0: return '.x'; case 1: return '.y'; case 2: return '.z'; case 3: return '.w'; default: return '[???]'; } } return ''; } /** Bit-casts a value for a given slot into a double, honoring the slot's NumberKind. */ private interpretValueBits( slotIdx: number, valueBits: number ): number | boolean { const bitArray: Int32Array = new Int32Array(1); bitArray[0] = valueBits; switch (this.trace!.slots[slotIdx].kind) { case NumberKind.Float: return new Float32Array(bitArray.buffer)[0]; case NumberKind.Unsigned: return new Uint32Array(bitArray.buffer)[0]; case NumberKind.Boolean: return valueBits !== 0; case NumberKind.Signed: return valueBits; default: return valueBits; } } /** Returns a vector of the indices and values of each slot that is enabled in `bits`. */ private getVariablesForDisplayMask(displayMask: boolean[]): VariableData[] { this.check(displayMask.length === this.slots.length); let vars: VariableData[] = []; displayMask.forEach((_, slot: number) => { if (displayMask[slot]) { const varData: VariableData = { slotIndex: slot, dirty: this.dirtyMask[slot], value: this.interpretValueBits(slot, this.slots[slot].value), }; vars.push(varData); } }); // Order the variable list so that the most recently-written variables are shown at the top. vars = vars.sort((a: VariableData, b: VariableData) => { // Order by descending write-time. const delta = this.slots[b.slotIndex].writeTime - this.slots[a.slotIndex].writeTime; if (delta !== 0)
// If write times match, order by ascending slot index (preserving the existing order). return a.slotIndex - b.slotIndex; }); return vars; } /** Returns the variables in a given stack frame. */ public getLocalVariables(stackFrameIndex: number): VariableData[] { // The first entry on the stack is the "global" frame before we enter main, so offset our index // by one to account for it. ++stackFrameIndex; this.check(stackFrameIndex > 0); this.check(stackFrameIndex < this.stack.length); return this.getVariablesForDisplayMask( this.stack[stackFrameIndex].displayMask ); } /** Returns the variables at global scope. */ public getGlobalVariables(): VariableData[] { if (this.stack.length < 1) { return []; } return this.getVariablesForDisplayMask(this.stack[0].displayMask); } /** Updates fWriteTime for the entire variable at a given slot. */ private updateVariableWriteTime(slotIdx: number, cursor: number): void { // The slotIdx could point to any slot within a variable. // We want to update the write time on EVERY slot associated with this variable. // The SlotInfo's groupIndex gives us enough information to find the affected range. const changedSlot = this.trace!.slots[slotIdx]; slotIdx -= changedSlot.groupIdx!; this.check(slotIdx >= 0); this.check(slotIdx < this.trace!.slots.length); for (;;) { this.slots[slotIdx++].writeTime = cursor; // Stop if we've reached the final slot. if (slotIdx >= this.trace!.slots.length) { break; } // Each separate variable-group starts with a groupIndex of 0; stop when we detect this. if (this.trace!.slots[slotIdx].groupIdx! == 0) { break; } } } /** * Executes the trace op at the passed-in cursor position. Returns true if we've reached a line * or exit trace op, which indicate a stopping point. 
*/ private execute(position: number): boolean { const trace = this.getTraceInfo(position); this.check(this.stack.length > 0); const stackTop: StackFrame = this.stack[this.stack.length - 1]; switch (trace.op) { case TraceOp.Line: { // data: line number, (unused) const lineNumber = trace.data[0]; const lineCount = this.lineNumbers.get(lineNumber) ?? 0; this.check(lineNumber >= 0); this.check(lineNumber < this.trace!.source.length); this.check(lineCount > 0); stackTop.line = lineNumber; this.lineNumbers.set(lineNumber, lineCount - 1); return true; } case TraceOp.Var: { // data: slot, value const slotIdx = trace.data[0]; const value = trace.data[1]; this.check(slotIdx >= 0); this.check(slotIdx < this.slots.length); this.slots[slotIdx].value = value; this.slots[slotIdx].scope = Math.min( this.slots[slotIdx].scope, this.scope ); this.updateVariableWriteTime(slotIdx, position); if ((this.trace!.slots[slotIdx].retval ?? -1) < 0) { // Normal variables are associated with the current function. stackTop.displayMask[slotIdx] = true; } else { // Return values are associated with the parent function (since the current function // is exiting and we won't see them there). 
this.check(this.stack.length > 1); this.stack[this.stack.length - 2].displayMask[slotIdx] = true; } this.dirtyMask[slotIdx] = true; break; } case TraceOp.Enter: { // data: function index, (unused) const fnIdx = trace.data[0]; this.check(fnIdx >= 0); this.check(fnIdx < this.trace!.functions.length); const enteredStackFrame: StackFrame = { func: fnIdx, line: -1, displayMask: Array<boolean>(this.slots.length).fill(false), }; this.stack.push(enteredStackFrame); break; } case TraceOp.Exit: { // data: function index, (unused) const fnIdx = trace.data[0]; this.check(stackTop.func === fnIdx); this.stack.pop(); return true; } case TraceOp.Scope: { // data: scope delta, (unused) const scopeDelta = trace.data[0]; this.scope += scopeDelta; if (scopeDelta < 0) { // If the scope is being reduced, discard variables that are now out of scope. this.slots.forEach((_, slotIdx: number) => { if (this.scope < this.slots[slotIdx].scope) { this.slots[slotIdx].scope = Infinity; stackTop.displayMask[slotIdx] = false; } }); } break; } default: { throw new Error('unrecognized trace instruction'); } } return false; } }
{ return delta; }
conditional_block
debug-trace-player.ts
import { DebugTrace, SlotInfo } from '../debug-trace/debug-trace'; // The TraceOp enum must stay in sync with SkSL::SkVMTraceInfo::Op. enum TraceOp { Line = 0, Var = 1, Enter = 2, Exit = 3, Scope = 4, } // The NumberKind enum must stay in sync with SkSL::Type::NumberKind. enum NumberKind { Float = 0, Signed = 1, Unsigned = 2, Boolean = 3, Nonnumeric = 4, } // Trace data comes in from the JSON as a number[]. We unpack it into a TraceInfo for ease of use. type TraceInfo = { op: TraceOp; data: number[]; }; type StackFrame = { // A FunctionInfo from trace.functions. func: number; // The current line number within the function. line: number; // Any variable slots which have been touched in this function. displayMask: boolean[]; }; type Slot = { // The current raw value held in this slot (as a 32-bit integer, not bit-punned). value: number; // The scope depth associated with this slot (as indicated by trace_scope). scope: number; // When was the variable in this slot most recently written? (as a cursor position) writeTime: number; }; export type VariableData = { // A SlotInfo from trace.slots. slotIndex: number; // Has this slot been written-to since the last step call? dirty: boolean; // The current value held in this slot (properly bit-punned/cast to the expected type) value: number | boolean; }; export class DebugTracePlayer { private trace: DebugTrace | null = null; // The position of the read head within the trace array. private cursor: number = 0; // Tracks the current scope depth (as indicated by trace_scope). private scope: number = 0; // Tracks assignments into our data slots. private slots: Slot[] = []; // Tracks the trace stack (as indicated by trace_enter and trace_exit). private stack: StackFrame[] = []; // the execution stack // Tracks which line numbers are reached by the trace, and the number of times it's reached. private lineNumbers: Map<number, number> = new Map(); // Tracks all the data slots which have been touched during the current step. 
private dirtyMask: boolean[] = []; // Tracks all the data slots which hold function return values. private returnValues: boolean[] = []; // Tracks line numbers that have breakpoints set on them. private breakpointLines: Set<number> = new Set(); /** Throws an error if a precondition is not met. Indicates a logic bug or invalid trace. */ private check(result: boolean): void { if (!result) { throw new Error('check failed'); } } /** Copies trace info from the JSON number array into a TraceInfo struct. */ private getTraceInfo(position: number): TraceInfo { this.check(position < this.trace!.trace.length); this.check(this.trace!.trace[position][0] in TraceOp); const info: TraceInfo = { op: this.trace!.trace[position][0] as TraceOp, data: this.trace!.trace[position].slice(1), }; return info; } /** Resets playback to the start of the trace. Breakpoints are not cleared. */ public reset(trace: DebugTrace | null): void { const nslots = trace?.slots?.length ?? 0; const globalStackFrame: StackFrame = { func: -1, line: -1, displayMask: Array<boolean>(nslots).map(() => false), }; this.trace = trace; this.cursor = 0; this.slots = []; this.stack = [globalStackFrame]; this.dirtyMask = Array<boolean>(nslots).map(() => false); this.returnValues = Array<boolean>(nslots).map(() => false); if (trace !== null) { this.slots = trace.slots.map( (): Slot => ({ value: 0, scope: Infinity, writeTime: 0, }) ); this.returnValues = trace.slots.map( (slotInfo: SlotInfo): boolean => (slotInfo.retval ?? -1) >= 0 ); // Build a map holding the number of times each line is reached. this.lineNumbers.clear(); trace.trace.forEach((_, traceIdx: number) => { const info: TraceInfo = this.getTraceInfo(traceIdx); if (info.op === TraceOp.Line) { const lineNumber = info.data[0]; const lineCount = this.lineNumbers.get(lineNumber) ?? 0; this.lineNumbers.set(lineNumber, lineCount + 1); } }); } } /** Advances the simulation to the next Line op. 
*/ public step(): void { this.tidyState(); while (!this.traceHasCompleted()) { if (this.execute(this.cursor++)) { break; } } } /** * Advances the simulation to the next Line op, skipping past matched Enter/Exit pairs. * Breakpoints will also stop the simulation even if we haven't reached an Exit. */ public stepOver(): void { this.tidyState(); const initialStackDepth = this.stack.length; while (!this.traceHasCompleted()) { const canEscapeFromThisStackDepth = this.stack.length <= initialStackDepth; if (this.execute(this.cursor++)) { if (canEscapeFromThisStackDepth || this.atBreakpoint()) { break; } } } } /** * Advances the simulation until we exit from the current stack frame. * Breakpoints will also stop the simulation even if we haven't left the stack frame. */ public stepOut(): void { this.tidyState(); const initialStackDepth = this.stack.length; while (!this.traceHasCompleted()) { if (this.execute(this.cursor++)) { const hasEscapedFromInitialStackDepth = this.stack.length < initialStackDepth; if (hasEscapedFromInitialStackDepth || this.atBreakpoint()) { break; } } } } /** Advances the simulation until we hit a breakpoint, or the trace completes. */ public run(): void { this.tidyState(); while (!this.traceHasCompleted()) { if (this.execute(this.cursor++)) { if (this.atBreakpoint()) { break; } } } } /** * Cleans up temporary state between steps, such as the dirty mask and function return values. */ private tidyState(): void { this.dirtyMask.fill(false); const stackTop = this.stack[this.stack.length - 1]; this.returnValues.forEach((_, slotIdx: number) => { stackTop.displayMask[slotIdx] &&= !this.returnValues[slotIdx]; }); } /** Returns true if we have reached the end of the trace. */ public traceHasCompleted(): boolean { return this.trace == null || this.cursor >= this.trace.trace.length; } /** Reports the position of the cursor "read head" within the array of trace instructions. 
*/ public getCursor(): number { return this.cursor; } /** Returns true if the current line has a breakpoint set on it. */ public atBreakpoint(): boolean { return this.breakpointLines.has(this.getCurrentLine()); } /** Replaces all current breakpoints with a new set of them. */ public setBreakpoints(breakpointLines: Set<number>): void { this.breakpointLines = breakpointLines; } /** Returns the current set of lines which have a breakpoint. */ public getBreakpoints(): Set<number> { return this.breakpointLines; } /** Adds a breakpoint to a line (if one doesn't exist). */ public addBreakpoint(line: number): void { this.breakpointLines.add(line); } /** Removes a breakpoint from a line (if one exists). */ public removeBreakpoint(line: number): void { this.breakpointLines.delete(line); } /** Retrieves the current line. */ public getCurrentLine(): number { this.check(this.stack.length > 0); return this.stack[this.stack.length - 1].line; } /** Retrieves the current line for a given stack frame. */ public getCurrentLineInStackFrame(stackFrameIndex: number): number { // The first entry on the stack is the "global" frame before we enter main, so offset our index // by one to account for it. ++stackFrameIndex; this.check(stackFrameIndex > 0); this.check(stackFrameIndex < this.stack.length); return this.stack[stackFrameIndex].line; } /** * Returns every line number reached inside this debug trace, along with the remaining number of * times that this trace will reach it. e.g. {100, 2} means line 100 will be reached twice. */ public getLineNumbersReached(): Map<number, number> { return this.lineNumbers; } /** Returns the call stack as an array of FunctionInfo indices. */ public getCallStack(): number[] { this.check(this.stack.length > 0); return this.stack.slice(1).map((frame: StackFrame) => frame.func); } /** Returns the size of the call stack. 
*/ public getStackDepth(): number { this.check(this.stack.length > 0); return this.stack.length - 1; } /** Returns a slot's component as a variable-name suffix, e.g. ".x" or "[2][2]". */ public getSlotComponentSuffix(slotIndex: number): string { const slot: SlotInfo = this.trace!.slots[slotIndex]; if (slot.rows > 1) { return `[${Math.floor(slot.index / slot.rows)}][${ slot.index % slot.rows }]`; } if (slot.columns > 1) { switch (slot.index) { case 0: return '.x'; case 1: return '.y'; case 2: return '.z'; case 3: return '.w'; default: return '[???]'; } } return ''; } /** Bit-casts a value for a given slot into a double, honoring the slot's NumberKind. */ private interpretValueBits( slotIdx: number, valueBits: number ): number | boolean { const bitArray: Int32Array = new Int32Array(1); bitArray[0] = valueBits; switch (this.trace!.slots[slotIdx].kind) { case NumberKind.Float: return new Float32Array(bitArray.buffer)[0]; case NumberKind.Unsigned: return new Uint32Array(bitArray.buffer)[0]; case NumberKind.Boolean: return valueBits !== 0; case NumberKind.Signed: return valueBits; default: return valueBits; } } /** Returns a vector of the indices and values of each slot that is enabled in `bits`. */ private getVariablesForDisplayMask(displayMask: boolean[]): VariableData[] { this.check(displayMask.length === this.slots.length); let vars: VariableData[] = []; displayMask.forEach((_, slot: number) => { if (displayMask[slot]) { const varData: VariableData = { slotIndex: slot, dirty: this.dirtyMask[slot], value: this.interpretValueBits(slot, this.slots[slot].value), }; vars.push(varData); } }); // Order the variable list so that the most recently-written variables are shown at the top. vars = vars.sort((a: VariableData, b: VariableData) => { // Order by descending write-time. const delta = this.slots[b.slotIndex].writeTime - this.slots[a.slotIndex].writeTime;
} // If write times match, order by ascending slot index (preserving the existing order). return a.slotIndex - b.slotIndex; }); return vars; } /** Returns the variables in a given stack frame. */ public getLocalVariables(stackFrameIndex: number): VariableData[] { // The first entry on the stack is the "global" frame before we enter main, so offset our index // by one to account for it. ++stackFrameIndex; this.check(stackFrameIndex > 0); this.check(stackFrameIndex < this.stack.length); return this.getVariablesForDisplayMask( this.stack[stackFrameIndex].displayMask ); } /** Returns the variables at global scope. */ public getGlobalVariables(): VariableData[] { if (this.stack.length < 1) { return []; } return this.getVariablesForDisplayMask(this.stack[0].displayMask); } /** Updates fWriteTime for the entire variable at a given slot. */ private updateVariableWriteTime(slotIdx: number, cursor: number): void { // The slotIdx could point to any slot within a variable. // We want to update the write time on EVERY slot associated with this variable. // The SlotInfo's groupIndex gives us enough information to find the affected range. const changedSlot = this.trace!.slots[slotIdx]; slotIdx -= changedSlot.groupIdx!; this.check(slotIdx >= 0); this.check(slotIdx < this.trace!.slots.length); for (;;) { this.slots[slotIdx++].writeTime = cursor; // Stop if we've reached the final slot. if (slotIdx >= this.trace!.slots.length) { break; } // Each separate variable-group starts with a groupIndex of 0; stop when we detect this. if (this.trace!.slots[slotIdx].groupIdx! == 0) { break; } } } /** * Executes the trace op at the passed-in cursor position. Returns true if we've reached a line * or exit trace op, which indicate a stopping point. 
*/ private execute(position: number): boolean { const trace = this.getTraceInfo(position); this.check(this.stack.length > 0); const stackTop: StackFrame = this.stack[this.stack.length - 1]; switch (trace.op) { case TraceOp.Line: { // data: line number, (unused) const lineNumber = trace.data[0]; const lineCount = this.lineNumbers.get(lineNumber) ?? 0; this.check(lineNumber >= 0); this.check(lineNumber < this.trace!.source.length); this.check(lineCount > 0); stackTop.line = lineNumber; this.lineNumbers.set(lineNumber, lineCount - 1); return true; } case TraceOp.Var: { // data: slot, value const slotIdx = trace.data[0]; const value = trace.data[1]; this.check(slotIdx >= 0); this.check(slotIdx < this.slots.length); this.slots[slotIdx].value = value; this.slots[slotIdx].scope = Math.min( this.slots[slotIdx].scope, this.scope ); this.updateVariableWriteTime(slotIdx, position); if ((this.trace!.slots[slotIdx].retval ?? -1) < 0) { // Normal variables are associated with the current function. stackTop.displayMask[slotIdx] = true; } else { // Return values are associated with the parent function (since the current function // is exiting and we won't see them there). 
this.check(this.stack.length > 1); this.stack[this.stack.length - 2].displayMask[slotIdx] = true; } this.dirtyMask[slotIdx] = true; break; } case TraceOp.Enter: { // data: function index, (unused) const fnIdx = trace.data[0]; this.check(fnIdx >= 0); this.check(fnIdx < this.trace!.functions.length); const enteredStackFrame: StackFrame = { func: fnIdx, line: -1, displayMask: Array<boolean>(this.slots.length).fill(false), }; this.stack.push(enteredStackFrame); break; } case TraceOp.Exit: { // data: function index, (unused) const fnIdx = trace.data[0]; this.check(stackTop.func === fnIdx); this.stack.pop(); return true; } case TraceOp.Scope: { // data: scope delta, (unused) const scopeDelta = trace.data[0]; this.scope += scopeDelta; if (scopeDelta < 0) { // If the scope is being reduced, discard variables that are now out of scope. this.slots.forEach((_, slotIdx: number) => { if (this.scope < this.slots[slotIdx].scope) { this.slots[slotIdx].scope = Infinity; stackTop.displayMask[slotIdx] = false; } }); } break; } default: { throw new Error('unrecognized trace instruction'); } } return false; } }
if (delta !== 0) { return delta;
random_line_split
lstm_predictor.py
import numpy as np import os, json import tensorflow as tf import matplotlib.pyplot as plt from .base_predictor import BasePredictor from tools.filter import Exponentially_weighted_averages from tools.bird_view_projection import bird_view_proj class LSTMRNN(object): def __init__(self, n_steps, input_size, output_size, cell_size, batch_size, LR, x_vx_mode):
def add_input_layer(self,): l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D') # (batch*n_step, in_size) # Ws (in_size, cell_size) Ws_in = self._weight_variable([self.input_size, self.cell_size]) # bs (cell_size, ) bs_in = self._bias_variable([self.cell_size,]) # l_in_y = (batch * n_steps, cell_size) with tf.name_scope('Wx_plus_b'): l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in # reshape l_in_y ==> (batch, n_steps, cell_size) self.l_in_y = tf.reshape(l_in_y, [self.batch_size, -1, self.cell_size], name='2_3D') def add_cell(self): lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size, forget_bias=1.0, state_is_tuple=True) # lstm_cell = tf.contrib.rnn.MultiRNNCell( # [lstm_cell() for _ in range(3)], state_is_tuple=True) with tf.name_scope('initial_state'): self.cell_init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32) self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn( lstm_cell, self.l_in_y, initial_state=self.cell_init_state, time_major=False) def add_output_layer(self): # shape = (batch * steps, cell_size) l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name='2_2D') Ws_out = self._weight_variable([self.cell_size, self.output_size]) bs_out = self._bias_variable([self.output_size, ]) # shape = (batch * steps, output_size) with tf.name_scope('Wx_plus_b'): outputs = tf.matmul(l_out_x, Ws_out) + bs_out self.pred = tf.reshape(outputs, [self.batch_size, -1, self.output_size]) def compute_cost(self): if self.x_vx_mode == 'x' or self.x_vx_mode == 'vx': losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example( [tf.reshape(self.pred, [-1], name='reshape_pred')], [tf.reshape(self.ys, [-1], name='reshape_target')], [tf.ones([self.batch_size * self.n_steps * self.output_size], dtype=tf.float32)], average_across_timesteps=True, softmax_loss_function=self.ms_error, name='losses' ) elif self.x_vx_mode == 'x_vx': losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example( [tf.reshape(self.pred[:,:,0], [-1], 
name='reshape_pred')], [tf.reshape(self.ys[:,:,0], [-1], name='reshape_target')], [tf.ones([self.batch_size * self.n_steps ], dtype=tf.float32)], average_across_timesteps=True, softmax_loss_function=self.ms_error, name='losses' )+tf.contrib.legacy_seq2seq.sequence_loss_by_example( [tf.reshape(self.pred[:,:,1], [-1], name='reshape_pred')], [tf.reshape(self.ys[:,:,1], [-1], name='reshape_target')], [tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)], average_across_timesteps=True, softmax_loss_function=self.ms_error, name='losses' ) else: pass with tf.name_scope('average_cost'): self.cost = tf.div( tf.reduce_sum(losses, name='losses_sum'), self.batch_size, name='average_cost') tf.summary.scalar('cost', self.cost) @staticmethod def ms_error(labels, logits): # return tf.reduce_sum(tf.square(tf.subtract(labels, logits))) return tf.square(tf.subtract(labels, logits)) def _weight_variable(self, shape, name='weights'): initializer = tf.random_normal_initializer(mean=0., stddev=1.,) return tf.get_variable(shape=shape, initializer=initializer, name=name) def _bias_variable(self, shape, name='biases'): initializer = tf.constant_initializer(0.1) return tf.get_variable(name=name, shape=shape, initializer=initializer) class LSTMPredictor(BasePredictor): def __init__(self, args): BasePredictor.__init__(self, args) self.x_vx_mode = args.x_vx_mode self.lstm_predictor_model = args.lstm_predictor_model self.Tx = 50 #args.n_steps self.M = 100 #args.batch_size self.n_a = 16 self.lr = args.learning_rate self.WIDTH = 500. self.HIGHT = 500. 
self.bboxes = [] self.samples = [] self.x_all = [] self.vx_all = [] if self.x_vx_mode == 'x': self.n_x = 3 self.n_y = 1 elif self.x_vx_mode == 'vx': self.n_x = 3 self.n_y = 1 elif self.x_vx_mode == 'x_vx': self.n_x = 3 self.n_y = 2 else: pass self.build_model() def get_test_batch(self, X_test, batch_size): X_test = np.asarray(X_test) X_test_batch = np.tile(X_test, (batch_size, 1, 1)) # print('X_test_batch shape:',X_test_batch.shape) return X_test_batch def extract_bbox(self, b): ''' "top": 597.832580566406, "right": 880.870239257812, "bot": 739.836364746094, "left": 686.165344238281 ''' # h = (b['bot'] - b['top'] + 1.) / self.HIGHT # w = (b['right'] - b['left'] + 1.) / self.WIDTH h = (b[3] - b[1] + 1.) / self.HIGHT w = (b[2] - b[0] + 1.) / self.WIDTH area = h * w # x, y = bird_proj.proj((b['left'] + b['right'])/2, b['bot']) # return [h, w, area, x] # return [h, w, area, area_dao] return [h, w, area] def extract_sample(self, filename, time_file): with open(filename) as fin: gts = json.loads(fin.read())['frame_data'] # t_samples = [extract_bbox(e['ref_bbox'])] for e in gts] t_samples = [self.extract_bbox(e['ref_bbox']) for e in gts] # t_targets = [e['vx'] for e in gts] # e['x'] for dis, e['vx'] for relative v t_targets = [[e['x'], e['vx']] for e in gts] # e['x'] for dis, e['vx'] for relative v times = [] with open(time_file, 'r') as fin: for line in fin.readlines(): times.append(float(line)) # add vx as new feature tvx = [] for i in range(len(times)): if i == 0: tvx.append(0) else: tvx.append((t_samples[i][-1] - t_samples[i - 1][-1]) / (times[i] - times[i - 1])) # tvx[0] = tvx[1] # for i in range(len(times)): # t_samples[i].append(tvx[i]) return t_samples, t_targets def build_model(self): config = tf.ConfigProto(allow_soft_placement=True) g2 = tf.Graph() self.sess2 = tf.Session(config=config, graph=g2) with self.sess2.as_default(): with g2.as_default(): LR = tf.Variable(self.lr, trainable=False) self.model = LSTMRNN(self.Tx, self.n_x, self.n_y, self.n_a, self.M, LR, 
self.x_vx_mode) saver = tf.train.Saver() saved_path = os.path.join(self.lstm_predictor_model,self.x_vx_mode,self.x_vx_mode+'_lstm') saver.restore(self.sess2, saved_path+'_20000') def predict(self, bbox, time, fid): bbox = bbox[:4] self.bboxes.append(bbox) self.samples.append(self.extract_bbox(bbox)) test_batch = self.get_test_batch(self.samples, self.M) feed_dict = { self.model.xs: test_batch, } test_pred = self.sess2.run(self.model.pred, feed_dict=feed_dict).astype(np.float32) x = test_pred[0,:,0][-1] vx= test_pred[0,:,1][-1] # x, vx = self.lstm_process(bbox,fid) cur_pred = { 'fid': fid, 'vx': float(vx), 'x': float(x), # 'ref_bbox': { # 'top': float(bbox[0]), 'left': float(bbox[1]), # 'bot': float(bbox[2]), 'right': float(bbox[3]) # } } self.result.append(cur_pred) if fid == 500: for i, x in enumerate(test_pred[0,:,0]): self.result[i]['x'] = x for i, vx in enumerate(test_pred[0,:,1]): self.result[i]['vx'] = vx return cur_pred def to_json(self, filename): print('Save prediction to ', filename) with open(filename, 'w') as fout: data = {'frame_data': self.result} print('data:', data) print("Ready save to json...") # json.dump(data, fout, indent=4, ensure_ascii=False) json.dump(data, fout) print("Saved to json.") def lstm_process(self, bbox, fid): return x,vx
self.n_steps = n_steps self.input_size = input_size self.output_size = output_size self.cell_size = cell_size self.batch_size = batch_size self.learning_rate = LR self.x_vx_mode = x_vx_mode with tf.name_scope('inputs'): self.xs = tf.placeholder(tf.float32, [batch_size, None, input_size], name='xs') self.ys = tf.placeholder(tf.float32, [batch_size, None, output_size], name='ys') with tf.variable_scope('in_hidden'): self.add_input_layer() with tf.variable_scope('LSTM_cell'): self.add_cell() with tf.variable_scope('out_hidden'): self.add_output_layer() with tf.name_scope('cost'): self.compute_cost() with tf.name_scope('train'): self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cost)
identifier_body
lstm_predictor.py
import numpy as np import os, json import tensorflow as tf import matplotlib.pyplot as plt from .base_predictor import BasePredictor from tools.filter import Exponentially_weighted_averages from tools.bird_view_projection import bird_view_proj class LSTMRNN(object): def __init__(self, n_steps, input_size, output_size, cell_size, batch_size, LR, x_vx_mode): self.n_steps = n_steps self.input_size = input_size self.output_size = output_size self.cell_size = cell_size self.batch_size = batch_size self.learning_rate = LR self.x_vx_mode = x_vx_mode with tf.name_scope('inputs'): self.xs = tf.placeholder(tf.float32, [batch_size, None, input_size], name='xs') self.ys = tf.placeholder(tf.float32, [batch_size, None, output_size], name='ys') with tf.variable_scope('in_hidden'): self.add_input_layer() with tf.variable_scope('LSTM_cell'): self.add_cell() with tf.variable_scope('out_hidden'): self.add_output_layer() with tf.name_scope('cost'): self.compute_cost() with tf.name_scope('train'): self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cost) def add_input_layer(self,): l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D') # (batch*n_step, in_size) # Ws (in_size, cell_size) Ws_in = self._weight_variable([self.input_size, self.cell_size]) # bs (cell_size, ) bs_in = self._bias_variable([self.cell_size,]) # l_in_y = (batch * n_steps, cell_size) with tf.name_scope('Wx_plus_b'): l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in # reshape l_in_y ==> (batch, n_steps, cell_size) self.l_in_y = tf.reshape(l_in_y, [self.batch_size, -1, self.cell_size], name='2_3D') def add_cell(self): lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size, forget_bias=1.0, state_is_tuple=True) # lstm_cell = tf.contrib.rnn.MultiRNNCell( # [lstm_cell() for _ in range(3)], state_is_tuple=True) with tf.name_scope('initial_state'): self.cell_init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32) self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn( lstm_cell, 
self.l_in_y, initial_state=self.cell_init_state, time_major=False) def add_output_layer(self): # shape = (batch * steps, cell_size) l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name='2_2D') Ws_out = self._weight_variable([self.cell_size, self.output_size]) bs_out = self._bias_variable([self.output_size, ]) # shape = (batch * steps, output_size) with tf.name_scope('Wx_plus_b'): outputs = tf.matmul(l_out_x, Ws_out) + bs_out self.pred = tf.reshape(outputs, [self.batch_size, -1, self.output_size]) def compute_cost(self): if self.x_vx_mode == 'x' or self.x_vx_mode == 'vx': losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example( [tf.reshape(self.pred, [-1], name='reshape_pred')], [tf.reshape(self.ys, [-1], name='reshape_target')], [tf.ones([self.batch_size * self.n_steps * self.output_size], dtype=tf.float32)], average_across_timesteps=True, softmax_loss_function=self.ms_error, name='losses' ) elif self.x_vx_mode == 'x_vx': losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example( [tf.reshape(self.pred[:,:,0], [-1], name='reshape_pred')], [tf.reshape(self.ys[:,:,0], [-1], name='reshape_target')], [tf.ones([self.batch_size * self.n_steps ], dtype=tf.float32)], average_across_timesteps=True, softmax_loss_function=self.ms_error, name='losses' )+tf.contrib.legacy_seq2seq.sequence_loss_by_example( [tf.reshape(self.pred[:,:,1], [-1], name='reshape_pred')], [tf.reshape(self.ys[:,:,1], [-1], name='reshape_target')], [tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)], average_across_timesteps=True, softmax_loss_function=self.ms_error, name='losses' ) else: pass with tf.name_scope('average_cost'): self.cost = tf.div( tf.reduce_sum(losses, name='losses_sum'), self.batch_size, name='average_cost') tf.summary.scalar('cost', self.cost) @staticmethod def ms_error(labels, logits): # return tf.reduce_sum(tf.square(tf.subtract(labels, logits))) return tf.square(tf.subtract(labels, logits)) def _weight_variable(self, shape, name='weights'): initializer = 
tf.random_normal_initializer(mean=0., stddev=1.,) return tf.get_variable(shape=shape, initializer=initializer, name=name) def _bias_variable(self, shape, name='biases'): initializer = tf.constant_initializer(0.1) return tf.get_variable(name=name, shape=shape, initializer=initializer) class LSTMPredictor(BasePredictor): def __init__(self, args): BasePredictor.__init__(self, args) self.x_vx_mode = args.x_vx_mode self.lstm_predictor_model = args.lstm_predictor_model self.Tx = 50 #args.n_steps self.M = 100 #args.batch_size self.n_a = 16 self.lr = args.learning_rate self.WIDTH = 500. self.HIGHT = 500. self.bboxes = [] self.samples = [] self.x_all = [] self.vx_all = [] if self.x_vx_mode == 'x': self.n_x = 3 self.n_y = 1 elif self.x_vx_mode == 'vx': self.n_x = 3 self.n_y = 1 elif self.x_vx_mode == 'x_vx': self.n_x = 3 self.n_y = 2 else: pass self.build_model() def get_test_batch(self, X_test, batch_size): X_test = np.asarray(X_test) X_test_batch = np.tile(X_test, (batch_size, 1, 1)) # print('X_test_batch shape:',X_test_batch.shape) return X_test_batch def extract_bbox(self, b): ''' "top": 597.832580566406, "right": 880.870239257812, "bot": 739.836364746094, "left": 686.165344238281 ''' # h = (b['bot'] - b['top'] + 1.) / self.HIGHT # w = (b['right'] - b['left'] + 1.) / self.WIDTH h = (b[3] - b[1] + 1.) / self.HIGHT
# return [h, w, area, x] # return [h, w, area, area_dao] return [h, w, area] def extract_sample(self, filename, time_file): with open(filename) as fin: gts = json.loads(fin.read())['frame_data'] # t_samples = [extract_bbox(e['ref_bbox'])] for e in gts] t_samples = [self.extract_bbox(e['ref_bbox']) for e in gts] # t_targets = [e['vx'] for e in gts] # e['x'] for dis, e['vx'] for relative v t_targets = [[e['x'], e['vx']] for e in gts] # e['x'] for dis, e['vx'] for relative v times = [] with open(time_file, 'r') as fin: for line in fin.readlines(): times.append(float(line)) # add vx as new feature tvx = [] for i in range(len(times)): if i == 0: tvx.append(0) else: tvx.append((t_samples[i][-1] - t_samples[i - 1][-1]) / (times[i] - times[i - 1])) # tvx[0] = tvx[1] # for i in range(len(times)): # t_samples[i].append(tvx[i]) return t_samples, t_targets def build_model(self): config = tf.ConfigProto(allow_soft_placement=True) g2 = tf.Graph() self.sess2 = tf.Session(config=config, graph=g2) with self.sess2.as_default(): with g2.as_default(): LR = tf.Variable(self.lr, trainable=False) self.model = LSTMRNN(self.Tx, self.n_x, self.n_y, self.n_a, self.M, LR, self.x_vx_mode) saver = tf.train.Saver() saved_path = os.path.join(self.lstm_predictor_model,self.x_vx_mode,self.x_vx_mode+'_lstm') saver.restore(self.sess2, saved_path+'_20000') def predict(self, bbox, time, fid): bbox = bbox[:4] self.bboxes.append(bbox) self.samples.append(self.extract_bbox(bbox)) test_batch = self.get_test_batch(self.samples, self.M) feed_dict = { self.model.xs: test_batch, } test_pred = self.sess2.run(self.model.pred, feed_dict=feed_dict).astype(np.float32) x = test_pred[0,:,0][-1] vx= test_pred[0,:,1][-1] # x, vx = self.lstm_process(bbox,fid) cur_pred = { 'fid': fid, 'vx': float(vx), 'x': float(x), # 'ref_bbox': { # 'top': float(bbox[0]), 'left': float(bbox[1]), # 'bot': float(bbox[2]), 'right': float(bbox[3]) # } } self.result.append(cur_pred) if fid == 500: for i, x in enumerate(test_pred[0,:,0]): 
self.result[i]['x'] = x for i, vx in enumerate(test_pred[0,:,1]): self.result[i]['vx'] = vx return cur_pred def to_json(self, filename): print('Save prediction to ', filename) with open(filename, 'w') as fout: data = {'frame_data': self.result} print('data:', data) print("Ready save to json...") # json.dump(data, fout, indent=4, ensure_ascii=False) json.dump(data, fout) print("Saved to json.") def lstm_process(self, bbox, fid): return x,vx
w = (b[2] - b[0] + 1.) / self.WIDTH area = h * w # x, y = bird_proj.proj((b['left'] + b['right'])/2, b['bot'])
random_line_split
lstm_predictor.py
import numpy as np import os, json import tensorflow as tf import matplotlib.pyplot as plt from .base_predictor import BasePredictor from tools.filter import Exponentially_weighted_averages from tools.bird_view_projection import bird_view_proj class LSTMRNN(object): def __init__(self, n_steps, input_size, output_size, cell_size, batch_size, LR, x_vx_mode): self.n_steps = n_steps self.input_size = input_size self.output_size = output_size self.cell_size = cell_size self.batch_size = batch_size self.learning_rate = LR self.x_vx_mode = x_vx_mode with tf.name_scope('inputs'): self.xs = tf.placeholder(tf.float32, [batch_size, None, input_size], name='xs') self.ys = tf.placeholder(tf.float32, [batch_size, None, output_size], name='ys') with tf.variable_scope('in_hidden'): self.add_input_layer() with tf.variable_scope('LSTM_cell'): self.add_cell() with tf.variable_scope('out_hidden'): self.add_output_layer() with tf.name_scope('cost'): self.compute_cost() with tf.name_scope('train'): self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cost) def add_input_layer(self,): l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D') # (batch*n_step, in_size) # Ws (in_size, cell_size) Ws_in = self._weight_variable([self.input_size, self.cell_size]) # bs (cell_size, ) bs_in = self._bias_variable([self.cell_size,]) # l_in_y = (batch * n_steps, cell_size) with tf.name_scope('Wx_plus_b'): l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in # reshape l_in_y ==> (batch, n_steps, cell_size) self.l_in_y = tf.reshape(l_in_y, [self.batch_size, -1, self.cell_size], name='2_3D') def add_cell(self): lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size, forget_bias=1.0, state_is_tuple=True) # lstm_cell = tf.contrib.rnn.MultiRNNCell( # [lstm_cell() for _ in range(3)], state_is_tuple=True) with tf.name_scope('initial_state'): self.cell_init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32) self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn( lstm_cell, 
self.l_in_y, initial_state=self.cell_init_state, time_major=False) def add_output_layer(self): # shape = (batch * steps, cell_size) l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name='2_2D') Ws_out = self._weight_variable([self.cell_size, self.output_size]) bs_out = self._bias_variable([self.output_size, ]) # shape = (batch * steps, output_size) with tf.name_scope('Wx_plus_b'): outputs = tf.matmul(l_out_x, Ws_out) + bs_out self.pred = tf.reshape(outputs, [self.batch_size, -1, self.output_size]) def compute_cost(self): if self.x_vx_mode == 'x' or self.x_vx_mode == 'vx': losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example( [tf.reshape(self.pred, [-1], name='reshape_pred')], [tf.reshape(self.ys, [-1], name='reshape_target')], [tf.ones([self.batch_size * self.n_steps * self.output_size], dtype=tf.float32)], average_across_timesteps=True, softmax_loss_function=self.ms_error, name='losses' ) elif self.x_vx_mode == 'x_vx': losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example( [tf.reshape(self.pred[:,:,0], [-1], name='reshape_pred')], [tf.reshape(self.ys[:,:,0], [-1], name='reshape_target')], [tf.ones([self.batch_size * self.n_steps ], dtype=tf.float32)], average_across_timesteps=True, softmax_loss_function=self.ms_error, name='losses' )+tf.contrib.legacy_seq2seq.sequence_loss_by_example( [tf.reshape(self.pred[:,:,1], [-1], name='reshape_pred')], [tf.reshape(self.ys[:,:,1], [-1], name='reshape_target')], [tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)], average_across_timesteps=True, softmax_loss_function=self.ms_error, name='losses' ) else: pass with tf.name_scope('average_cost'): self.cost = tf.div( tf.reduce_sum(losses, name='losses_sum'), self.batch_size, name='average_cost') tf.summary.scalar('cost', self.cost) @staticmethod def ms_error(labels, logits): # return tf.reduce_sum(tf.square(tf.subtract(labels, logits))) return tf.square(tf.subtract(labels, logits)) def _weight_variable(self, shape, name='weights'): initializer = 
tf.random_normal_initializer(mean=0., stddev=1.,) return tf.get_variable(shape=shape, initializer=initializer, name=name) def _bias_variable(self, shape, name='biases'): initializer = tf.constant_initializer(0.1) return tf.get_variable(name=name, shape=shape, initializer=initializer) class LSTMPredictor(BasePredictor): def __init__(self, args): BasePredictor.__init__(self, args) self.x_vx_mode = args.x_vx_mode self.lstm_predictor_model = args.lstm_predictor_model self.Tx = 50 #args.n_steps self.M = 100 #args.batch_size self.n_a = 16 self.lr = args.learning_rate self.WIDTH = 500. self.HIGHT = 500. self.bboxes = [] self.samples = [] self.x_all = [] self.vx_all = [] if self.x_vx_mode == 'x': self.n_x = 3 self.n_y = 1 elif self.x_vx_mode == 'vx': self.n_x = 3 self.n_y = 1 elif self.x_vx_mode == 'x_vx': self.n_x = 3 self.n_y = 2 else: pass self.build_model() def get_test_batch(self, X_test, batch_size): X_test = np.asarray(X_test) X_test_batch = np.tile(X_test, (batch_size, 1, 1)) # print('X_test_batch shape:',X_test_batch.shape) return X_test_batch def extract_bbox(self, b): ''' "top": 597.832580566406, "right": 880.870239257812, "bot": 739.836364746094, "left": 686.165344238281 ''' # h = (b['bot'] - b['top'] + 1.) / self.HIGHT # w = (b['right'] - b['left'] + 1.) / self.WIDTH h = (b[3] - b[1] + 1.) / self.HIGHT w = (b[2] - b[0] + 1.) 
/ self.WIDTH area = h * w # x, y = bird_proj.proj((b['left'] + b['right'])/2, b['bot']) # return [h, w, area, x] # return [h, w, area, area_dao] return [h, w, area] def extract_sample(self, filename, time_file): with open(filename) as fin: gts = json.loads(fin.read())['frame_data'] # t_samples = [extract_bbox(e['ref_bbox'])] for e in gts] t_samples = [self.extract_bbox(e['ref_bbox']) for e in gts] # t_targets = [e['vx'] for e in gts] # e['x'] for dis, e['vx'] for relative v t_targets = [[e['x'], e['vx']] for e in gts] # e['x'] for dis, e['vx'] for relative v times = [] with open(time_file, 'r') as fin: for line in fin.readlines(): times.append(float(line)) # add vx as new feature tvx = [] for i in range(len(times)): if i == 0: tvx.append(0) else: tvx.append((t_samples[i][-1] - t_samples[i - 1][-1]) / (times[i] - times[i - 1])) # tvx[0] = tvx[1] # for i in range(len(times)): # t_samples[i].append(tvx[i]) return t_samples, t_targets def build_model(self): config = tf.ConfigProto(allow_soft_placement=True) g2 = tf.Graph() self.sess2 = tf.Session(config=config, graph=g2) with self.sess2.as_default(): with g2.as_default(): LR = tf.Variable(self.lr, trainable=False) self.model = LSTMRNN(self.Tx, self.n_x, self.n_y, self.n_a, self.M, LR, self.x_vx_mode) saver = tf.train.Saver() saved_path = os.path.join(self.lstm_predictor_model,self.x_vx_mode,self.x_vx_mode+'_lstm') saver.restore(self.sess2, saved_path+'_20000') def predict(self, bbox, time, fid): bbox = bbox[:4] self.bboxes.append(bbox) self.samples.append(self.extract_bbox(bbox)) test_batch = self.get_test_batch(self.samples, self.M) feed_dict = { self.model.xs: test_batch, } test_pred = self.sess2.run(self.model.pred, feed_dict=feed_dict).astype(np.float32) x = test_pred[0,:,0][-1] vx= test_pred[0,:,1][-1] # x, vx = self.lstm_process(bbox,fid) cur_pred = { 'fid': fid, 'vx': float(vx), 'x': float(x), # 'ref_bbox': { # 'top': float(bbox[0]), 'left': float(bbox[1]), # 'bot': float(bbox[2]), 'right': float(bbox[3]) # } } 
self.result.append(cur_pred) if fid == 500: for i, x in enumerate(test_pred[0,:,0]): self.result[i]['x'] = x for i, vx in enumerate(test_pred[0,:,1]): self.result[i]['vx'] = vx return cur_pred def to_json(self, filename): print('Save prediction to ', filename) with open(filename, 'w') as fout: data = {'frame_data': self.result} print('data:', data) print("Ready save to json...") # json.dump(data, fout, indent=4, ensure_ascii=False) json.dump(data, fout) print("Saved to json.") def
(self, bbox, fid): return x,vx
lstm_process
identifier_name
lstm_predictor.py
import numpy as np import os, json import tensorflow as tf import matplotlib.pyplot as plt from .base_predictor import BasePredictor from tools.filter import Exponentially_weighted_averages from tools.bird_view_projection import bird_view_proj class LSTMRNN(object): def __init__(self, n_steps, input_size, output_size, cell_size, batch_size, LR, x_vx_mode): self.n_steps = n_steps self.input_size = input_size self.output_size = output_size self.cell_size = cell_size self.batch_size = batch_size self.learning_rate = LR self.x_vx_mode = x_vx_mode with tf.name_scope('inputs'): self.xs = tf.placeholder(tf.float32, [batch_size, None, input_size], name='xs') self.ys = tf.placeholder(tf.float32, [batch_size, None, output_size], name='ys') with tf.variable_scope('in_hidden'): self.add_input_layer() with tf.variable_scope('LSTM_cell'): self.add_cell() with tf.variable_scope('out_hidden'): self.add_output_layer() with tf.name_scope('cost'): self.compute_cost() with tf.name_scope('train'): self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.cost) def add_input_layer(self,): l_in_x = tf.reshape(self.xs, [-1, self.input_size], name='2_2D') # (batch*n_step, in_size) # Ws (in_size, cell_size) Ws_in = self._weight_variable([self.input_size, self.cell_size]) # bs (cell_size, ) bs_in = self._bias_variable([self.cell_size,]) # l_in_y = (batch * n_steps, cell_size) with tf.name_scope('Wx_plus_b'): l_in_y = tf.matmul(l_in_x, Ws_in) + bs_in # reshape l_in_y ==> (batch, n_steps, cell_size) self.l_in_y = tf.reshape(l_in_y, [self.batch_size, -1, self.cell_size], name='2_3D') def add_cell(self): lstm_cell = tf.contrib.rnn.BasicLSTMCell(self.cell_size, forget_bias=1.0, state_is_tuple=True) # lstm_cell = tf.contrib.rnn.MultiRNNCell( # [lstm_cell() for _ in range(3)], state_is_tuple=True) with tf.name_scope('initial_state'): self.cell_init_state = lstm_cell.zero_state(self.batch_size, dtype=tf.float32) self.cell_outputs, self.cell_final_state = tf.nn.dynamic_rnn( lstm_cell, 
self.l_in_y, initial_state=self.cell_init_state, time_major=False) def add_output_layer(self): # shape = (batch * steps, cell_size) l_out_x = tf.reshape(self.cell_outputs, [-1, self.cell_size], name='2_2D') Ws_out = self._weight_variable([self.cell_size, self.output_size]) bs_out = self._bias_variable([self.output_size, ]) # shape = (batch * steps, output_size) with tf.name_scope('Wx_plus_b'): outputs = tf.matmul(l_out_x, Ws_out) + bs_out self.pred = tf.reshape(outputs, [self.batch_size, -1, self.output_size]) def compute_cost(self): if self.x_vx_mode == 'x' or self.x_vx_mode == 'vx': losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example( [tf.reshape(self.pred, [-1], name='reshape_pred')], [tf.reshape(self.ys, [-1], name='reshape_target')], [tf.ones([self.batch_size * self.n_steps * self.output_size], dtype=tf.float32)], average_across_timesteps=True, softmax_loss_function=self.ms_error, name='losses' ) elif self.x_vx_mode == 'x_vx': losses = tf.contrib.legacy_seq2seq.sequence_loss_by_example( [tf.reshape(self.pred[:,:,0], [-1], name='reshape_pred')], [tf.reshape(self.ys[:,:,0], [-1], name='reshape_target')], [tf.ones([self.batch_size * self.n_steps ], dtype=tf.float32)], average_across_timesteps=True, softmax_loss_function=self.ms_error, name='losses' )+tf.contrib.legacy_seq2seq.sequence_loss_by_example( [tf.reshape(self.pred[:,:,1], [-1], name='reshape_pred')], [tf.reshape(self.ys[:,:,1], [-1], name='reshape_target')], [tf.ones([self.batch_size * self.n_steps], dtype=tf.float32)], average_across_timesteps=True, softmax_loss_function=self.ms_error, name='losses' ) else: pass with tf.name_scope('average_cost'): self.cost = tf.div( tf.reduce_sum(losses, name='losses_sum'), self.batch_size, name='average_cost') tf.summary.scalar('cost', self.cost) @staticmethod def ms_error(labels, logits): # return tf.reduce_sum(tf.square(tf.subtract(labels, logits))) return tf.square(tf.subtract(labels, logits)) def _weight_variable(self, shape, name='weights'): initializer = 
tf.random_normal_initializer(mean=0., stddev=1.,) return tf.get_variable(shape=shape, initializer=initializer, name=name) def _bias_variable(self, shape, name='biases'): initializer = tf.constant_initializer(0.1) return tf.get_variable(name=name, shape=shape, initializer=initializer) class LSTMPredictor(BasePredictor): def __init__(self, args): BasePredictor.__init__(self, args) self.x_vx_mode = args.x_vx_mode self.lstm_predictor_model = args.lstm_predictor_model self.Tx = 50 #args.n_steps self.M = 100 #args.batch_size self.n_a = 16 self.lr = args.learning_rate self.WIDTH = 500. self.HIGHT = 500. self.bboxes = [] self.samples = [] self.x_all = [] self.vx_all = [] if self.x_vx_mode == 'x': self.n_x = 3 self.n_y = 1 elif self.x_vx_mode == 'vx': self.n_x = 3 self.n_y = 1 elif self.x_vx_mode == 'x_vx': self.n_x = 3 self.n_y = 2 else: pass self.build_model() def get_test_batch(self, X_test, batch_size): X_test = np.asarray(X_test) X_test_batch = np.tile(X_test, (batch_size, 1, 1)) # print('X_test_batch shape:',X_test_batch.shape) return X_test_batch def extract_bbox(self, b): ''' "top": 597.832580566406, "right": 880.870239257812, "bot": 739.836364746094, "left": 686.165344238281 ''' # h = (b['bot'] - b['top'] + 1.) / self.HIGHT # w = (b['right'] - b['left'] + 1.) / self.WIDTH h = (b[3] - b[1] + 1.) / self.HIGHT w = (b[2] - b[0] + 1.) 
/ self.WIDTH area = h * w # x, y = bird_proj.proj((b['left'] + b['right'])/2, b['bot']) # return [h, w, area, x] # return [h, w, area, area_dao] return [h, w, area] def extract_sample(self, filename, time_file): with open(filename) as fin: gts = json.loads(fin.read())['frame_data'] # t_samples = [extract_bbox(e['ref_bbox'])] for e in gts] t_samples = [self.extract_bbox(e['ref_bbox']) for e in gts] # t_targets = [e['vx'] for e in gts] # e['x'] for dis, e['vx'] for relative v t_targets = [[e['x'], e['vx']] for e in gts] # e['x'] for dis, e['vx'] for relative v times = [] with open(time_file, 'r') as fin: for line in fin.readlines(): times.append(float(line)) # add vx as new feature tvx = [] for i in range(len(times)): if i == 0: tvx.append(0) else: tvx.append((t_samples[i][-1] - t_samples[i - 1][-1]) / (times[i] - times[i - 1])) # tvx[0] = tvx[1] # for i in range(len(times)): # t_samples[i].append(tvx[i]) return t_samples, t_targets def build_model(self): config = tf.ConfigProto(allow_soft_placement=True) g2 = tf.Graph() self.sess2 = tf.Session(config=config, graph=g2) with self.sess2.as_default(): with g2.as_default(): LR = tf.Variable(self.lr, trainable=False) self.model = LSTMRNN(self.Tx, self.n_x, self.n_y, self.n_a, self.M, LR, self.x_vx_mode) saver = tf.train.Saver() saved_path = os.path.join(self.lstm_predictor_model,self.x_vx_mode,self.x_vx_mode+'_lstm') saver.restore(self.sess2, saved_path+'_20000') def predict(self, bbox, time, fid): bbox = bbox[:4] self.bboxes.append(bbox) self.samples.append(self.extract_bbox(bbox)) test_batch = self.get_test_batch(self.samples, self.M) feed_dict = { self.model.xs: test_batch, } test_pred = self.sess2.run(self.model.pred, feed_dict=feed_dict).astype(np.float32) x = test_pred[0,:,0][-1] vx= test_pred[0,:,1][-1] # x, vx = self.lstm_process(bbox,fid) cur_pred = { 'fid': fid, 'vx': float(vx), 'x': float(x), # 'ref_bbox': { # 'top': float(bbox[0]), 'left': float(bbox[1]), # 'bot': float(bbox[2]), 'right': float(bbox[3]) # } } 
self.result.append(cur_pred) if fid == 500: for i, x in enumerate(test_pred[0,:,0]): self.result[i]['x'] = x for i, vx in enumerate(test_pred[0,:,1]):
return cur_pred def to_json(self, filename): print('Save prediction to ', filename) with open(filename, 'w') as fout: data = {'frame_data': self.result} print('data:', data) print("Ready save to json...") # json.dump(data, fout, indent=4, ensure_ascii=False) json.dump(data, fout) print("Saved to json.") def lstm_process(self, bbox, fid): return x,vx
self.result[i]['vx'] = vx
conditional_block
credentials.rs
//! # Credentials for accessing the Firebase REST API //! This module contains the [`crate::credentials::Credentials`] type, used by [`crate::sessions`] to create and maintain //! authentication tokens for accessing the Firebase REST API. use chrono::Duration; use serde::{Deserialize, Serialize}; use serde_json; use std::collections::BTreeMap; use std::fs::File; use std::sync::Arc; use super::jwt::{create_jwt_encoded, download_google_jwks, verify_access_token, JWKSet, JWT_AUDIENCE_IDENTITY}; use crate::errors::FirebaseError; use std::io::BufReader; type Error = super::errors::FirebaseError; /// This is not defined in the json file and computed #[derive(Default, Clone)] pub(crate) struct Keys { pub pub_key: BTreeMap<String, Arc<biscuit::jws::Secret>>, pub secret: Option<Arc<biscuit::jws::Secret>>, } /// Service account credentials /// /// Especially the service account email is required to retrieve the public java web key set (jwks) /// for verifying Google Firestore tokens. /// /// The api_key is necessary for interacting with the Firestore REST API. /// /// Internals: /// /// The private key is used for signing JWTs (javascript web token). /// A signed jwt, encoded as a base64 string, can be exchanged into a refresh and access token. 
#[derive(Serialize, Deserialize, Default, Clone)] pub struct Credentials { pub project_id: String, pub private_key_id: String, pub private_key: String, pub client_email: String, pub client_id: String, pub api_key: String, #[serde(default, skip)] pub(crate) keys: Keys, } /// Converts a PEM (ascii base64) encoded private key into the binary der representation pub fn pem_to_der(pem_file_contents: &str) -> Result<Vec<u8>, Error> { use base64::decode; let pem_file_contents = pem_file_contents .find("-----BEGIN") // Cut off the first BEGIN part .and_then(|i| Some(&pem_file_contents[i + 10..])) // Find the trailing ---- after BEGIN and cut that off .and_then(|str| str.find("-----").and_then(|i| Some(&str[i + 5..]))) // Cut off -----END .and_then(|str| str.rfind("-----END").and_then(|i| Some(&str[..i]))); if pem_file_contents.is_none() { return Err(FirebaseError::Generic( "Invalid private key in credentials file. Must be valid PEM.", )); } let base64_body = pem_file_contents.unwrap().replace("\n", ""); Ok(decode(&base64_body) .map_err(|_| FirebaseError::Generic("Invalid private key in credentials file. Expected Base64 data."))?) 
} #[test] fn pem_to_der_test() { const INPUT: &str = r#"-----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCTbt9Rs2niyIRE FIdrhIN757eq/1Ry/VhZALBXAveg+lt+ui/9EHtYPJH1A9NyyAwChs0UCRWqkkEo Amtz4dJQ1YlGi0/BGhK2lg== -----END PRIVATE KEY----- "#; const EXPECTED: [u8; 112] = [ 48, 130, 4, 188, 2, 1, 0, 48, 13, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0, 4, 130, 4, 166, 48, 130, 4, 162, 2, 1, 0, 2, 130, 1, 1, 0, 147, 110, 223, 81, 179, 105, 226, 200, 132, 68, 20, 135, 107, 132, 131, 123, 231, 183, 170, 255, 84, 114, 253, 88, 89, 0, 176, 87, 2, 247, 160, 250, 91, 126, 186, 47, 253, 16, 123, 88, 60, 145, 245, 3, 211, 114, 200, 12, 2, 134, 205, 20, 9, 21, 170, 146, 65, 40, 2, 107, 115, 225, 210, 80, 213, 137, 70, 139, 79, 193, 26, 18, 182, 150, ]; assert_eq!(&EXPECTED[..], &pem_to_der(INPUT).unwrap()[..]); } impl Credentials { /// Create a [`Credentials`] object by parsing a google-service-account json string /// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json" and /// a downloaded jwk-set file is called "service-account-test.jwks" this example embeds /// the file content during compile time. This avoids and http or io calls. /// /// ``` /// use firestore_db_and_auth::{Credentials}; /// use firestore_db_and_auth::jwt::JWKSet; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? 
/// .with_jwkset(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?)?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` /// /// You need two JWKS files for this crate to work: /// * https://www.googleapis.com/service_accounts/v1/jwk/securetoken@system.gserviceaccount.com /// * https://www.googleapis.com/service_accounts/v1/jwk/{your-service-account-email} pub fn new(credentials_file_content: &str) -> Result<Credentials, Error> { let mut credentials: Credentials = serde_json::from_str(credentials_file_content)?; credentials.compute_secret()?; Ok(credentials) } /// Create a [`Credentials`] object by reading and parsing a google-service-account json file. /// /// This is a convenience method, that reads in the given credentials file and acts otherwise the same as /// the [`Credentials::new`] method. pub fn from_file(credential_file: &str) -> Result<Self, Error> { let f = BufReader::new(File::open(credential_file)?); let mut credentials: Credentials = serde_json::from_reader(f)?; credentials.compute_secret()?; Ok(credentials) } /// Adds public-key JWKs to a credentials instance and returns it. /// /// This method will also verify that the given JWKs files allow verification of Google access tokens. /// This is a convenience method, you may also just use [`Credentials::add_jwks_public_keys`]. pub fn with_jwkset(mut self, jwks: &JWKSet) -> Result<Credentials, Error> { self.add_jwks_public_keys(jwks); self.verify()?; Ok(self) } /// The public keys to verify generated tokens will be downloaded, for the given service account as well as /// for "securetoken@system.gserviceaccount.com". /// Do not use this option if additional downloads are not desired, /// for example in cloud functions that require fast cold boot start times. /// /// You can use [`Credentials::add_jwks_public_keys`] to manually add/replace public keys later on. 
/// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json". /// /// ```no_run /// use firestore_db_and_auth::{Credentials}; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? /// .download_jwkset()?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn download_jwkset(mut self) -> Result<Credentials, Error> { self.download_google_jwks()?; self.verify()?; Ok(self) } /// Verifies that creating access tokens is possible with the given credentials and public keys. /// Returns an empty result type on success. pub fn verify(&self) -> Result<(), Error> { let access_token = create_jwt_encoded( &self, Some(["admin"].iter()), Duration::hours(1), Some(self.client_id.clone()), None, JWT_AUDIENCE_IDENTITY, )?; verify_access_token(&self, &access_token)?; Ok(()) } /// Find the secret in the jwt set that matches the given key id, if any. /// Used for jws validation pub fn decode_secret(&self, kid: &str) -> Option<Arc<biscuit::jws::Secret>> { self.keys.pub_key.get(kid).and_then(|f| Some(f.clone())) } /// Add a JSON Web Key Set (JWKS) to allow verification of Google access tokens. 
/// /// Example: /// /// ``` /// use firestore_db_and_auth::credentials::Credentials; /// use firestore_db_and_auth::JWKSet; /// /// let mut c : Credentials = serde_json::from_str(include_str!("../tests/service-account-test.json"))?; /// c.add_jwks_public_keys(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?); /// c.compute_secret()?; /// c.verify()?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn add_jwks_public_keys(&mut self, jwkset: &JWKSet) { for entry in jwkset.keys.iter() { if !entry.headers.key_id.is_some() { continue; } let key_id = entry.headers.key_id.as_ref().unwrap().to_owned(); self.keys .pub_key .insert(key_id, Arc::new(entry.ne.jws_public_key_secret())); } } /// If you haven't called [`Credentials::add_jwks_public_keys`] to manually add public keys, /// this method will download one for your google service account and one for the oauth related /// securetoken@system.gserviceaccount.com service account. pub fn download_google_jwks(&mut self) -> Result<(), Error> { let jwks = download_google_jwks(&self.client_email)?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); let jwks = download_google_jwks("securetoken@system.gserviceaccount.com")?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); Ok(()) } /// Compute the Rsa keypair by using the private_key of the credentials file. /// You must call this if you have manually created a credentials object. /// /// This is automatically invoked if you use [`Credentials::new`] or [`Credentials::from_file`]. 
pub fn compute_secret(&mut self) -> Result<(), Error> { use biscuit::jws::Secret; use ring::signature; let vec = pem_to_der(&self.private_key)?; let key_pair = signature::RsaKeyPair::from_pkcs8(&vec)?; self.keys.secret = Some(Arc::new(Secret::RsaKeyPair(Arc::new(key_pair)))); Ok(()) } } #[doc(hidden)] #[allow(dead_code)] pub fn doctest_credentials() -> Credentials { let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed") } #[test] fn
() { let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); use std::path::PathBuf; let mut credential_file = PathBuf::from(env!("CARGO_MANIFEST_DIR")); credential_file.push("tests/service-account-test.json"); let c = Credentials::from_file(credential_file.to_str().unwrap()) .expect("Failed to open credentials file") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); }
deserialize_credentials
identifier_name
credentials.rs
//! # Credentials for accessing the Firebase REST API //! This module contains the [`crate::credentials::Credentials`] type, used by [`crate::sessions`] to create and maintain //! authentication tokens for accessing the Firebase REST API. use chrono::Duration; use serde::{Deserialize, Serialize}; use serde_json; use std::collections::BTreeMap; use std::fs::File; use std::sync::Arc; use super::jwt::{create_jwt_encoded, download_google_jwks, verify_access_token, JWKSet, JWT_AUDIENCE_IDENTITY}; use crate::errors::FirebaseError; use std::io::BufReader; type Error = super::errors::FirebaseError; /// This is not defined in the json file and computed #[derive(Default, Clone)] pub(crate) struct Keys { pub pub_key: BTreeMap<String, Arc<biscuit::jws::Secret>>, pub secret: Option<Arc<biscuit::jws::Secret>>, } /// Service account credentials /// /// Especially the service account email is required to retrieve the public java web key set (jwks) /// for verifying Google Firestore tokens. /// /// The api_key is necessary for interacting with the Firestore REST API. /// /// Internals: /// /// The private key is used for signing JWTs (javascript web token). /// A signed jwt, encoded as a base64 string, can be exchanged into a refresh and access token. 
#[derive(Serialize, Deserialize, Default, Clone)] pub struct Credentials { pub project_id: String, pub private_key_id: String, pub private_key: String, pub client_email: String, pub client_id: String, pub api_key: String, #[serde(default, skip)] pub(crate) keys: Keys, } /// Converts a PEM (ascii base64) encoded private key into the binary der representation pub fn pem_to_der(pem_file_contents: &str) -> Result<Vec<u8>, Error> { use base64::decode; let pem_file_contents = pem_file_contents .find("-----BEGIN") // Cut off the first BEGIN part .and_then(|i| Some(&pem_file_contents[i + 10..])) // Find the trailing ---- after BEGIN and cut that off .and_then(|str| str.find("-----").and_then(|i| Some(&str[i + 5..]))) // Cut off -----END .and_then(|str| str.rfind("-----END").and_then(|i| Some(&str[..i]))); if pem_file_contents.is_none() { return Err(FirebaseError::Generic( "Invalid private key in credentials file. Must be valid PEM.", )); } let base64_body = pem_file_contents.unwrap().replace("\n", ""); Ok(decode(&base64_body) .map_err(|_| FirebaseError::Generic("Invalid private key in credentials file. Expected Base64 data."))?) 
} #[test] fn pem_to_der_test() { const INPUT: &str = r#"-----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCTbt9Rs2niyIRE FIdrhIN757eq/1Ry/VhZALBXAveg+lt+ui/9EHtYPJH1A9NyyAwChs0UCRWqkkEo Amtz4dJQ1YlGi0/BGhK2lg== -----END PRIVATE KEY----- "#; const EXPECTED: [u8; 112] = [ 48, 130, 4, 188, 2, 1, 0, 48, 13, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0, 4, 130, 4, 166, 48, 130, 4, 162, 2, 1, 0, 2, 130, 1, 1, 0, 147, 110, 223, 81, 179, 105, 226, 200, 132, 68, 20, 135, 107, 132, 131, 123, 231, 183, 170, 255, 84, 114, 253, 88, 89, 0, 176, 87, 2, 247, 160, 250, 91, 126, 186, 47, 253, 16, 123, 88, 60, 145, 245, 3, 211, 114, 200, 12, 2, 134, 205, 20, 9, 21, 170, 146, 65, 40, 2, 107, 115, 225, 210, 80, 213, 137, 70, 139, 79, 193, 26, 18, 182, 150, ]; assert_eq!(&EXPECTED[..], &pem_to_der(INPUT).unwrap()[..]); } impl Credentials { /// Create a [`Credentials`] object by parsing a google-service-account json string /// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json" and /// a downloaded jwk-set file is called "service-account-test.jwks" this example embeds /// the file content during compile time. This avoids and http or io calls. /// /// ``` /// use firestore_db_and_auth::{Credentials}; /// use firestore_db_and_auth::jwt::JWKSet; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? 
/// .with_jwkset(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?)?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` /// /// You need two JWKS files for this crate to work: /// * https://www.googleapis.com/service_accounts/v1/jwk/securetoken@system.gserviceaccount.com /// * https://www.googleapis.com/service_accounts/v1/jwk/{your-service-account-email} pub fn new(credentials_file_content: &str) -> Result<Credentials, Error> { let mut credentials: Credentials = serde_json::from_str(credentials_file_content)?; credentials.compute_secret()?; Ok(credentials) } /// Create a [`Credentials`] object by reading and parsing a google-service-account json file. /// /// This is a convenience method, that reads in the given credentials file and acts otherwise the same as /// the [`Credentials::new`] method. pub fn from_file(credential_file: &str) -> Result<Self, Error> { let f = BufReader::new(File::open(credential_file)?); let mut credentials: Credentials = serde_json::from_reader(f)?; credentials.compute_secret()?; Ok(credentials) } /// Adds public-key JWKs to a credentials instance and returns it. /// /// This method will also verify that the given JWKs files allow verification of Google access tokens. /// This is a convenience method, you may also just use [`Credentials::add_jwks_public_keys`]. pub fn with_jwkset(mut self, jwks: &JWKSet) -> Result<Credentials, Error> { self.add_jwks_public_keys(jwks); self.verify()?; Ok(self) } /// The public keys to verify generated tokens will be downloaded, for the given service account as well as /// for "securetoken@system.gserviceaccount.com". /// Do not use this option if additional downloads are not desired, /// for example in cloud functions that require fast cold boot start times. /// /// You can use [`Credentials::add_jwks_public_keys`] to manually add/replace public keys later on. 
/// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json". /// /// ```no_run /// use firestore_db_and_auth::{Credentials}; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? /// .download_jwkset()?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn download_jwkset(mut self) -> Result<Credentials, Error> { self.download_google_jwks()?; self.verify()?; Ok(self) } /// Verifies that creating access tokens is possible with the given credentials and public keys. /// Returns an empty result type on success. pub fn verify(&self) -> Result<(), Error> { let access_token = create_jwt_encoded( &self, Some(["admin"].iter()), Duration::hours(1), Some(self.client_id.clone()), None, JWT_AUDIENCE_IDENTITY, )?; verify_access_token(&self, &access_token)?; Ok(()) } /// Find the secret in the jwt set that matches the given key id, if any. /// Used for jws validation pub fn decode_secret(&self, kid: &str) -> Option<Arc<biscuit::jws::Secret>> { self.keys.pub_key.get(kid).and_then(|f| Some(f.clone())) } /// Add a JSON Web Key Set (JWKS) to allow verification of Google access tokens. /// /// Example: /// /// ``` /// use firestore_db_and_auth::credentials::Credentials; /// use firestore_db_and_auth::JWKSet; /// /// let mut c : Credentials = serde_json::from_str(include_str!("../tests/service-account-test.json"))?;
/// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn add_jwks_public_keys(&mut self, jwkset: &JWKSet) { for entry in jwkset.keys.iter() { if !entry.headers.key_id.is_some() { continue; } let key_id = entry.headers.key_id.as_ref().unwrap().to_owned(); self.keys .pub_key .insert(key_id, Arc::new(entry.ne.jws_public_key_secret())); } } /// If you haven't called [`Credentials::add_jwks_public_keys`] to manually add public keys, /// this method will download one for your google service account and one for the oauth related /// securetoken@system.gserviceaccount.com service account. pub fn download_google_jwks(&mut self) -> Result<(), Error> { let jwks = download_google_jwks(&self.client_email)?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); let jwks = download_google_jwks("securetoken@system.gserviceaccount.com")?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); Ok(()) } /// Compute the Rsa keypair by using the private_key of the credentials file. /// You must call this if you have manually created a credentials object. /// /// This is automatically invoked if you use [`Credentials::new`] or [`Credentials::from_file`]. 
pub fn compute_secret(&mut self) -> Result<(), Error> { use biscuit::jws::Secret; use ring::signature; let vec = pem_to_der(&self.private_key)?; let key_pair = signature::RsaKeyPair::from_pkcs8(&vec)?; self.keys.secret = Some(Arc::new(Secret::RsaKeyPair(Arc::new(key_pair)))); Ok(()) } } #[doc(hidden)] #[allow(dead_code)] pub fn doctest_credentials() -> Credentials { let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed") } #[test] fn deserialize_credentials() { let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); use std::path::PathBuf; let mut credential_file = PathBuf::from(env!("CARGO_MANIFEST_DIR")); credential_file.push("tests/service-account-test.json"); let c = Credentials::from_file(credential_file.to_str().unwrap()) .expect("Failed to open credentials file") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); }
/// c.add_jwks_public_keys(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?); /// c.compute_secret()?; /// c.verify()?;
random_line_split
credentials.rs
//! # Credentials for accessing the Firebase REST API //! This module contains the [`crate::credentials::Credentials`] type, used by [`crate::sessions`] to create and maintain //! authentication tokens for accessing the Firebase REST API. use chrono::Duration; use serde::{Deserialize, Serialize}; use serde_json; use std::collections::BTreeMap; use std::fs::File; use std::sync::Arc; use super::jwt::{create_jwt_encoded, download_google_jwks, verify_access_token, JWKSet, JWT_AUDIENCE_IDENTITY}; use crate::errors::FirebaseError; use std::io::BufReader; type Error = super::errors::FirebaseError; /// This is not defined in the json file and computed #[derive(Default, Clone)] pub(crate) struct Keys { pub pub_key: BTreeMap<String, Arc<biscuit::jws::Secret>>, pub secret: Option<Arc<biscuit::jws::Secret>>, } /// Service account credentials /// /// Especially the service account email is required to retrieve the public java web key set (jwks) /// for verifying Google Firestore tokens. /// /// The api_key is necessary for interacting with the Firestore REST API. /// /// Internals: /// /// The private key is used for signing JWTs (javascript web token). /// A signed jwt, encoded as a base64 string, can be exchanged into a refresh and access token. 
#[derive(Serialize, Deserialize, Default, Clone)] pub struct Credentials { pub project_id: String, pub private_key_id: String, pub private_key: String, pub client_email: String, pub client_id: String, pub api_key: String, #[serde(default, skip)] pub(crate) keys: Keys, } /// Converts a PEM (ascii base64) encoded private key into the binary der representation pub fn pem_to_der(pem_file_contents: &str) -> Result<Vec<u8>, Error> { use base64::decode; let pem_file_contents = pem_file_contents .find("-----BEGIN") // Cut off the first BEGIN part .and_then(|i| Some(&pem_file_contents[i + 10..])) // Find the trailing ---- after BEGIN and cut that off .and_then(|str| str.find("-----").and_then(|i| Some(&str[i + 5..]))) // Cut off -----END .and_then(|str| str.rfind("-----END").and_then(|i| Some(&str[..i]))); if pem_file_contents.is_none() { return Err(FirebaseError::Generic( "Invalid private key in credentials file. Must be valid PEM.", )); } let base64_body = pem_file_contents.unwrap().replace("\n", ""); Ok(decode(&base64_body) .map_err(|_| FirebaseError::Generic("Invalid private key in credentials file. Expected Base64 data."))?) 
} #[test] fn pem_to_der_test() { const INPUT: &str = r#"-----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCTbt9Rs2niyIRE FIdrhIN757eq/1Ry/VhZALBXAveg+lt+ui/9EHtYPJH1A9NyyAwChs0UCRWqkkEo Amtz4dJQ1YlGi0/BGhK2lg== -----END PRIVATE KEY----- "#; const EXPECTED: [u8; 112] = [ 48, 130, 4, 188, 2, 1, 0, 48, 13, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0, 4, 130, 4, 166, 48, 130, 4, 162, 2, 1, 0, 2, 130, 1, 1, 0, 147, 110, 223, 81, 179, 105, 226, 200, 132, 68, 20, 135, 107, 132, 131, 123, 231, 183, 170, 255, 84, 114, 253, 88, 89, 0, 176, 87, 2, 247, 160, 250, 91, 126, 186, 47, 253, 16, 123, 88, 60, 145, 245, 3, 211, 114, 200, 12, 2, 134, 205, 20, 9, 21, 170, 146, 65, 40, 2, 107, 115, 225, 210, 80, 213, 137, 70, 139, 79, 193, 26, 18, 182, 150, ]; assert_eq!(&EXPECTED[..], &pem_to_der(INPUT).unwrap()[..]); } impl Credentials { /// Create a [`Credentials`] object by parsing a google-service-account json string /// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json" and /// a downloaded jwk-set file is called "service-account-test.jwks" this example embeds /// the file content during compile time. This avoids and http or io calls. /// /// ``` /// use firestore_db_and_auth::{Credentials}; /// use firestore_db_and_auth::jwt::JWKSet; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? 
/// .with_jwkset(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?)?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` /// /// You need two JWKS files for this crate to work: /// * https://www.googleapis.com/service_accounts/v1/jwk/securetoken@system.gserviceaccount.com /// * https://www.googleapis.com/service_accounts/v1/jwk/{your-service-account-email} pub fn new(credentials_file_content: &str) -> Result<Credentials, Error> { let mut credentials: Credentials = serde_json::from_str(credentials_file_content)?; credentials.compute_secret()?; Ok(credentials) } /// Create a [`Credentials`] object by reading and parsing a google-service-account json file. /// /// This is a convenience method, that reads in the given credentials file and acts otherwise the same as /// the [`Credentials::new`] method. pub fn from_file(credential_file: &str) -> Result<Self, Error> { let f = BufReader::new(File::open(credential_file)?); let mut credentials: Credentials = serde_json::from_reader(f)?; credentials.compute_secret()?; Ok(credentials) } /// Adds public-key JWKs to a credentials instance and returns it. /// /// This method will also verify that the given JWKs files allow verification of Google access tokens. /// This is a convenience method, you may also just use [`Credentials::add_jwks_public_keys`]. pub fn with_jwkset(mut self, jwks: &JWKSet) -> Result<Credentials, Error> { self.add_jwks_public_keys(jwks); self.verify()?; Ok(self) } /// The public keys to verify generated tokens will be downloaded, for the given service account as well as /// for "securetoken@system.gserviceaccount.com". /// Do not use this option if additional downloads are not desired, /// for example in cloud functions that require fast cold boot start times. /// /// You can use [`Credentials::add_jwks_public_keys`] to manually add/replace public keys later on. 
/// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json". /// /// ```no_run /// use firestore_db_and_auth::{Credentials}; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? /// .download_jwkset()?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn download_jwkset(mut self) -> Result<Credentials, Error> { self.download_google_jwks()?; self.verify()?; Ok(self) } /// Verifies that creating access tokens is possible with the given credentials and public keys. /// Returns an empty result type on success. pub fn verify(&self) -> Result<(), Error> { let access_token = create_jwt_encoded( &self, Some(["admin"].iter()), Duration::hours(1), Some(self.client_id.clone()), None, JWT_AUDIENCE_IDENTITY, )?; verify_access_token(&self, &access_token)?; Ok(()) } /// Find the secret in the jwt set that matches the given key id, if any. /// Used for jws validation pub fn decode_secret(&self, kid: &str) -> Option<Arc<biscuit::jws::Secret>> { self.keys.pub_key.get(kid).and_then(|f| Some(f.clone())) } /// Add a JSON Web Key Set (JWKS) to allow verification of Google access tokens. 
/// /// Example: /// /// ``` /// use firestore_db_and_auth::credentials::Credentials; /// use firestore_db_and_auth::JWKSet; /// /// let mut c : Credentials = serde_json::from_str(include_str!("../tests/service-account-test.json"))?; /// c.add_jwks_public_keys(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?); /// c.compute_secret()?; /// c.verify()?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn add_jwks_public_keys(&mut self, jwkset: &JWKSet) { for entry in jwkset.keys.iter() { if !entry.headers.key_id.is_some() { continue; } let key_id = entry.headers.key_id.as_ref().unwrap().to_owned(); self.keys .pub_key .insert(key_id, Arc::new(entry.ne.jws_public_key_secret())); } } /// If you haven't called [`Credentials::add_jwks_public_keys`] to manually add public keys, /// this method will download one for your google service account and one for the oauth related /// securetoken@system.gserviceaccount.com service account. pub fn download_google_jwks(&mut self) -> Result<(), Error> { let jwks = download_google_jwks(&self.client_email)?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); let jwks = download_google_jwks("securetoken@system.gserviceaccount.com")?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); Ok(()) } /// Compute the Rsa keypair by using the private_key of the credentials file. /// You must call this if you have manually created a credentials object. /// /// This is automatically invoked if you use [`Credentials::new`] or [`Credentials::from_file`]. 
pub fn compute_secret(&mut self) -> Result<(), Error> { use biscuit::jws::Secret; use ring::signature; let vec = pem_to_der(&self.private_key)?; let key_pair = signature::RsaKeyPair::from_pkcs8(&vec)?; self.keys.secret = Some(Arc::new(Secret::RsaKeyPair(Arc::new(key_pair)))); Ok(()) } } #[doc(hidden)] #[allow(dead_code)] pub fn doctest_credentials() -> Credentials { let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed") } #[test] fn deserialize_credentials()
{ let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); use std::path::PathBuf; let mut credential_file = PathBuf::from(env!("CARGO_MANIFEST_DIR")); credential_file.push("tests/service-account-test.json"); let c = Credentials::from_file(credential_file.to_str().unwrap()) .expect("Failed to open credentials file") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); }
identifier_body
credentials.rs
//! # Credentials for accessing the Firebase REST API //! This module contains the [`crate::credentials::Credentials`] type, used by [`crate::sessions`] to create and maintain //! authentication tokens for accessing the Firebase REST API. use chrono::Duration; use serde::{Deserialize, Serialize}; use serde_json; use std::collections::BTreeMap; use std::fs::File; use std::sync::Arc; use super::jwt::{create_jwt_encoded, download_google_jwks, verify_access_token, JWKSet, JWT_AUDIENCE_IDENTITY}; use crate::errors::FirebaseError; use std::io::BufReader; type Error = super::errors::FirebaseError; /// This is not defined in the json file and computed #[derive(Default, Clone)] pub(crate) struct Keys { pub pub_key: BTreeMap<String, Arc<biscuit::jws::Secret>>, pub secret: Option<Arc<biscuit::jws::Secret>>, } /// Service account credentials /// /// Especially the service account email is required to retrieve the public java web key set (jwks) /// for verifying Google Firestore tokens. /// /// The api_key is necessary for interacting with the Firestore REST API. /// /// Internals: /// /// The private key is used for signing JWTs (javascript web token). /// A signed jwt, encoded as a base64 string, can be exchanged into a refresh and access token. 
#[derive(Serialize, Deserialize, Default, Clone)] pub struct Credentials { pub project_id: String, pub private_key_id: String, pub private_key: String, pub client_email: String, pub client_id: String, pub api_key: String, #[serde(default, skip)] pub(crate) keys: Keys, } /// Converts a PEM (ascii base64) encoded private key into the binary der representation pub fn pem_to_der(pem_file_contents: &str) -> Result<Vec<u8>, Error> { use base64::decode; let pem_file_contents = pem_file_contents .find("-----BEGIN") // Cut off the first BEGIN part .and_then(|i| Some(&pem_file_contents[i + 10..])) // Find the trailing ---- after BEGIN and cut that off .and_then(|str| str.find("-----").and_then(|i| Some(&str[i + 5..]))) // Cut off -----END .and_then(|str| str.rfind("-----END").and_then(|i| Some(&str[..i]))); if pem_file_contents.is_none() { return Err(FirebaseError::Generic( "Invalid private key in credentials file. Must be valid PEM.", )); } let base64_body = pem_file_contents.unwrap().replace("\n", ""); Ok(decode(&base64_body) .map_err(|_| FirebaseError::Generic("Invalid private key in credentials file. Expected Base64 data."))?) 
} #[test] fn pem_to_der_test() { const INPUT: &str = r#"-----BEGIN PRIVATE KEY----- MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCTbt9Rs2niyIRE FIdrhIN757eq/1Ry/VhZALBXAveg+lt+ui/9EHtYPJH1A9NyyAwChs0UCRWqkkEo Amtz4dJQ1YlGi0/BGhK2lg== -----END PRIVATE KEY----- "#; const EXPECTED: [u8; 112] = [ 48, 130, 4, 188, 2, 1, 0, 48, 13, 6, 9, 42, 134, 72, 134, 247, 13, 1, 1, 1, 5, 0, 4, 130, 4, 166, 48, 130, 4, 162, 2, 1, 0, 2, 130, 1, 1, 0, 147, 110, 223, 81, 179, 105, 226, 200, 132, 68, 20, 135, 107, 132, 131, 123, 231, 183, 170, 255, 84, 114, 253, 88, 89, 0, 176, 87, 2, 247, 160, 250, 91, 126, 186, 47, 253, 16, 123, 88, 60, 145, 245, 3, 211, 114, 200, 12, 2, 134, 205, 20, 9, 21, 170, 146, 65, 40, 2, 107, 115, 225, 210, 80, 213, 137, 70, 139, 79, 193, 26, 18, 182, 150, ]; assert_eq!(&EXPECTED[..], &pem_to_der(INPUT).unwrap()[..]); } impl Credentials { /// Create a [`Credentials`] object by parsing a google-service-account json string /// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json" and /// a downloaded jwk-set file is called "service-account-test.jwks" this example embeds /// the file content during compile time. This avoids and http or io calls. /// /// ``` /// use firestore_db_and_auth::{Credentials}; /// use firestore_db_and_auth::jwt::JWKSet; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? 
/// .with_jwkset(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?)?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` /// /// You need two JWKS files for this crate to work: /// * https://www.googleapis.com/service_accounts/v1/jwk/securetoken@system.gserviceaccount.com /// * https://www.googleapis.com/service_accounts/v1/jwk/{your-service-account-email} pub fn new(credentials_file_content: &str) -> Result<Credentials, Error> { let mut credentials: Credentials = serde_json::from_str(credentials_file_content)?; credentials.compute_secret()?; Ok(credentials) } /// Create a [`Credentials`] object by reading and parsing a google-service-account json file. /// /// This is a convenience method, that reads in the given credentials file and acts otherwise the same as /// the [`Credentials::new`] method. pub fn from_file(credential_file: &str) -> Result<Self, Error> { let f = BufReader::new(File::open(credential_file)?); let mut credentials: Credentials = serde_json::from_reader(f)?; credentials.compute_secret()?; Ok(credentials) } /// Adds public-key JWKs to a credentials instance and returns it. /// /// This method will also verify that the given JWKs files allow verification of Google access tokens. /// This is a convenience method, you may also just use [`Credentials::add_jwks_public_keys`]. pub fn with_jwkset(mut self, jwks: &JWKSet) -> Result<Credentials, Error> { self.add_jwks_public_keys(jwks); self.verify()?; Ok(self) } /// The public keys to verify generated tokens will be downloaded, for the given service account as well as /// for "securetoken@system.gserviceaccount.com". /// Do not use this option if additional downloads are not desired, /// for example in cloud functions that require fast cold boot start times. /// /// You can use [`Credentials::add_jwks_public_keys`] to manually add/replace public keys later on. 
/// /// Example: /// /// Assuming that your firebase service account credentials file is called "service-account-test.json". /// /// ```no_run /// use firestore_db_and_auth::{Credentials}; /// /// let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json"))? /// .download_jwkset()?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn download_jwkset(mut self) -> Result<Credentials, Error> { self.download_google_jwks()?; self.verify()?; Ok(self) } /// Verifies that creating access tokens is possible with the given credentials and public keys. /// Returns an empty result type on success. pub fn verify(&self) -> Result<(), Error> { let access_token = create_jwt_encoded( &self, Some(["admin"].iter()), Duration::hours(1), Some(self.client_id.clone()), None, JWT_AUDIENCE_IDENTITY, )?; verify_access_token(&self, &access_token)?; Ok(()) } /// Find the secret in the jwt set that matches the given key id, if any. /// Used for jws validation pub fn decode_secret(&self, kid: &str) -> Option<Arc<biscuit::jws::Secret>> { self.keys.pub_key.get(kid).and_then(|f| Some(f.clone())) } /// Add a JSON Web Key Set (JWKS) to allow verification of Google access tokens. /// /// Example: /// /// ``` /// use firestore_db_and_auth::credentials::Credentials; /// use firestore_db_and_auth::JWKSet; /// /// let mut c : Credentials = serde_json::from_str(include_str!("../tests/service-account-test.json"))?; /// c.add_jwks_public_keys(&JWKSet::new(include_str!("../tests/service-account-test.jwks"))?); /// c.compute_secret()?; /// c.verify()?; /// # Ok::<(), firestore_db_and_auth::errors::FirebaseError>(()) /// ``` pub fn add_jwks_public_keys(&mut self, jwkset: &JWKSet) { for entry in jwkset.keys.iter() { if !entry.headers.key_id.is_some()
let key_id = entry.headers.key_id.as_ref().unwrap().to_owned(); self.keys .pub_key .insert(key_id, Arc::new(entry.ne.jws_public_key_secret())); } } /// If you haven't called [`Credentials::add_jwks_public_keys`] to manually add public keys, /// this method will download one for your google service account and one for the oauth related /// securetoken@system.gserviceaccount.com service account. pub fn download_google_jwks(&mut self) -> Result<(), Error> { let jwks = download_google_jwks(&self.client_email)?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); let jwks = download_google_jwks("securetoken@system.gserviceaccount.com")?; self.add_jwks_public_keys(&JWKSet::new(&jwks)?); Ok(()) } /// Compute the Rsa keypair by using the private_key of the credentials file. /// You must call this if you have manually created a credentials object. /// /// This is automatically invoked if you use [`Credentials::new`] or [`Credentials::from_file`]. pub fn compute_secret(&mut self) -> Result<(), Error> { use biscuit::jws::Secret; use ring::signature; let vec = pem_to_der(&self.private_key)?; let key_pair = signature::RsaKeyPair::from_pkcs8(&vec)?; self.keys.secret = Some(Arc::new(Secret::RsaKeyPair(Arc::new(key_pair)))); Ok(()) } } #[doc(hidden)] #[allow(dead_code)] pub fn doctest_credentials() -> Credentials { let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed") } #[test] fn deserialize_credentials() { let jwk_list = JWKSet::new(include_str!("../tests/service-account-test.jwks")).unwrap(); let c: Credentials = Credentials::new(include_str!("../tests/service-account-test.json")) .expect("Failed to deserialize credentials") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); use std::path::PathBuf; let mut 
credential_file = PathBuf::from(env!("CARGO_MANIFEST_DIR")); credential_file.push("tests/service-account-test.json"); let c = Credentials::from_file(credential_file.to_str().unwrap()) .expect("Failed to open credentials file") .with_jwkset(&jwk_list) .expect("JWK public keys verification failed"); assert_eq!(c.api_key, "api_key"); }
{ continue; }
conditional_block
zcooldl.py
# @Author: lonsty # @Date: 2019-09-07 18:34:18 import json import math import os.path as op import re import sys import threading import time from collections import namedtuple from concurrent.futures import ThreadPoolExecutor, as_completed, wait from datetime import datetime from pathlib import Path from queue import Empty, Queue from typing import List from urllib.parse import urljoin, urlparse from uuid import uuid4 import requests from bs4 import BeautifulSoup from termcolor import colored, cprint from zcooldl.utils import (mkdirs_if_not_exist, retry, safe_filename, sort_records) Scrapy = namedtuple('Scrapy', 'type author title objid index url') # 用于记录下载任务 HEADERS = { 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 ' '(KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36' } HOST_PAGE = 'https://www.zcool.com.cn' SEARCH_DESIGNER_SUFFIX = '/search/designer?&word={word}' USER_SUFFIX = '/u/{id}' PAGE_SUFFIX = '?myCate=0&sort=1&p={page}' WORK_SUFFIX = '/work/content/show?p=1&objectId={objid}' COLLECTION_SUFFIX = '/collection/contents?id={objid}&p={page}&pageSize=25' USER_API = 'https://www.zcool.com.cn/member/card/{id}' TIMEOUT = 30 Q_TIMEOUT = 1 MAX_WORKERS = 20 RETRIES = 3 thread_local = threading.local() def get_session(): """使线程获取同一个 Session,可减少 TCP 连接数,加速请求。 :return requests.Session: session """ if not hasattr(thread_local, "session"): thread_local.session = requests.Session() return thread_local.session @retry(Exception, tries=RETRIES) def session_request(url: str, method: str = 'GET') -> requests.Response: """使用 session 请求数据。使用了装饰器 retry,在网络异常导致错误时会重试。 :param str url: 目标请求 URL :param str method: 请求方式 :return requests.Response: 响应数据 """ resp = get_session().request(method, url, headers=HEADERS, timeout=TIMEOUT) resp.raise_for_status() return resp class ZCoolScraper(): def __init__(self, user_id=None, username=None, collection=None, destination=None, max_pages=None, spec_topics=None, max_topics=None, max_workers=None, retries=None, 
redownload=None, overwrite=False, thumbnail=False): """初始化下载参数。 :param int user_id: 用户 ID :param str username: 用户名 :param HttpUrl collection: 收藏集 URL :param str destination: 图片保存到本地的路径,默认当前路径 :param int max_pages: 最大爬取页数,默认所有 :param list spec_topics: 需要下载的特定主题 :param int max_topics: 最大下载主题数量,默认所有 :param int max_workers: 线程开启个数,默认 20 :param int retries: 请求异常时的重试次数,默认 3 :param str redownload: 下载记录文件,给定此文件则从失败记录进行下载 :param bool overwrite: 是否覆盖已存在的文件,默认 False :param bool thumbnail: 是否下载缩略图,默认 False """ self.start_time = datetime.now() print(f' - - - - - -+-+ {self.start_time.ctime()} +-+- - - - - -\n') self.collection = collection self.spec_topics = spec_topics self.max_topics = max_topics or 'all' self.max_workers = max_workers or MAX_WORKERS self.pool = ThreadPoolExecutor(self.max_workers) self.overwrite = overwrite self.thumbnail = thumbnail self.pages = Queue() self.topics = Queue() self.images = Queue() self.stat = { 'npages': 0, 'ntopics': 0, 'nimages': 0, 'pages_pass': set(), 'pages_fail': set(), 'topics_pass': set(), 'topics_fail': set(), 'images_pass': set(), 'images_fail': set() } if retries: # 重置全局变量 RETRIES global RETRIES RETRIES = retries dest = Path(destination or '', urlparse(HOST_PAGE).netloc) # 从记录文件中的失败项开始下载 if redownload: self.username = self.reload_records(redownload) self.user_id = self.search_id_by_username(self.username) self.max_pages = self.pages.qsize() self.max_topics = self.topics.qsize() self.directory = dest / safe_filename(self.username) self.stat.update({ 'npages': self.max_pages, 'ntopics': self.max_topics, 'nimages': self.images.qsize() }) print(f'{"Username".rjust(17)}: {colored(self.username, "cyan")}\n' f'{"User ID".rjust(17)}: {self.user_id}\n' f'{"Pages to scrapy".rjust(17)}: {self.max_pages:2d}\n' f'{"Topics to scrapy".rjust(17)}: {self.max_topics:3d}\n' f'{"Images to scrapy".rjust(17)}: {self.images.qsize():4d}\n' f'Storage directory: {colored(self.directory, attrs=["underline"])}', end='\n\n') self.fetch_all(initialized=True) 
return # 从收藏集下载 if collection: objid = self.parse_objid(collection, is_collection=True) resp = session_request(urljoin(HOST_PAGE, COLLECTION_SUFFIX.format(objid=objid, page=1))) data = resp.json().get('data', {}) total = data.get('total', 0) page_size = data.get('pageable', {}).get('pageSize') max_pages_ = math.ceil(total / page_size) self.max_pages = min(max_pages or 9999, max_pages_) self.directory = dest / safe_filename(f'{self.username}-{self._collection_name}') self.parse_collection_topics(data.get('content')) # 解析第 2 页 至 最大页的 topic 到下载任务 for page in range(2, self.max_pages + 1): resp = session_request(urljoin(HOST_PAGE, COLLECTION_SUFFIX.format(objid=objid, page=page))) self.parse_collection_topics(topics=resp.json().get('data', {}).get('content'), offset=page_size * (page - 1)) # 根据用户 ID 或用户名下载 else: self.user_id = user_id or self.search_id_by_username(username) self.base_url = urljoin(HOST_PAGE, USER_SUFFIX.format(id=self.user_id)) try: response = session_request(self.base_url) except requests.exceptions.ProxyError: cprint('Cannot connect to proxy.', 'red') sys.exit(1) except Exception as e: cprint(f'Failed to connect to {self.base_url}, {e}', 'red') sys.exit(1) soup = BeautifulSoup(markup=response.text, features='html.parser') try: author = soup.find(name='div', id='body').get('data-name') if username and username != author: cprint(f'Invalid user id:「{user_id}」or username:「{username}」!', 'red') sys.exit(1) self.username = author except Exception: self.username = username or 'anonymous' self.directory = dest / safe_filename(self.username) try: max_pages_ = int(soup.find(id='laypage_0').find_all(name='a')[-2].text) except Exception: max_pages_ = 1 self.max_pages = min(max_pages or 9999, max_pages_) if self.spec_topics: topics = ', '.join(self.spec_topics) elif self.max_topics == 'all': topics = 'all' else: topics = self.max_pages * self.max_topics print(f'{"Username".rjust(17)}: {colored(self.username, "cyan")}\n' f'{"User ID".rjust(17)}: {self.user_id}\n' 
f'{"Maximum pages".rjust(17)}: {max_pages_}\n' f'{"Pages to scrapy".rjust(17)}: {self.max_pages}\n' f'{"Topics to scrapy".rjust(17)}: {topics}\n' f'Storage directory: {colored(self.directory, attrs=["underline"])}', end='\n\n') self.END_PARSING_TOPICS = False self.fetch_all(initialized=True if self.collection else False) def search_id_by_username(self, username): """通过用户昵称查找用户 ID。 :param str username: 用户昵称 :return int: 用户 ID """ if not username: cprint('Must give an <user id> or <username>!', 'yellow') sys.exit(1) search_url = urljoin(HOST_PAGE, SEARCH_DESIGNER_SUFFIX.format(word=username)) try: response = session_request(search_url) except requests.exceptions.ProxyError: cprint('Cannot connect to proxy.', 'red') sys.exit(1) except Exception as e: cprint(f'Failed to connect to {search_url}, {e}', 'red') sys.exit(1) author_1st = BeautifulSoup(response.text, 'html.parser').find(name='div', class_='author-info') if (not author_1st) or (author_1st.get('data-name') != username): cprint(f'Username「{username}」does not exist!', 'yellow') sys.exit(1) return author_1st.get('data-id') def reload_records(self, file): """从本地下载记录里读取下载失败的内容。 :param str file: 下载记录文件的路径。 :return str: 用户名 """ with open(file, 'r', encoding='utf-8') as f: for fail in json.loads(f.read()).get('fail'): scrapy = Scrapy._make(fail.values()) if scrapy.type == 'page': self.pages.put(scrapy) elif scrapy.type == 'topic': self.topics.put(scrapy) elif scrapy.type == 'image': self.images.put(scrapy) return scrapy.author def generate_pages(self): """根据最大下载页数,生成需要爬取主页的任务。""" for page in range(1, self.max_pages + 1): suffix = COLLECTION_SUFFIX if self.collection else PAGE_SUFFIX url = urljoin(self.base_url, suffix.format(page=page)) scrapy = Scrapy(type='page', author=self.username, title=page, objid=None, index=page - 1, url=url) if scrapy not in self.stat["pages_pass"]: self.pages.put(scrapy) def parse_collection_topics(self, topics: List[dict], offset: int = 0): for idx, topic in enumerate(topics): new_scrapy = 
Scrapy(type='topic', author=topic.get('creatorObj', {}).get('username'), title=topic.get('title'), objid=topic.get('id'), index=offset + idx, url=topic.get('pageUrl')) if new_scrapy not in self.stat["topics_pass"]: self.topics.put(new_scrapy) self.stat["ntopics"] += 1 def parse_topics(self, scrapy): """爬取主页,解析所有 topic,并将爬取主题的任务添加到任务队列。 :param scrapy: 记录任务信息的数据体 :return Scrapy: 记录任务信息的数据体 """ resp = session_request(scrapy.url) cards = BeautifulSoup(resp.text, 'html.parser').find_all(name='a', class_='card-img-hover') for idx, card in enumerate(cards if self.max_topics == 'all' else cards[:self.max_topics + 1]): title = card.get('title') if self.spec_topics and (title not in self.spec_topics): continue new_scrapy = Scrapy(type='topic', author=scrapy.author, title=title, objid=None, index=idx, url=card.get('href')) if new_scrapy not in self.stat["topics_pass"]: self.topics.put(new_scrapy) self.stat["ntopics"] += 1 return scrapy def fetch_topics(self): """从任务队列中获取要爬取的主页,使用多线程处理得到需要爬取的主题。""" page_futures = {} while True: try: scrapy = self.pages.get(timeout=Q_TIMEOUT) page_futures[self.pool.submit(self.parse_topics, scrapy)] = scrapy except Empty: break except Exception: continue for future in as_completed(page_futures): scrapy = page_futures.get(future) try: future.result() self.stat["pages_pass"].add(scrapy) except Exception: self.stat["pages_fail"].add(scrapy) cprint(f'GET page: {scrapy.title} ({scrapy.url}) failed.', 'red') self.END_PARSING_TOPICS = True def parse_objid(self, url: str, is_collection: bool = False) -> str: """根据 topic 页面解析 objid :param url: topic 或 collection 的 URL :return: objid """ soup = BeautifulSoup(session_request(url).text, 'html.parser') objid = soup.find('input', id='dataInput').attrs.get('data-objid') if is_collection: self._collection_name = soup.find('h2', class_='title-h2').text user = soup.find(name='span', class_='details-user-avatar') self.user_id = user.find('div').attrs.get('data-id') self.username = 
user.find('a').attrs.get('title') return objid def parse_images(self, scrapy): """爬取 topic,获得 objid 后直接调用 API,从返回数据里获得图片地址等信息, 并将下载图片的任务添加到任务队列。 :param scrapy: 记录任务信息的数据体 :return Scrapy: 记录任务信息的数据体 """ objid = scrapy.objid or self.parse_objid(scrapy.url) resp = session_request(urljoin(HOST_PAGE, WORK_SUFFIX.format(objid=objid))) data = resp.json().get('data', {}) author = data.get('product', {}).get('creatorObj', {}).get('username') title = data.get('product', {}).get('title') objid = data.get('product', {}).get('id') for img in data.get('allImageList', []): new_scrapy = Scrapy(type='image', author=author, title=title, objid=objid, index=img.get('orderNo') or 0, url=img.get('url')) if new_scrapy not in self.stat["images_pass"]: self.im
fetch_futures = [self.pool.submit(self.fetch_topics), self.pool.submit(self.fetch_images)] end_show_fetch = False t = threading.Thread(target=self.show_fetch_status, kwargs={'end': lambda: end_show_fetch}) t.start() try: wait(fetch_futures) except KeyboardInterrupt: raise finally: end_show_fetch = True t.join() def show_fetch_status(self, interval=0.5, end=None): """用于后台线程,实现边爬取边显示状态。 :param int interval: 状态更新间隔,秒 :param function end: 用于控制退出线程 """ while True: status = 'Fetched Pages: {pages}\tTopics: {topics}\tImages: {images}'.format( pages=colored(str(self.max_pages).rjust(3), 'blue'), topics=colored(str(self.stat["ntopics"]).rjust(3), 'blue'), images=colored(str(self.stat["nimages"]).rjust(5), 'blue')) print(status, end='\r', flush=True) if (interval == 0) or (end and end()): print('\n') break time.sleep(interval) def show_download_status(self, interval=0.5, end=None): """用于后台线程,实现边下载边显示状态。 :param int interval: 状态更新间隔,秒 :param function end: 用于控制退出线程 """ while True: completed = len(self.stat["images_pass"]) + len(self.stat["images_fail"]) if self.stat["nimages"] > 0: status = 'Time used: {time_used}\tFailed: {failed}\tCompleted: {completed}'.format( time_used=colored(str(datetime.now() - self.start_time)[:-7], 'yellow'), failed=colored(str(len(self.stat["images_fail"])).rjust(3), 'red'), completed=colored(str(int(completed / self.stat["nimages"] * 100)) + f'% ({completed}/{self.stat["nimages"]})', 'green')) print(status, end='\r', flush=True) if (interval == 0) or (end and end()): if self.stat["nimages"] > 0: print('\n') break time.sleep(interval) def download_image(self, scrapy): """下载图片保存到本地。 :param scrapy: 记录任务信息的数据体 :return Scrapy: 记录任务信息的数据体 """ try: name = re.findall(r'(?<=/)\w*?\.(?:jpg|gif|png|bmp)', scrapy.url, re.IGNORECASE)[0] except IndexError: name = uuid4().hex + '.jpg' path = self.directory / safe_filename(scrapy.title) filename = path / f'[{scrapy.index + 1 or 0:02d}]{name}' if (not self.overwrite) and op.isfile(filename): return scrapy url = 
scrapy.url if self.thumbnail: if url.lower().endswith(('jpg', 'png', 'bmp')): url = f'{scrapy.url}@1280w_1l_2o_100sh.{url[-3:]}' resp = session_request(url) mkdirs_if_not_exist(path) with open(filename, 'wb') as f: for chunk in resp.iter_content(8192): f.write(chunk) return scrapy def save_records(self): """将成功及失败的下载记录保存到本地文件。 :return str: 记录文件的路径 """ filename = f'{safe_filename(self.start_time.isoformat()[:-7])}.json' abspath = op.abspath(self.directory / filename) with open(abspath, 'w', encoding='utf-8') as f: success = (self.stat["pages_pass"] | self.stat["topics_pass"] | self.stat["images_pass"]) fail = (self.stat["pages_fail"] | self.stat["topics_fail"] | self.stat["images_fail"]) type_order = {'page': 1, 'topic': 2, 'image': 3} s_ordered = sort_records(success, order=type_order) f_ordered = sort_records(fail, order=type_order) records = { 'time': self.start_time.isoformat(), 'success': [scrapy._asdict() for scrapy in s_ordered], 'fail': [scrapy._asdict() for scrapy in f_ordered] } f.write(json.dumps(records, ensure_ascii=False, indent=2)) return abspath def run_scraper(self): """使用多线程下载所有图片,完成后保存记录并退出程序。""" end_show_download = False t = threading.Thread(target=self.show_download_status, kwargs={'end': lambda: end_show_download}) t.start() image_futuress = {} while True: try: scrapy = self.images.get_nowait() if scrapy not in self.stat["images_pass"]: image_futuress[self.pool.submit(self.download_image, scrapy)] = scrapy except Empty: break except KeyboardInterrupt: raise except Exception: continue try: for future in as_completed(image_futuress): scrapy = image_futuress.get(future) try: future.result() self.stat["images_pass"].add(scrapy) except Exception: self.stat["images_fail"].add(scrapy) cprint(f'Download image: {scrapy.title}[{scrapy.index + 1}] ' f'({scrapy.url}) failed.', 'red') except KeyboardInterrupt: raise finally: end_show_download = True t.join() saved_images = len(self.stat["images_pass"]) failed_images = len(self.stat["images_fail"]) if 
saved_images or failed_images: if saved_images: print(f'Saved {colored(saved_images, "green")} images to ' f'{colored(self.directory.absolute(), attrs=["underline"])}') records_path = self.save_records() print(f'Saved records to {colored(records_path, attrs=["underline"])}') else: cprint('No images to download.', 'yellow')
ages.put(new_scrapy) self.stat["nimages"] += 1 return scrapy def fetch_images(self): """从任务队列中获取要爬取的主题,使用多线程处理得到需要下载的图片。""" image_futures = {} while True: try: scrapy = self.topics.get(timeout=Q_TIMEOUT) image_futures[self.pool.submit(self.parse_images, scrapy)] = scrapy except Empty: if self.END_PARSING_TOPICS: break except Exception: continue for future in as_completed(image_futures): scrapy = image_futures.get(future) try: future.result() self.stat["topics_pass"].add(scrapy) except Exception: self.stat["topics_fail"].add(scrapy) cprint(f'GET topic: {scrapy.title} ({scrapy.url}) failed.', 'red') def fetch_all(self, initialized: bool = False): """同时爬取主页、主题,并更新状态。""" if not initialized: self.generate_pages()
identifier_body
zcooldl.py
# @Author: lonsty # @Date: 2019-09-07 18:34:18 import json import math import os.path as op import re import sys import threading import time from collections import namedtuple from concurrent.futures import ThreadPoolExecutor, as_completed, wait from datetime import datetime from pathlib import Path from queue import Empty, Queue from typing import List from urllib.parse import urljoin, urlparse from uuid import uuid4 import requests from bs4 import BeautifulSoup from termcolor import colored, cprint from zcooldl.utils import (mkdirs_if_not_exist, retry, safe_filename, sort_records) Scrapy = namedtuple('Scrapy', 'type author title objid index url') # 用于记录下载任务 HEADERS = { 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 ' '(KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36' } HOST_PAGE = 'https://www.zcool.com.cn' SEARCH_DESIGNER_SUFFIX = '/search/designer?&word={word}' USER_SUFFIX = '/u/{id}' PAGE_SUFFIX = '?myCate=0&sort=1&p={page}' WORK_SUFFIX = '/work/content/show?p=1&objectId={objid}' COLLECTION_SUFFIX = '/collection/contents?id={objid}&p={page}&pageSize=25' USER_API = 'https://www.zcool.com.cn/member/card/{id}' TIMEOUT = 30 Q_TIMEOUT = 1 MAX_WORKERS = 20 RETRIES = 3 thread_local = threading.local() def get_session(): """使线程获取同一个 Session,可减少 TCP 连接数,加速请求。 :return requests.Session: session """ if not hasattr(thread_local, "session"): thread_local.session = requests.Session() return thread_local.session @retry(Exception, tries=RETRIES) def session_request(url: str, method: str = 'GET') -> requests.Response: """使用 session 请求数据。使用了装饰器 retry,在网络异常导致错误时会重试。 :param str url: 目标请求 URL :param str method: 请求方式 :return requests.Response: 响应数据 """ resp = get_session().request(method, url, headers=HEADERS, timeout=TIMEOUT) resp.raise_for_status() return resp class ZCoolScraper(): def __init__(self, user_id=None, username=None, collection=None, destination=None, max_pages=None, spec_topics=None, max_topics=None, max_workers=None, retries=None, 
redownload=None, overwrite=False, thumbnail=False): """初始化下载参数。 :param int user_id: 用户 ID :param str username: 用户名 :param HttpUrl collection: 收藏集 URL :param str destination: 图片保存到本地的路径,默认当前路径 :param int max_pages: 最大爬取页数,默认所有 :param list spec_topics: 需要下载的特定主题 :param int max_topics: 最大下载主题数量,默认所有 :param int max_workers: 线程开启个数,默认 20 :param int retries: 请求异常时的重试次数,默认 3 :param str redownload: 下载记录文件,给定此文件则从失败记录进行下载 :param bool overwrite: 是否覆盖已存在的文件,默认 False :param bool thumbnail: 是否下载缩略图,默认 False """ self.start_time = datetime.now() print(f' - - - - - -+-+ {self.start_time.ctime()} +-+- - - - - -\n') self.collection = collection self.spec_topics = spec_topics self.max_topics = max_topics or 'all' self.max_workers = max_workers or MAX_WORKERS self.pool = ThreadPoolExecutor(self.max_workers) self.overwrite = overwrite self.thumbnail = thumbnail self.pages = Queue() self.topics = Queue() self.images = Queue() self.stat = { 'npages': 0, 'ntopics': 0, 'nimages': 0, 'pages_pass': set(), 'pages_fail': set(), 'topics_pass': set(), 'topics_fail': set(), 'images_pass': set(), 'images_fail': set() } if retries: # 重置全局变量 RETRIES global RETRIES RETRIES = retries dest = Path(destination or '', urlparse(HOST_PAGE).netloc) # 从记录文件中的失败项开始下载 if redownload: self.username = self.reload_records(redownload) self.user_id = self.search_id_by_username(self.username) self.max_pages = self.pages.qsize() self.max_topics = self.topics.qsize() self.directory = dest / safe_filename(self.username) self.stat.update({ 'npages': self.max_pages, 'ntopics': self.max_topics, 'nimages': self.images.qsize() }) print(f'{"Username".rjust(17)}: {colored(self.username, "cyan")}\n' f'{"User ID".rjust(17)}: {self.user_id}\n' f'{"Pages to scrapy".rjust(17)}: {self.max_pages:2d}\n' f'{"Topics to scrapy".rjust(17)}: {self.max_topics:3d}\n' f'{"Images to scrapy".rjust(17)}: {self.images.qsize():4d}\n' f'Storage directory: {colored(self.directory, attrs=["underline"])}', end='\n\n') self.fetch_all(initialized=True) 
return # 从收藏集下载 if collection: objid = self.parse_objid(collection, is_collection=True) resp = session_request(urljoin(HOST_PAGE, COLLECTION_SUFFIX.format(objid=objid, page=1))) data = resp.json().get('data', {}) total = data.get('total', 0) page_size = data.get('pageable', {}).get('pageSize') max_pages_ = math.ceil(total / page_size) self.max_pages = min(max_pages or 9999, max_pages_) self.directory = dest / safe_filename(f'{self.username}-{self._collection_name}') self.parse_collection_topics(data.get('content')) # 解析第 2 页 至 最大页的 topic 到下载任务 for page in range(2, self.max_pages + 1): resp = session_request(urljoin(HOST_PAGE, COLLECTION_SUFFIX.format(objid=objid, page=page))) self.parse_collection_topics(topics=resp.json().get('data', {}).get('content'), offset=page_size * (page - 1)) # 根据用户 ID 或用户名下载 else: self.user_id = user_id or self.search_id_by_username(username) self.base_url = urljoin(HOST_PAGE, USER_SUFFIX.format(id=self.user_id)) try: response = session_request(self.base_url) except requests.exceptions.ProxyError: cprint('Cannot connect to proxy.', 'red') sys.exit(1) except Exception as e: cprint(f'Failed to connect to {self.base_url}, {e}', 'red') sys.exit(1) soup = BeautifulSoup(markup=response.text, features='html.parser') try: author = soup.find(name='div', id='body').get('data-name') if username and username != author: cprint(f'Invalid user id:「{user_id}」or username:「{username}」!', 'red') sys.exit(1) self.username = author except Exception: self.username = username or 'anonymous' self.directory = dest / safe_filename(self.username) try: max_pages_ = int(soup.find(id='laypage_0').find_all(name='a')[-2].text) except Exception: max_pages_ = 1 self.max_pages = min(max_pages or 9999, max_pages_) if self.spec_topics: topics = ', '.join(self.spec_topics) elif self.max_topics == 'all': topics = 'all' else: topics = self.max_pages * self.max_topics print(f'{"Username".rjust(17)}: {colored(self.username, "cyan")}\n' f'{"User ID".rjust(17)}: {self.user_id}\n' 
f'{"Maximum pages".rjust(17)}: {max_pages_}\n' f'{"Pages to scrapy".rjust(17)}: {self.max_pages}\n' f'{"Topics to scrapy".rjust(17)}: {topics}\n' f'Storage directory: {colored(self.directory, attrs=["underline"])}', end='\n\n') self.END_PARSING_TOPICS = False self.fetch_all(initialized=True if self.collection else False) def search_id_by_username(self, username): """通过用户昵称查找用户 ID。 :param str username: 用户昵称 :return int: 用户 ID """ if not username: cprint('Must give an <user id> or <username>!', 'yellow') sys.exit(1) search_url = urljoin(HOST_PAGE, SEARCH_DESIGNER_SUFFIX.format(word=username)) try: response = session_request(search_url) except requests.exceptions.ProxyError: cprint('Cannot connect to proxy.', 'red') sys.exit(1) except Exception as e: cprint(f'Failed to connect to {search_url}, {e}', 'red') sys.exit(1) author_1st = BeautifulSoup(response.text, 'html.parser').find(name='div', class_='author-info') if (not author_1st) or (author_1st.get('data-name') != username): cprint(f'Username「{username}」does not exist!', 'yellow') sys.exit(1) return author_1st.get('data-id') def reload_records(self, file): """从本地下载记录里读取下载失败的内容。 :param str file: 下载记录文件的路径。 :return str: 用户名 """ with open(file, 'r', encoding='utf-8') as f: for fail in json.loads(f.read()).get('fail'): scrapy = Scrapy._make(fail.values()) if scrapy.type == 'page': self.pages.put(scrapy) elif scrapy.type == 'topic': self.topics.put(scrapy) elif scrapy.type == 'image': self.images.put(scrapy) return scrapy.author def generate_pages(self): """根据最大下载页数,生成需要爬取主页的任务。""" for page in range(1, self.max_pages + 1): suffix = COLLECTION_SUFFIX if self.collection else PAGE_SUFFIX url = urljoin(self.base_url, suffix.format(page=page)) scrapy = Scrapy(type='page', author=self.username, title=page, objid=None, index=page - 1, url=url) if scrapy not in self.stat["pages_pass"]: self.pages.put(scrapy) def parse_collection_topics(self, topics: List[dict], offset: int = 0): for idx, topic in enumerate(topics): new_scrapy = 
Scrapy(type='topic', author=topic.get('creatorObj', {}).get('username'), title=topic.get('title'), objid=topic.get('id'), index=offset + idx, url=topic.get('pageUrl')) if new_scrapy not in self.stat["topics_pass"]: self.topics.put(new_scrapy) self.stat["ntopics"] += 1 def parse_topics(self, scrapy): """爬取主页,解析所有 topic,并将爬取主题的任务添加到任务队列。 :param scrapy: 记录任务信息的数据体 :return Scrapy: 记录任务信息的数据体 """ resp = session_request(scrapy.url) cards = BeautifulSoup(resp.text, 'html.parser').find_all(name='a', class_='card-img-hover') for idx, card in enumerate(cards if self.max_topics == 'all' else cards[:self.max_topics + 1]): title = card.get('title') if self.spec_topics and (title not in self.spec_topics): continue new_scrapy = Scrapy(type='topic', author=scrapy.author, title=title, objid=None, index=idx, url=card.get('href')) if new_scrapy not in self.stat["topics_pass"]: self.topics.put(new_scrapy) self.stat["ntopics"] += 1 return scrapy def fetch_topics(self): """从任务队列中获取要爬取的主页,使用多线程处理得到需要爬取的主题。""" page_futures = {} while True: try: scrapy = self.pages.get(timeout=Q_TIMEOUT) page_futures[self.pool.submit(self.parse_topics, scrapy)] = scrapy except Empty: break except Exception: continue for future in as_completed(page_futures): scrapy = page_futures.get(future) try: future.result() self.stat["pages_pass"].add(scrapy) except Exception: self.stat["pages_fail"].add(scrapy) cprint(f'GET page: {scrapy.title} ({scrapy.url}) failed.', 'red') self.END_PARSING_TOPICS = True def parse_objid(self, url: str, is_collection: bool = False) -> str: """根据 topic 页面解析 objid :param url: topic 或 collection 的 URL :return: objid """ soup = BeautifulSoup(session_request(url).text, 'html.parser') objid = soup.find('input', id='dataInput').attrs.get('data-objid') if is_collection: self._collection_name = soup.find('h2', class_='title-h2').text user = soup.find(name='span', class_='details-user-avatar') self.user_id = user.find('div').attrs.get('data-id') self.username = 
user.find('a').attrs.get('title') return objid def parse_images(self, scrapy): """爬取 topic,获得 objid 后直接调用 API,从返回数据里获得图片地址等信息, 并将下载图片的任务添加到任务队列。 :param scrapy: 记录任务信息的数据体 :return Scrapy: 记录任务信息的数据体 """ objid = scrapy.objid or self.parse_objid(scrapy.url) resp = session_request(urljoin(HOST_PAGE, WORK_SUFFIX.format(objid=objid))) data = resp.json().get('data', {}) author = data.get('product', {}).get('creatorObj', {}).get('username') title = data.get('product', {}).get('title') objid = data.get('product', {}).get('id') for img in data.get('allImageList', []): new_scrapy = Scrapy(type='image', author=author, title=title, objid=objid, index=img.get('orderNo') or 0, url=img.get('url')) if new_scrapy not in self.stat["images_pass"]: self.images.put(new_scrapy) self.stat["nimages"] += 1 return scrapy def fetch_images(self): """从任务队列中获取要爬取的主题,使用多线程处理得到需要下载的图片。""" image_futures = {} while True: try: scrapy = self.topics.get(timeout=Q_TIMEOUT) image_futures[self.pool.submit(self.parse_images, scrapy)] = scrapy except Empty: if self.END_PARSING_TOPICS: break except Exception: continue for future in as_completed(image_futures): scrapy = image_futures.get(future) try: future.result() self.stat["topics_pass"].add(scrapy) except Exception: self.stat["topics_fail"].add(scrapy) cprint(f'GET topic: {scrapy.title} ({scrapy.url}) failed.', 'red') def fetch_all(self, initialized: bool = False): """同时爬取主页、主题,并更新状态。""" if not initialized: self.generate_pages() fetch_futures = [self.pool.submit(self.fetch_topics), self.pool.submit(self.fetch_images)] end_show_fetch = False t = threading.Thread(target=self.show_fetch_status, kwargs={'end': lambda: end_show_fetch}) t.start() try: wait(fetch_futures) except KeyboardInterrupt: raise finally: end_show_fetch = True t.join() def show_fetch_status(self, interval=0.5, end=None): """用于后台线程,实现边爬取边显示状态。 :param int interval: 状态更新间隔,秒 :param function end: 用于控制退出线程 """ while True: status = 'Fetched Pages: {pages}\tTopics: {topics}\tImages: 
{images}'.format( pages=colored(str(self.max_pages).rjust(3), 'blue'), topics=colored(str(self.stat["ntopics"]).rjust(3), 'blue'), images=colored(str(self.stat["nimages"]).rjust(5), 'blue')) print(status, end='\r', flush=True) if (interval == 0) or (end and end()): print('\n') break time.sleep(interval) def show_download_status(self, interval=0.5, end=None): """用于后台线程,实现边下载边显示状态。 :param int interval: 状态更新间隔,秒 :param function end: 用于控制退出线程 """ while True: completed = len(self.stat["images_pass"]) + len(self.stat["images_fail"]) if self.stat["nimages"] > 0: status = 'Time used: {time_used}\tFailed: {failed}\tCompleted: {completed}'.format( time_used=colored(str(datetime.now() - self.start_time)[:-7], 'yellow'), failed=colored(str(len(self.stat["images_fail"])).rjust(3), 'red'), completed=colored(str(int(completed / self.stat["nimages"] * 100)) + f'% ({completed}/{self.stat["nimages"]})', 'green')) print(status, end='\r', flush=True) if (interval == 0) or (end and end()): if self.stat["nimages"] > 0: print('\n') break time.sleep(interval) def download_image(self,
:param scrapy: 记录任务信息的数据体 :return Scrapy: 记录任务信息的数据体 """ try: name = re.findall(r'(?<=/)\w*?\.(?:jpg|gif|png|bmp)', scrapy.url, re.IGNORECASE)[0] except IndexError: name = uuid4().hex + '.jpg' path = self.directory / safe_filename(scrapy.title) filename = path / f'[{scrapy.index + 1 or 0:02d}]{name}' if (not self.overwrite) and op.isfile(filename): return scrapy url = scrapy.url if self.thumbnail: if url.lower().endswith(('jpg', 'png', 'bmp')): url = f'{scrapy.url}@1280w_1l_2o_100sh.{url[-3:]}' resp = session_request(url) mkdirs_if_not_exist(path) with open(filename, 'wb') as f: for chunk in resp.iter_content(8192): f.write(chunk) return scrapy def save_records(self): """将成功及失败的下载记录保存到本地文件。 :return str: 记录文件的路径 """ filename = f'{safe_filename(self.start_time.isoformat()[:-7])}.json' abspath = op.abspath(self.directory / filename) with open(abspath, 'w', encoding='utf-8') as f: success = (self.stat["pages_pass"] | self.stat["topics_pass"] | self.stat["images_pass"]) fail = (self.stat["pages_fail"] | self.stat["topics_fail"] | self.stat["images_fail"]) type_order = {'page': 1, 'topic': 2, 'image': 3} s_ordered = sort_records(success, order=type_order) f_ordered = sort_records(fail, order=type_order) records = { 'time': self.start_time.isoformat(), 'success': [scrapy._asdict() for scrapy in s_ordered], 'fail': [scrapy._asdict() for scrapy in f_ordered] } f.write(json.dumps(records, ensure_ascii=False, indent=2)) return abspath def run_scraper(self): """使用多线程下载所有图片,完成后保存记录并退出程序。""" end_show_download = False t = threading.Thread(target=self.show_download_status, kwargs={'end': lambda: end_show_download}) t.start() image_futuress = {} while True: try: scrapy = self.images.get_nowait() if scrapy not in self.stat["images_pass"]: image_futuress[self.pool.submit(self.download_image, scrapy)] = scrapy except Empty: break except KeyboardInterrupt: raise except Exception: continue try: for future in as_completed(image_futuress): scrapy = image_futuress.get(future) try: 
future.result() self.stat["images_pass"].add(scrapy) except Exception: self.stat["images_fail"].add(scrapy) cprint(f'Download image: {scrapy.title}[{scrapy.index + 1}] ' f'({scrapy.url}) failed.', 'red') except KeyboardInterrupt: raise finally: end_show_download = True t.join() saved_images = len(self.stat["images_pass"]) failed_images = len(self.stat["images_fail"]) if saved_images or failed_images: if saved_images: print(f'Saved {colored(saved_images, "green")} images to ' f'{colored(self.directory.absolute(), attrs=["underline"])}') records_path = self.save_records() print(f'Saved records to {colored(records_path, attrs=["underline"])}') else: cprint('No images to download.', 'yellow')
scrapy): """下载图片保存到本地。
conditional_block
zcooldl.py
# @Author: lonsty # @Date: 2019-09-07 18:34:18 import json import math import os.path as op import re import sys import threading import time from collections import namedtuple from concurrent.futures import ThreadPoolExecutor, as_completed, wait from datetime import datetime from pathlib import Path from queue import Empty, Queue from typing import List from urllib.parse import urljoin, urlparse from uuid import uuid4 import requests from bs4 import BeautifulSoup from termcolor import colored, cprint from zcooldl.utils import (mkdirs_if_not_exist, retry, safe_filename, sort_records) Scrapy = namedtuple('Scrapy', 'type author title objid index url') # 用于记录下载任务 HEADERS = { 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 ' '(KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36' } HOST_PAGE = 'https://www.zcool.com.cn' SEARCH_DESIGNER_SUFFIX = '/search/designer?&word={word}' USER_SUFFIX = '/u/{id}' PAGE_SUFFIX = '?myCate=0&sort=1&p={page}' WORK_SUFFIX = '/work/content/show?p=1&objectId={objid}' COLLECTION_SUFFIX = '/collection/contents?id={objid}&p={page}&pageSize=25' USER_API = 'https://www.zcool.com.cn/member/card/{id}' TIMEOUT = 30 Q_TIMEOUT = 1 MAX_WORKERS = 20 RETRIES = 3 thread_local = threading.local() def get_session(): """使线程获取同一个 Session,可减少 TCP 连接数,加速请求。 :return requests.Session: session """ if not hasattr(thread_local, "session"): thread_local.session = requests.Session() return thread_local.session @retry(Exception, tries=RETRIES) def session_request(url: str, method: str = 'GET') -> requests.Response: """使用 session 请求数据。使用了装饰器 retry,在网络异常导致错误时会重试。 :param str url: 目标请求 URL :param str method: 请求方式 :return requests.Response: 响应数据 """ resp = get_session().request(method, url, headers=HEADERS, timeout=TIMEOUT) resp.raise_for_status() return resp class ZCoolScraper(): def __init__(self, user_id=None, username=None, collection=None, destination=None, max_pages=None, spec_topics=None, max_topics=None, max_workers=None, retries=None, 
redownload=None, overwrite=False, thumbnail=False): """初始化下载参数。 :param int user_id: 用户 ID :param str username: 用户名 :param HttpUrl collection: 收藏集 URL :param str destination: 图片保存到本地的路径,默认当前路径 :param int max_pages: 最大爬取页数,默认所有 :param list spec_topics: 需要下载的特定主题 :param int max_topics: 最大下载主题数量,默认所有 :param int max_workers: 线程开启个数,默认 20 :param int retries: 请求异常时的重试次数,默认 3 :param str redownload: 下载记录文件,给定此文件则从失败记录进行下载 :param bool overwrite: 是否覆盖已存在的文件,默认 False :param bool thumbnail: 是否下载缩略图,默认 False """ self.start_time = datetime.now() print(f' - - - - - -+-+ {self.start_time.ctime()} +-+- - - - - -\n') self.collection = collection self.spec_topics = spec_topics self.max_topics = max_topics or 'all' self.max_workers = max_workers or MAX_WORKERS self.pool = ThreadPoolExecutor(self.max_workers) self.overwrite = overwrite self.thumbnail = thumbnail self.pages = Queue() self.topics = Queue() self.images = Queue() self.stat = { 'npages': 0, 'ntopics': 0, 'nimages': 0, 'pages_pass': set(), 'pages_fail': set(), 'topics_pass': set(), 'topics_fail': set(), 'images_pass': set(), 'images_fail': set() } if retries: # 重置全局变量 RETRIES global RETRIES RETRIES = retries dest = Path(destination or '', urlparse(HOST_PAGE).netloc) # 从记录文件中的失败项开始下载 if redownload: self.username = self.reload_records(redownload) self.user_id = self.search_id_by_username(self.username) self.max_pages = self.pages.qsize() self.max_topics = self.topics.qsize() self.directory = dest / safe_filename(self.username) self.stat.update({ 'npages': self.max_pages, 'ntopics': self.max_topics, 'nimages': self.images.qsize() }) print(f'{"Username".rjust(17)}: {colored(self.username, "cyan")}\n' f'{"User ID".rjust(17)}: {self.user_id}\n' f'{"Pages to scrapy".rjust(17)}: {self.max_pages:2d}\n' f'{"Topics to scrapy".rjust(17)}: {self.max_topics:3d}\n' f'{"Images to scrapy".rjust(17)}: {self.images.qsize():4d}\n' f'Storage directory: {colored(self.directory, attrs=["underline"])}', end='\n\n') self.fetch_all(initialized=True) 
return # 从收藏集下载 if collection: objid = self.parse_objid(collection, is_collection=True) resp = session_request(urljoin(HOST_PAGE, COLLECTION_SUFFIX.format(objid=objid, page=1))) data = resp.json().get('data', {}) total = data.get('total', 0) page_size = data.get('pageable', {}).get('pageSize') max_pages_ = math.ceil(total / page_size) self.max_pages = min(max_pages or 9999, max_pages_) self.directory = dest / safe_filename(f'{self.username}-{self._collection_name}') self.parse_collection_topics(data.get('content')) # 解析第 2 页 至 最大页的 topic 到下载任务 for page in range(2, self.max_pages + 1): resp = session_request(urljoin(HOST_PAGE, COLLECTION_SUFFIX.format(objid=objid, page=page))) self.parse_collection_topics(topics=resp.json().get('data', {}).get('content'), offset=page_size * (page - 1)) # 根据用户 ID 或用户名下载 else: self.user_id = user_id or self.search_id_by_username(username) self.base_url = urljoin(HOST_PAGE, USER_SUFFIX.format(id=self.user_id)) try: response = session_request(self.base_url) except requests.exceptions.ProxyError: cprint('Cannot connect to proxy.', 'red') sys.exit(1) except Exception as e: cprint(f'Failed to connect to {self.base_url}, {e}', 'red') sys.exit(1) soup = BeautifulSoup(markup=response.text, features='html.parser') try: author = soup.find(name='div', id='body').get('data-name') if username and username != author: cprint(f'Invalid user id:「{user_id}」or username:「{username}」!', 'red') sys.exit(1) self.username = author except Exception: self.username = username or 'anonymous' self.directory = dest / safe_filename(self.username) try: max_pages_ = int(soup.find(id='laypage_0').find_all(name='a')[-2].text) except Exception: max_pages_ = 1 self.max_pages = min(max_pages or 9999, max_pages_) if self.spec_topics: topics = ', '.join(self.spec_topics) elif self.max_topics == 'all': topics = 'all' else: topics = self.max_pages * self.max_topics print(f'{"Username".rjust(17)}: {colored(self.username, "cyan")}\n' f'{"User ID".rjust(17)}: {self.user_id}\n' 
f'{"Maximum pages".rjust(17)}: {max_pages_}\n' f'{"Pages to scrapy".rjust(17)}: {self.max_pages}\n' f'{"Topics to scrapy".rjust(17)}: {topics}\n' f'Storage directory: {colored(self.directory, attrs=["underline"])}', end='\n\n') self.END_PARSING_TOPICS = False self.fetch_all(initialized=True if self.collection else False) def search_id_by_username(self, username): """通过用户昵称查找用户 ID。 :param str username: 用户昵称 :return int: 用户 ID """ if not username: cprint('Must give an <user id> or <username>!', 'yellow') sys.exit(1) search_url = urljoin(HOST_PAGE, SEARCH_DESIGNER_SUFFIX.format(word=username)) try: response = session_request(search_url) except requests.exceptions.ProxyError: cprint('Cannot connect to proxy.', 'red') sys.exit(1) except Exception as e: cprint(f'Failed to connect to {search_url}, {e}', 'red') sys.exit(1) author_1st = BeautifulSoup(response.text, 'html.parser').find(name='div', class_='author-info') if (not author_1st) or (author_1st.get('data-name') != username): cprint(f'Username「{username}」does not exist!', 'yellow') sys.exit(1) return author_1st.get('data-id') def reload_records(self, file): """从本地下载记录里读取下载失败的内容。 :param str file: 下载记录文件的路径。 :return str: 用户名 """ with open(file, 'r', encoding='utf-8') as f: for fail in json.loads(f.read()).get('fail'): scrapy = Scrapy._make(fail.values()) if scrapy.type == 'page': self.pages.put(scrapy) elif scrapy.type == 'topic': self.topics.put(scrapy) elif scrapy.type == 'image': self.images.put(scrapy) return scrapy.author def generate_pages(self): """根据最大下载页数,生成需要爬取主页的任务。""" for page in range(1, self.max_pages + 1): suffix = COLLECTION_SUFFIX if self.collection else PAGE_SUFFIX url = urljoin(self.base_url, suffix.format(page=page)) scrapy = Scrapy(type='page', author=self.username, title=page, objid=None, index=page - 1, url=url) if scrapy not in self.stat["pages_pass"]: self.pages.put(scrapy) def parse_collection_topics(self, topics: List[dict], offset: int = 0): for idx, topic in enumerate(topics): new_scrapy = 
Scrapy(type='topic', author=topic.get('creatorObj', {}).get('username'), title=topic.get('title'), objid=topic.get('id'), index=offset + idx, url=topic.get('pageUrl')) if new_scrapy not in self.stat["topics_pass"]: self.topics.put(new_scrapy) self.stat["ntopics"] += 1
:return Scrapy: 记录任务信息的数据体 """ resp = session_request(scrapy.url) cards = BeautifulSoup(resp.text, 'html.parser').find_all(name='a', class_='card-img-hover') for idx, card in enumerate(cards if self.max_topics == 'all' else cards[:self.max_topics + 1]): title = card.get('title') if self.spec_topics and (title not in self.spec_topics): continue new_scrapy = Scrapy(type='topic', author=scrapy.author, title=title, objid=None, index=idx, url=card.get('href')) if new_scrapy not in self.stat["topics_pass"]: self.topics.put(new_scrapy) self.stat["ntopics"] += 1 return scrapy def fetch_topics(self): """从任务队列中获取要爬取的主页,使用多线程处理得到需要爬取的主题。""" page_futures = {} while True: try: scrapy = self.pages.get(timeout=Q_TIMEOUT) page_futures[self.pool.submit(self.parse_topics, scrapy)] = scrapy except Empty: break except Exception: continue for future in as_completed(page_futures): scrapy = page_futures.get(future) try: future.result() self.stat["pages_pass"].add(scrapy) except Exception: self.stat["pages_fail"].add(scrapy) cprint(f'GET page: {scrapy.title} ({scrapy.url}) failed.', 'red') self.END_PARSING_TOPICS = True def parse_objid(self, url: str, is_collection: bool = False) -> str: """根据 topic 页面解析 objid :param url: topic 或 collection 的 URL :return: objid """ soup = BeautifulSoup(session_request(url).text, 'html.parser') objid = soup.find('input', id='dataInput').attrs.get('data-objid') if is_collection: self._collection_name = soup.find('h2', class_='title-h2').text user = soup.find(name='span', class_='details-user-avatar') self.user_id = user.find('div').attrs.get('data-id') self.username = user.find('a').attrs.get('title') return objid def parse_images(self, scrapy): """爬取 topic,获得 objid 后直接调用 API,从返回数据里获得图片地址等信息, 并将下载图片的任务添加到任务队列。 :param scrapy: 记录任务信息的数据体 :return Scrapy: 记录任务信息的数据体 """ objid = scrapy.objid or self.parse_objid(scrapy.url) resp = session_request(urljoin(HOST_PAGE, WORK_SUFFIX.format(objid=objid))) data = resp.json().get('data', {}) author = data.get('product', 
{}).get('creatorObj', {}).get('username') title = data.get('product', {}).get('title') objid = data.get('product', {}).get('id') for img in data.get('allImageList', []): new_scrapy = Scrapy(type='image', author=author, title=title, objid=objid, index=img.get('orderNo') or 0, url=img.get('url')) if new_scrapy not in self.stat["images_pass"]: self.images.put(new_scrapy) self.stat["nimages"] += 1 return scrapy def fetch_images(self): """从任务队列中获取要爬取的主题,使用多线程处理得到需要下载的图片。""" image_futures = {} while True: try: scrapy = self.topics.get(timeout=Q_TIMEOUT) image_futures[self.pool.submit(self.parse_images, scrapy)] = scrapy except Empty: if self.END_PARSING_TOPICS: break except Exception: continue for future in as_completed(image_futures): scrapy = image_futures.get(future) try: future.result() self.stat["topics_pass"].add(scrapy) except Exception: self.stat["topics_fail"].add(scrapy) cprint(f'GET topic: {scrapy.title} ({scrapy.url}) failed.', 'red') def fetch_all(self, initialized: bool = False): """同时爬取主页、主题,并更新状态。""" if not initialized: self.generate_pages() fetch_futures = [self.pool.submit(self.fetch_topics), self.pool.submit(self.fetch_images)] end_show_fetch = False t = threading.Thread(target=self.show_fetch_status, kwargs={'end': lambda: end_show_fetch}) t.start() try: wait(fetch_futures) except KeyboardInterrupt: raise finally: end_show_fetch = True t.join() def show_fetch_status(self, interval=0.5, end=None): """用于后台线程,实现边爬取边显示状态。 :param int interval: 状态更新间隔,秒 :param function end: 用于控制退出线程 """ while True: status = 'Fetched Pages: {pages}\tTopics: {topics}\tImages: {images}'.format( pages=colored(str(self.max_pages).rjust(3), 'blue'), topics=colored(str(self.stat["ntopics"]).rjust(3), 'blue'), images=colored(str(self.stat["nimages"]).rjust(5), 'blue')) print(status, end='\r', flush=True) if (interval == 0) or (end and end()): print('\n') break time.sleep(interval) def show_download_status(self, interval=0.5, end=None): """用于后台线程,实现边下载边显示状态。 :param int interval: 
状态更新间隔,秒 :param function end: 用于控制退出线程 """ while True: completed = len(self.stat["images_pass"]) + len(self.stat["images_fail"]) if self.stat["nimages"] > 0: status = 'Time used: {time_used}\tFailed: {failed}\tCompleted: {completed}'.format( time_used=colored(str(datetime.now() - self.start_time)[:-7], 'yellow'), failed=colored(str(len(self.stat["images_fail"])).rjust(3), 'red'), completed=colored(str(int(completed / self.stat["nimages"] * 100)) + f'% ({completed}/{self.stat["nimages"]})', 'green')) print(status, end='\r', flush=True) if (interval == 0) or (end and end()): if self.stat["nimages"] > 0: print('\n') break time.sleep(interval) def download_image(self, scrapy): """下载图片保存到本地。 :param scrapy: 记录任务信息的数据体 :return Scrapy: 记录任务信息的数据体 """ try: name = re.findall(r'(?<=/)\w*?\.(?:jpg|gif|png|bmp)', scrapy.url, re.IGNORECASE)[0] except IndexError: name = uuid4().hex + '.jpg' path = self.directory / safe_filename(scrapy.title) filename = path / f'[{scrapy.index + 1 or 0:02d}]{name}' if (not self.overwrite) and op.isfile(filename): return scrapy url = scrapy.url if self.thumbnail: if url.lower().endswith(('jpg', 'png', 'bmp')): url = f'{scrapy.url}@1280w_1l_2o_100sh.{url[-3:]}' resp = session_request(url) mkdirs_if_not_exist(path) with open(filename, 'wb') as f: for chunk in resp.iter_content(8192): f.write(chunk) return scrapy def save_records(self): """将成功及失败的下载记录保存到本地文件。 :return str: 记录文件的路径 """ filename = f'{safe_filename(self.start_time.isoformat()[:-7])}.json' abspath = op.abspath(self.directory / filename) with open(abspath, 'w', encoding='utf-8') as f: success = (self.stat["pages_pass"] | self.stat["topics_pass"] | self.stat["images_pass"]) fail = (self.stat["pages_fail"] | self.stat["topics_fail"] | self.stat["images_fail"]) type_order = {'page': 1, 'topic': 2, 'image': 3} s_ordered = sort_records(success, order=type_order) f_ordered = sort_records(fail, order=type_order) records = { 'time': self.start_time.isoformat(), 'success': [scrapy._asdict() for 
scrapy in s_ordered], 'fail': [scrapy._asdict() for scrapy in f_ordered] } f.write(json.dumps(records, ensure_ascii=False, indent=2)) return abspath def run_scraper(self): """使用多线程下载所有图片,完成后保存记录并退出程序。""" end_show_download = False t = threading.Thread(target=self.show_download_status, kwargs={'end': lambda: end_show_download}) t.start() image_futuress = {} while True: try: scrapy = self.images.get_nowait() if scrapy not in self.stat["images_pass"]: image_futuress[self.pool.submit(self.download_image, scrapy)] = scrapy except Empty: break except KeyboardInterrupt: raise except Exception: continue try: for future in as_completed(image_futuress): scrapy = image_futuress.get(future) try: future.result() self.stat["images_pass"].add(scrapy) except Exception: self.stat["images_fail"].add(scrapy) cprint(f'Download image: {scrapy.title}[{scrapy.index + 1}] ' f'({scrapy.url}) failed.', 'red') except KeyboardInterrupt: raise finally: end_show_download = True t.join() saved_images = len(self.stat["images_pass"]) failed_images = len(self.stat["images_fail"]) if saved_images or failed_images: if saved_images: print(f'Saved {colored(saved_images, "green")} images to ' f'{colored(self.directory.absolute(), attrs=["underline"])}') records_path = self.save_records() print(f'Saved records to {colored(records_path, attrs=["underline"])}') else: cprint('No images to download.', 'yellow')
def parse_topics(self, scrapy): """爬取主页,解析所有 topic,并将爬取主题的任务添加到任务队列。 :param scrapy: 记录任务信息的数据体
random_line_split
zcooldl.py
# @Author: lonsty # @Date: 2019-09-07 18:34:18 import json import math import os.path as op import re import sys import threading import time from collections import namedtuple from concurrent.futures import ThreadPoolExecutor, as_completed, wait from datetime import datetime from pathlib import Path from queue import Empty, Queue from typing import List from urllib.parse import urljoin, urlparse from uuid import uuid4 import requests from bs4 import BeautifulSoup from termcolor import colored, cprint from zcooldl.utils import (mkdirs_if_not_exist, retry, safe_filename, sort_records) Scrapy = namedtuple('Scrapy', 'type author title objid index url') # 用于记录下载任务 HEADERS = { 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 ' '(KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36' } HOST_PAGE = 'https://www.zcool.com.cn' SEARCH_DESIGNER_SUFFIX = '/search/designer?&word={word}' USER_SUFFIX = '/u/{id}' PAGE_SUFFIX = '?myCate=0&sort=1&p={page}' WORK_SUFFIX = '/work/content/show?p=1&objectId={objid}' COLLECTION_SUFFIX = '/collection/contents?id={objid}&p={page}&pageSize=25' USER_API = 'https://www.zcool.com.cn/member/card/{id}' TIMEOUT = 30 Q_TIMEOUT = 1 MAX_WORKERS = 20 RETRIES = 3 thread_local = threading.local() def get_session(): """使线程获取同一个 Session,可减少 TCP 连接数,加速请求。 :return requests.Session: session """ if not hasattr(thread_local, "session"): thread_local.session = requests.Session() return thread_local.session @retry(Exception, tries=RETRIES) def session_request(url: str, method: str = 'GET') -> requests.Response: """使用 session 请求数据。使用了装饰器 retry,在网络异常导致错误时会重试。 :param str url: 目标请求 URL :param str method: 请求方式 :return requests.Response: 响应数据 """ resp = get_session().request(method, url, headers=HEADERS, timeout=TIMEOUT) resp.raise_for_status() return resp class ZCoolScraper(): def __init__(self, user_id=None, username=None, collection=None, destination=None, max_pages=None, spec_topics=None, max_topics=None, max_workers=None, retries=None, 
redownload=None, overwrite=False, thumbnail=False): """初始化下载参数。 :param int user_id: 用户 ID :param str username: 用户名 :param HttpUrl collection: 收藏集 URL :param str destination: 图片保存到本地的路径,默认当前路径 :param int max_pages: 最大爬取页数,默认所有 :param list spec_topics: 需要下载的特定主题 :param int max_topics: 最大下载主题数量,默认所有 :param int max_workers: 线程开启个数,默认 20 :param int retries: 请求异常时的重试次数,默认 3 :param str redownload: 下载记录文件,给定此文件则从失败记录进行下载 :param bool overwrite: 是否覆盖已存在的文件,默认 False :param bool thumbnail: 是否下载缩略图,默认 False """ self.start_time = datetime.now() print(f' - - - - - -+-+ {self.start_time.ctime()} +-+- - - - - -\n') self.collection = collection self.spec_topics = spec_topics self.max_topics = max_topics or 'all' self.max_workers = max_workers or MAX_WORKERS self.pool = ThreadPoolExecutor(self.max_workers) self.overwrite = overwrite self.thumbnail = thumbnail self.pages = Queue() self.topics = Queue() self.images = Queue() self.stat = { 'npages': 0, 'ntopics': 0, 'nimages': 0, 'pages_pass': set(), 'pages_fail': set(), 'topics_pass': set(), 'topics_fail': set(), 'images_pass': set(), 'images_fail': set() } if retries: # 重置全局变量 RETRIES global RETRIES RETRIES = retries dest = Path(destination or '', urlparse(HOST_PAGE).netloc) # 从记录文件中的失败项开始下载 if redownload: self.username = self.reload_records(redownload) self.user_id = self.search_id_by_username(self.username) self.max_pages = self.pages.qsize() self.max_topics = self.topics.qsize() self.directory = dest / safe_filename(self.username) self.stat.update({ 'npages': self.max_pages, 'ntopics': self.max_topics, 'nimages': self.images.qsize() }) print(f'{"Username".rjust(17)}: {colored(self.username, "cyan")}\n' f'{"User ID".rjust(17)}: {self.user_id}\n' f'{"Pages to scrapy".rjust(17)}: {self.max_pages:2d}\n' f'{"Topics to scrapy".rjust(17)}: {self.max_topics:3d}\n' f'{"Images to scrapy".rjust(17)}: {self.images.qsize():4d}\n' f'Storage directory: {colored(self.directory, attrs=["underline"])}', end='\n\n') self.fetch_all(initialized=True) 
return # 从收藏集下载 if collection: objid = self.parse_objid(collection, is_collection=True) resp = session_request(urljoin(HOST_PAGE, COLLECTION_SUFFIX.format(objid=objid, page=1))) data = resp.json().get('data', {}) total = data.get('total', 0) page_size = data.get('pageable', {}).get('pageSize') max_pages_ = math.ceil(total / page_size) self.max_pages = min(max_pages or 9999, max_pages_) self.directory = dest / safe_filename(f'{self.username}-{self._collection_name}') self.parse_collection_topics(data.get('content')) # 解析第 2 页 至 最大页的 topic 到下载任务 for page in range(2, self.max_pages + 1): resp = session_request(urljoin(HOST_PAGE, COLLECTION_SUFFIX.format(objid=objid, page=page))) self.parse_collection_topics(topics=resp.json().get('data', {}).get('content'), offset=page_size * (page - 1)) # 根据用户 ID 或用户名下载 else: self.user_id = user_id or self.search_id_by_username(username) self.base_url = urljoin(HOST_PAGE, USER_SUFFIX.format(id=self.user_id)) try: response = session_request(self.base_url) except requests.exceptions.ProxyError: cprint('Cannot connect to proxy.', 'red') sys.exit(1) except Exception as e: cprint(f'Failed to connect to {self.base_url}, {e}', 'red') sys.exit(1) soup = BeautifulSoup(markup=response.text, features='html.parser') try: author = soup.find(name='div', id='body').get('data-name') if username and username != author: cprint(f'Invalid user id:「{user_id}」or username:「{username}」!', 'red') sys.exit(1) self.username = author except Exception: self.username = username or 'anonymous' self.directory = dest / safe_filename(self.username) try: max_pages_ = int(soup.find(id='laypage_0').find_all(name='a')[-2].text) except Exception: max_pages_ = 1 self.max_pages = min(max_pages or 9999, max_pages_) if self.spec_topics: topics = ', '.join(self.spec_topics) elif self.max_topics == 'all': topics = 'all' else: topics = self.max_pages * self.max_topics print(f'{"Username".rjust(17)}: {colored(self.username, "cyan")}\n' f'{"User ID".rjust(17)}: {self.user_id}\n' 
f'{"Maximum pages".rjust(17)}: {max_pages_}\n' f'{"Pages to scrapy".rjust(17)}: {self.max_pages}\n' f'{"Topics to scrapy".rjust(17)}: {topics}\n' f'Storage directory: {colored(self.directory, attrs=["underline"])}', end='\n\n') self.END_PARSING_TOPICS = False self.fetch_all(initialized=True if self.collection else False) def search_id_by_username(self, username): """通过用户昵称查找用户 ID。 :param str username: 用户昵称 :return int: 用户 ID """ if not username: cprint('Must give an <user id> or <username>!', 'yellow') sys.exit(1) search_url = urljoin(HOST_PAGE, SEARCH_DESIGNER_SUFFIX.format(word=username)) try: response = session_request(search_url) except requests.exceptions.ProxyError: cprint('Cannot connect to proxy.', 'red') sys.exit(1) except Exception as e: cprint(f'Failed to connect to {search_url}, {e}', 'red') sys.exit(1) author_1st = BeautifulSoup(response.text, 'html.parser').find(name='div', class_='author-info') if (not author_1st) or (author_1st.get('data-name') != username): cprint(f'Username「{username}」does not exist!', 'yellow') sys.exit(1) return author_1st.get('data-id') def reload_records(self, file): """从本地下载记录里读取下载失败的内容。 :param str file: 下载记录文件的路径。 :return str: 用户名 """ with open(file, 'r', encoding='utf-8') as f: for fail in json.loads(f.read()).get('fail'): scrapy = Scrapy._make(fail.values()) if scrapy.type == 'page': self.pages.put(scrapy) elif scrapy.type == 'topic': self.topics.put(scrapy) elif scrapy.type == 'image': self.images.put(scrapy) return scrapy.author def generate_pages(self): """根据最大下载页数,生成需要爬取主页的任务。""" for page in range(1, self.max_pages + 1): suffix = COLLECTION_SUFFIX if self.collection else PAGE_SUFFIX url = urljoin(self.base_url, suffix.format(page=page)) scrapy = Scrapy(type='page', author=self.username, title=page, objid=None, index=page - 1, url=url) if scrapy not in self.stat["pages_pass"]: self.pages.put(scrapy) def parse_collection_topics(self, topics: List[dict], offset: int = 0): for idx, topic in enumerate(topics): new_scrapy = 
Scrapy(type='topic', author=topic.get('creatorObj', {}).get('username'), title=topic.get('title'), objid=topic.get('id'), index=offset + idx, url=topic.get('pageUrl')) if new_scrapy not in self.stat["topics_pass"]: self.topics.put(new_scrapy) self.stat["ntopics"] += 1 def parse_topics(self, scrapy): """爬取主页,解析所有 topic,并将爬取主题的任务添加到任务队列。 :param scrapy: 记录任务信息的数据体 :return Scrapy: 记录任务信息的数据体 """ resp = session_request(scrapy.url) cards = BeautifulSoup(resp.text, 'html.parser').find_all(name='a', class_='card-img-hover') for idx, card in enumerate(cards if self.max_topics == 'all' else cards[:self.max_topics + 1]): title = card.get('title') if self.spec_topics and (title not in self.spec_topics): continue new_scrapy = Scrapy(type='topic', author=scrapy.author, title=title, objid=None, index=idx, url=card.get('href')) if new_scrapy not in self.stat["topics_pass"]: self.topics.put(new_scrapy) self.stat["ntopics"] += 1 return scrapy def fetch_topics(self): """从任务队列中获取要爬取的主页,使用多线程处理得到需要爬取的主题。""" page_futures = {} while True: try: scrapy = self.pages.get(timeout=Q_TIMEOUT) page_futures[self.pool.submit(self.parse_topics, scrapy)] = scrapy except Empty: break except Exception: continue for future in as_completed(page_futures): scrapy = page_futures.get(future) try: future.result() self.stat["pages_pass"].add(scrapy) except Exception: self.stat["pages_fail"].add(scrapy) cprint(f'GET page: {scrapy.title} ({scrapy.url}) failed.', 'red') self.END_PARSING_TOPICS = True def parse_objid(self, url: str, is_collection: bool = False) -> str: """根据 topic 页面解析 objid :param url: topic 或 collection 的 URL :return: objid """ soup = BeautifulSoup(session_request(url).text, 'html.parser') objid = soup.find('input', id='dataInput').attrs.get('data-objid') if is_collection: self._collection_name = soup.find('h2', class_='title-h2').text user = soup.find(name='span', class_='details-user-avatar') self.user_id = user.find('div').attrs.get('data-id') self.username = 
user.find('a').attrs.get('title') return objid def parse_images(self, scrapy): """爬取 topic,获得 objid 后直接调用 API,从返回数据里获得图片地址等信息, 并将下载图片的任务添加到任务队列。 :param scrapy: 记录任务信息的数据体 :return Scrapy: 记录任务信息的数据体 """ objid = scrapy.objid or self.parse_objid(scrapy.url) resp = session_request(urljoin(HOST_PAGE, WORK_SUFFIX.format(objid=objid))) data = resp.json().get('data', {}) author = data.get('product', {}).get('creatorObj', {}).get('username') title = data.get('product', {}).get('title') objid = data.get('product', {}).get('id') for img in data.get('allImageList', []): new_scrapy = Scrapy(type='image', author=author, title=title, objid=objid, index=img.get('orderNo') or 0, url=img.get('url')) if new_scrapy not in self.stat["images_pass"]: self.images.put(new_scrapy) self.stat["nimages"] += 1 return scrapy def fetch_images(self): """从任务队列中获取要爬取的主题,使用多线程处理得到需要下载的图片。""" image_futures = {} while True: try: scrapy = self.topics.get(timeout=Q_TIMEOUT) image_futures[self.pool.submit(self.parse_images, scrapy)] = scrapy except Empty: if self.END_PARSING_TOPICS: break except Exception: continue for future in as_completed(image_futures): scrapy = image_futures.get(future) try: future.result() self.stat["topics_pass"].add(scrapy) except Exception: self.stat["topics_fail"].add(scrapy) cprint(f'GET topic: {scrapy.title} ({scrapy.url}) failed.', 'red') def fetch_all(self, initialized: bool = False): """同时爬取主页、主题,并更新状态。""" if not initialized: self.generate_pages() fetch_futures = [self.pool.submit(self.fetch_topics), self.pool.submit(self.fetch_images)] end_show_fetch = False t = threading.Thread(target=self.show_fetch_status, kwargs={'end': lambda: end_show_fetch}) t.start() try: wait(fetch_futures) except KeyboardInterrupt: raise finally: end_show_fetch = True t.join() def show_fetch_status(self, interval=0.5, end=None): """用于后台线程,实现边爬取边显示状态。 :param int interval: 状态更新间隔,秒 :param function end: 用于控制退出线程 """ while True: status = 'Fetched Pages: {pages}\tTopics: {topics}\tImages: 
{images}'.format( pages=colored(str(self.max_pages).rjust(3), 'blue'), topics=colored(str(self.stat["ntopics"]).rjust(3), 'blue'), images=colored(str(self.stat["nimages"]).rjust(5), 'blue')) print(status, end='\r', flush=True) if (interval == 0) or (end and end()): print('\n') break time.sleep(interval) def show_download_status(self, interval=0.5, end=None): """用于后台线程,实现边下载边显示状态。 :param int interval: 状态更新间隔,秒 :param function end: 用于控制退出线程 """ while True: completed = len(self.stat["images_pass"]) + len(self.stat["images_fail"]) if self.stat["nimages"] > 0: status = 'Time used: {time_used}\tFailed: {failed}\tCompleted: {completed}'.format( time_used=colored(str(datetime.now() - self.start_time)[:-7], 'yellow'), failed=colored(str(len(self.stat["images_fail"])).rjust(3), 'red'), completed=colored(str(int(completed / self.stat["nimages"] * 100)) + f'% ({completed}/{self.stat["nimages"]})', 'green')) print(status, end='\r', flush=True) if (interval == 0) or (end and end()): if self.stat["nimages"] > 0: print('\n') break time.sleep(interval) def download_image(self, scrapy): """下载图片保存到本地。 :param scrapy: 记录任务信息的数据体 :return Scrapy: 记录任务信息的数据体 """ try: name = re.findall(r'(?<=/)\w*?\.(?:jpg|gif|png|bmp)', scrapy.url, re.IGNORECASE)[0] except IndexError: name = uuid4().hex + '.jpg' path = self.directory / safe_filename(scrapy.title) filename = path / f'[{scrapy.index + 1 or 0:02d}]{name}' if (not self.overwrite) and op.isfile(filename): return scrapy url = scrapy.url if self.thumbnail: if url.lower().endswith(('jpg', 'png', 'bmp')): url = f'{scrapy.url}@1280w_1l_2o_100sh.{url[-3:]}' resp = session_request(url) mkdirs_if_not_exist(path) with open(filename, 'wb') as f: for chunk in resp.iter_content(8192): f.write(chunk) return scrapy def save_records(self): """将成功及失败的下载记录保存到本地文件。 :return str: 记录文件的路径 """ filename = f'{safe_filename(self.start_time.isoformat()[:-7])}.json' abspath = op.abspath(self.directory / filename) with open(abspath, 'w', encoding='utf-8') as f: success = 
(self.stat["pages_pass"] | self.stat["topics_pass"] | self.stat["images_pass"]) fail = (self.stat["pages_fail"] | self.stat["topics_fail"] | self.stat["images_fail"]) type_order = {'page': 1, 'topic': 2, 'image': 3} s_ordered = sort_records(success, order=type_order) f_ordered = sort_records(fail, order=type_order) records = { 'time': self.start_time.isoformat(), 'success': [scrapy._asdict() for scrapy in s_ordered], 'fail': [scrapy._asdict() for scrapy in f_ordered] } f.write(json.dumps(records, ensure_ascii=False, indent=2)) return abspath def run_scraper(self): """使用多线程下载所有图片,完成后保存记录并退出程序。""" end_show_download = False t = threading.Thread(target=self.show_download_status, kwargs={'end': lambda: end_show_download}) t.start() image_futuress = {}
e True: try: scrapy = self.images.get_nowait() if scrapy not in self.stat["images_pass"]: image_futuress[self.pool.submit(self.download_image, scrapy)] = scrapy except Empty: break except KeyboardInterrupt: raise except Exception: continue try: for future in as_completed(image_futuress): scrapy = image_futuress.get(future) try: future.result() self.stat["images_pass"].add(scrapy) except Exception: self.stat["images_fail"].add(scrapy) cprint(f'Download image: {scrapy.title}[{scrapy.index + 1}] ' f'({scrapy.url}) failed.', 'red') except KeyboardInterrupt: raise finally: end_show_download = True t.join() saved_images = len(self.stat["images_pass"]) failed_images = len(self.stat["images_fail"]) if saved_images or failed_images: if saved_images: print(f'Saved {colored(saved_images, "green")} images to ' f'{colored(self.directory.absolute(), attrs=["underline"])}') records_path = self.save_records() print(f'Saved records to {colored(records_path, attrs=["underline"])}') else: cprint('No images to download.', 'yellow')
whil
identifier_name
script.js
'user strict'; const content = [ { title : "Бразилія", subtitle : "Фернандо де Норонья", imgSrc : "images/Brazil.jpg", imgAlt : "Brazil", paragraphs: [ "Головний острів, Фернанду-ді-Норонья, за яким отримав назву весь архіпелаг, має площу 26 км² і розміри 10 км завдовжки і 3,5 км завширшки. Його базою є масивна вулканічна формація за 750 м під рівнем моря. Головний острів забирає 91 % загальної площі архіпелагу, решта припадає на острови Рата, Села-Жинета, Кабелуда, Сан-Жозе і острівці Лену і Віува.", "Клімат тропічний із двома чітко визначеними сезонами: дощовим — з січня по серпень, і посушливим протягом решти року. На жаль, первинна рослинність на островах була в минулому знищена, і зараз вони покриті заростями чагарників і в'юнків. Численних туристів приваблює багата флора і фауна довколишнього моря, зокрема морські черепахи і дельфіни, а також різноманіття морських птахів." ] }, { title : "Гонконг", subtitle : "Будда, острів Лантау", imgSrc : "images/HongKong.jpg", imgAlt : "HongKong Budha photo", paragraphs : [ "Основна визначна пам'ятка китайського району Гонконг – великий Будда. Цьому пам'ятнику в тутешніх місцях приділено особливу увагу. Його будівництво почалося 1990-го року і тривало протягом трьох років. 1993-го відбулося офіційне відкриття статуї, зробленої з бронзи. В даний час вона, як і раніше, зберігає звання найбільшої у всьому світі. Обличчя її вкрите золотом, що додає витонченості до цього створення.", "Висота статуї – 34 метри, а вона знаходиться на 482-метровій горі. Дістатися цього туристичного об'єкта можна, подолавши 268 сходинок. Зробивши це, ви максимально наблизитесь до пам'ятника. Перед самою триповерховою статуєю можна помітити 6 невеликих статуй, що підносять їй підношення. Вважається, що це гонконгське надбання символізує зв'язок між людиною і природою." 
] }, { title : "Китай", subtitle : "Національний парк Цзючжайгоу", imgSrc : "images/China.jpg", imgAlt : "China national park", paragraphs : [ "Національний парк Цзючжайгоу у перекладі. «Долина дев'яти сіл» — заповідник на півночі провінції Сичуань у центральному Китаї. Відомий своїми багаторівневими водоспадами та кольоровими озерами, оголошений ЮНЕСКО всесвітньою спадщиною у 1992 році. Належить до категорії V (заповідні ландшафти) із систематизації заповідних зон IUCN.", "Цзючжайгоу складається з трьох долин, що утворюють разом Y-подібну фігуру. Ущелини Жицзе і Цзечава спрямовані з півдня і сходяться в центрі, де вони утворюють ущелину Шучжен, спрямовану північ до гирла долини. Район цих ущелин пов'язаний 55 км доріг для рейсових автобусів, а також гатями та маленькими павільйонами. Гаті зазвичай перебувають на протилежній автомобільній дорозі стороні озер, що захищає їх від руйнування колесами автобусів." ] }, { title : "США", subtitle : "Національний заповідник Гіффорд Пінчот, Вашингтон", imgSrc : "images/USA_gifford.jpg", imgAlt : "USA gifford nature reserve", paragraphs : [ "Національний Вулканічний Пам'ятник Гіффорд Пінчот (Gifford Pinchot) займає величезну площу – 5537,3 квадратних кілометрів у штаті Вашингтон, США. На території природоохоронного об'єкта знаходяться гори та ліси, озера та річки, але головним об'єктом все ж таки залишається вулкан Сент-Хеленс. Саме Вулкан Святої Олени дав Національному парку таку незвичайну назву.", "На околицях вулкана Сент-Хеленс розташована Лава поляна – величезна територія, що зберегла наслідки багатовікових вивержень. Багатосотметрові лавові труби тут є сусідами з глибокими звивистими печерами. Найвідоміша – Мавпа печера, куди туристи допускаються лише у спеціальному спелеологічному спорядженні. Мавпа печера – має найбільшу протяжність серед лавових печер США – майже 400 метрів." 
] }, { title : "Канада", subtitle : "Озеро Морейн", imgSrc : "images/Moraine_Lake_Canada.jpg", imgAlt : "Moraine Lake Canada", paragraphs : [ "Озеро Морейн підживлюється льодовиком і досягає свого повного наповнення лише у другій половині червня. Головною принадою озера Морейн є синій колір води. Коли воно наповнене, в ньому відбиваються різні відтінки синього кольору через заломлення світла на кам'янистому дні озера.", "Поверхня озера Морейн відображає, як в дзеркалі, десять величезних канадських піків, і це робить його одним з найкрасивіших місць в Канаді. Це одне з найбільш часто фотографованих місць в Канаді, що дуже славиться красою мальовничих місць і своїми пішохідними екскурсіями." ] }, { title : "Франція", subtitle : "Етрета, Нормандія", imgSrc : "images/etretat_normandy_france.jpg", imgAlt : "etretat normandy france", paragraphs : [ "Étretat (Етрета) - невеликий курорт з прекрасними гальковими пляжами на узбережжі Алебастрового Ла Манша, одна з головних визначних пам'яток Нормандії, на півночі Франції. Він розташований біля самого пляжу, де води Англійського каналу сформували дивовижну красу прямовисні скелі з природними арками — один із найкрасивіших краєвидів Франції.", "" ] }, { title : "Гренландія", subtitle : "Льодяний каньйон", imgSrc : "images/Grenland_canyon.jpg", imgAlt : "Greenland canyon", paragraphs : [ "Цей воістину дивовижний острів притягує мандрівників з усього світу, як магніт. Аналогія банальна, але краще вигадати складно. Значна частина Гренландії справді схована під надійним щитом із льоду, який останнім часом починає активно танути. Це результат глобального потепління, на яке вже багато років намагаються звернути увагу багато громадських організацій.", "У льодовиках утворюються численні каньйони - їх краса просто приголомшує. Таке враження, що природа, як найталановитіший художник на цій планеті, малює своїм пензлем хитромудрі, часом навіть сюрреалістичні картини." 
] }, { title : "Норвегія", subtitle : "Місто - Олесунн", imgSrc : "images/Alesund_norway.jpg", imgAlt : "Alesund norway", paragraphs : [ "О́лесунн (норв. Ålesund) — місто і порт на заході Норвегії. Розташоване на північ від гирла фіорду Стур. Місто розкинулося на декількох островах, а саме — Норвьойя, Аспьойя, Гейсса (Гесса) та Окснойя, які є з'єднані мостами. За легендою, поселення бере свій початок у 9 столітті коли Ролло (Рольф) заснував поблизу маєток, де мешкав війт. Статус міста Олесунн отримав в 1848 р. Після того як місто зазнало пожежі в 1904 р., воно було відбудоване у оригінальному стилі арт-нуво.", "Олесунн — регіональний торговий центр і туристична база для маршрутів в регіонах Суннмьоре, долині Норанґ, льодовиках Ойє та островах Рунде та Ґіске. В місті знаходиться найбільша риболовна гавань Норвегії, пристановище для риболовних трейлерів що виловлюють тріску та палтус з виду камбалових. Разом з Тромсьо, Олесунн є осередком ловлі арктичних тюленів." ] } ]; const mainContentBlock = document.querySelectorAll('.main__content-block'); const navigationButton = document.querySelectorAll('.main__navigation-button'); const humbergerBtn = document.querySelector('.header__hamburger'), adaptiveTabsMenu = document.querySelector('.adaptive'), adaptiveNavTabsBtn = document.querySelectorAll('.adaptive__navigation-button'), body = document.querySelector('body'); document.addEventListener('DOMContentLoaded', startEventsForPage()); function startEventsForPage() { createElements(); hamburgerMenu(); currentTabButton('main__navigation-button_active', '.main__navigation-list'); mainContentBlock[0].classList.add('main__content-active'); function currentTabButton(tabSelector, parrentSelector) { const parrentMenuWrap = document.querySelector(parrentSelector); navigationButton[0].classList.add('main__navigation-button_active'); parrentMenuWrap.addEventListener('click', e => { let index = +e.target.getAttribute("data-number"); navigationButton.forEach(item => { 
item.classList.remove('main__navigation-button_active'); }); mainContentBlock.forEach(item => { item.classList.remove('main__content-active'); }); e.target.classList.add(tabSelector); deleteInnerHtml(); createElements(index); mainContentBlock[index].classList.add('main__content-active'); }); }; function createElements (index = 0) { mainContentBlock[index].innerHTML = ` <h2 class="main__content-title">${content[index].title}</h2> <p class="main__content-subtitle">${content[index].subtitle}</p> <img class="main__content-image" src="${content[index].imgSrc}" alt="${content[index].imgAlt}"> <p class="main__content-descr">${content[index].paragraphs[0]}</p> <p class="main__content-descr">${content[index].paragraphs[1]}</p> `; }; function deleteInnerHtml() { mainContentBlock.forEach(element => { element.innerHTML = ""; }); }; function hamburgerMenu() { adaptiveNavTabsBtn[0].classList.add('adaptive__navigation-button__active'); openContentWithTab('adaptive__navigation-button__active','adaptive__navigation-list'); humbergerBtn.addEventListener('click', e => { e.currentTarget.classList.toggle('header__hamburger-active'); body.classList.toggle('body__active'); adaptiveTabsMenu.classList.toggle('adaptive__active'); }); }; function openContentWithTab(activeTabSelector, parrentSelector) { const parrentMenuWrap = document.querySelector('.' + parrentSelector); parrentMenuWrap.addEventListener('click', e => { let index = +e.target.getAttribute("data-number"); mainContentBlock.forEach(element => { element.classList.remove('main__content-active'); }); adaptiveNavTabsBtn.forEach(element => { element.classList.remove(activeTabSelector); }); deleteInnerHtml(); createElements(index); adaptiveTabsMenu.classList.toggle('adaptive__active'); body.classList.toggle('body__active'); humbergerBtn.classList.remove('header__hamburger-active'); mainContentBlock[index].classList.add('main__content-active'); adaptiveNavTabsBtn[index].classList.add(activeTabSelector); }); }; }
identifier_body
script.js
'user strict'; const content = [ { title : "Бразилія", subtitle : "Фернандо де Норонья", imgSrc : "images/Brazil.jpg", imgAlt : "Brazil", paragraphs: [ "Головний острів, Фернанду-ді-Норонья, за яким отримав назву весь архіпелаг, має площу 26 км² і розміри 10 км завдовжки і 3,5 км завширшки. Його базою є масивна вулканічна формація за 750 м під рівнем моря. Головний острів забирає 91 % загальної площі архіпелагу, решта припадає на острови Рата, Села-Жинета, Кабелуда, Сан-Жозе і острівці Лену і Віува.", "Клімат тропічний із двома чітко визначеними сезонами: дощовим — з січня по серпень, і посушливим протягом решти року. На жаль, первинна рослинність на островах була в минулому знищена, і зараз вони покриті заростями чагарників і в'юнків. Численних туристів приваблює багата флора і фауна довколишнього моря, зокрема морські черепахи і дельфіни, а також різноманіття морських птахів." ] }, { title : "Гонконг", subtitle : "Будда, острів Лантау", imgSrc : "images/HongKong.jpg", imgAlt : "HongKong Budha photo", paragraphs : [ "Основна визначна пам'ятка китайського району Гонконг – великий Будда. Цьому пам'ятнику в тутешніх місцях приділено особливу увагу. Його будівництво почалося 1990-го року і тривало протягом трьох років. 1993-го відбулося офіційне відкриття статуї, зробленої з бронзи. В даний час вона, як і раніше, зберігає звання найбільшої у всьому світі. Обличчя її вкрите золотом, що додає витонченості до цього створення.", "Висота статуї – 34 метри, а вона знаходиться на 482-метровій горі. Дістатися цього туристичного об'єкта можна, подолавши 268 сходинок. Зробивши це, ви максимально наблизитесь до пам'ятника. Перед самою триповерховою статуєю можна помітити 6 невеликих статуй, що підносять їй підношення. Вважається, що це гонконгське надбання символізує зв'язок між людиною і природою." 
] }, { title : "Китай", subtitle : "Національний парк Цзючжайгоу", imgSrc : "images/China.jpg", imgAlt : "China national park", paragraphs : [ "Національний парк Цзючжайгоу у перекладі. «Долина дев'яти сіл» — заповідник на півночі провінції Сичуань у центральному Китаї. Відомий своїми багаторівневими водоспадами та кольоровими озерами, оголошений ЮНЕСКО всесвітньою спадщиною у 1992 році. Належить до категорії V (заповідні ландшафти) із систематизації заповідних зон IUCN.", "Цзючжайгоу складається з трьох долин, що утворюють разом Y-подібну фігуру. Ущелини Жицзе і Цзечава спрямовані з півдня і сходяться в центрі, де вони утворюють ущелину Шучжен, спрямовану північ до гирла долини. Район цих ущелин пов'язаний 55 км доріг для рейсових автобусів, а також гатями та маленькими павільйонами. Гаті зазвичай перебувають на протилежній автомобільній дорозі стороні озер, що захищає їх від руйнування колесами автобусів." ] }, { title : "США", subtitle : "Національний заповідник Гіффорд Пінчот, Вашингтон", imgSrc : "images/USA_gifford.jpg", imgAlt : "USA gifford nature reserve", paragraphs : [ "Національний Вулканічний Пам'ятник Гіффорд Пінчот (Gifford Pinchot) займає величезну площу – 5537,3 квадратних кілометрів у штаті Вашингтон, США. На території природоохоронного об'єкта знаходяться гори та ліси, озера та річки, але головним об'єктом все ж таки залишається вулкан Сент-Хеленс. Саме Вулкан Святої Олени дав Національному парку таку незвичайну назву.", "На околицях вулкана Сент-Хеленс розташована Лава поляна – величезна територія, що зберегла наслідки багатовікових вивержень. Багатосотметрові лавові труби тут є сусідами з глибокими звивистими печерами. Найвідоміша – Мавпа печера, куди туристи допускаються лише у спеціальному спелеологічному спорядженні. Мавпа печера – має найбільшу протяжність серед лавових печер США – майже 400 метрів." 
] }, { title : "Канада", subtitle : "Озеро Морейн", imgSrc : "images/Moraine_Lake_Canada.jpg", imgAlt : "Moraine Lake Canada", paragraphs : [ "Озеро Морейн підживлюється льодовиком і досягає свого повного наповнення лише у другій половині червня. Головною принадою озера Морейн є синій колір води. Коли воно наповнене, в ньому відбиваються різні відтінки синього кольору через заломлення світла на кам'янистому дні озера.", "Поверхня озера Морейн відображає, як в дзеркалі, десять величезних канадських піків, і це робить його одним з найкрасивіших місць в Канаді. Це одне з найбільш часто фотографованих місць в Канаді, що дуже славиться красою мальовничих місць і своїми пішохідними екскурсіями." ] }, { title : "Франція", subtitle : "Етрета, Нормандія", imgSrc : "images/etretat_normandy_france.jpg", imgAlt : "etretat normandy france", paragraphs : [ "Étretat (Етрета) - невеликий курорт з прекрасними гальковими пляжами на узбережжі Алебастрового Ла Манша, одна з головних визначних пам'яток Нормандії, на півночі Франції. Він розташований біля самого пляжу, де води Англійського каналу сформували дивовижну красу прямовисні скелі з природними арками — один із найкрасивіших краєвидів Франції.", "" ] }, { title : "Гренландія", subtitle : "Льодяний каньйон", imgSrc : "images/Grenland_canyon.jpg", imgAlt : "Greenland canyon", paragraphs : [ "Цей воістину дивовижний острів притягує мандрівників з усього світу, як магніт. Аналогія банальна, але краще вигадати складно. Значна частина Гренландії справді схована під надійним щитом із льоду, який останнім часом починає активно танути. Це результат глобального потепління, на яке вже багато років намагаються звернути увагу багато громадських організацій.", "У льодовиках утворюються численні каньйони - їх краса просто приголомшує. Таке враження, що природа, як найталановитіший художник на цій планеті, малює своїм пензлем хитромудрі, часом навіть сюрреалістичні картини." 
] }, { title : "Норвегія", subtitle : "Місто - Олесунн", imgSrc : "images/Alesund_norway.jpg", imgAlt : "Alesund norway", paragraphs : [ "О́лесунн (норв. Ålesund) — місто і порт на заході Норвегії. Розташоване на північ від гирла фіорду Стур. Місто розкинулося на декількох островах, а саме — Норвьойя, Аспьойя, Гейсса (Гесса) та Окснойя, які є з'єднані мостами. За легендою, поселення бере свій початок у 9 столітті коли Ролло (Рольф) заснував поблизу маєток, де мешкав війт. Статус міста Олесунн отримав в 1848 р. Після того як місто зазнало пожежі в 1904 р., воно було відбудоване у оригінальному стилі арт-нуво.", "Олесунн — регіональний торговий центр і туристична база для маршрутів в регіонах Суннмьоре, долині Норанґ, льодовиках Ойє та островах Рунде та Ґіске. В місті знаходиться найбільша риболовна гавань Норвегії, пристановище для риболовних трейлерів що виловлюють тріску та палтус з виду камбалових. Разом з Тромсьо, Олесунн є осередком ловлі арктичних тюленів." ] } ]; const mainContentBlock = document.querySelectorAll('.main__content-block'); const navigationButton = document.querySelectorAll('.main__navigation-button'); const humbergerBtn = document.querySelector('.header__hamburger'), adaptiveTabsMenu = document.querySelector('.adaptive'), adaptiveNavTabsBtn = document.querySelectorAll('.adaptive__navigation-button'), body = document.querySelector('body'); document.addEventListener('DOMContentLoaded', startEventsForPage()); function startEventsForPage() { createElements(); hamburgerMenu(); currentTabButton('main__navigation-button_active', '.main__navigation-list'); mainContentBlock[0].classList.add('main__content-active'); function currentTabButton(tabSelector, parrentSelector) { const parrentMenuWrap = document.querySelector(parrentSelector); navigationButton[0].classList.add('main__navigation-button_active'); parrentMenuWrap.addEventListener('click', e => { let index = +e.target.getAttribute("data-number"); navigationButton.forEach(item => { 
item.classList.remove('main__navigation-button_active'); }); mainContentBlock.forEach(item => { item.classList.remove('main__content-active'); }); e.target.classList.add(tabSelector); deleteInnerHtml(); createElements(index); mainContentBlock[index].classList.add('main__content-active'); }); }; function createElements (index = 0) { mainContentBlock[index].innerHTML = ` <h2 class="main__content-title">${content[index].title}</h2> <p class="main__content-subtitle">${content[index].subtitle}</p> <img class="main__content-image" src="${content[index].imgSrc}" alt="${content[index].imgAlt}"> <p class="main__content-descr">${content[index].paragraphs[0]}</p> <p class="main__content-descr">${content[index].paragraphs[1]}</p> `; }; function deleteInnerHtml() { mainContentBlock.forEach(element => { element.innerHTML = ""; }); }; function hamburgerMenu() { adaptiveNavTabsBtn[0].classList.add('adaptive__navigation-button__active'); openContentWithTab('adaptive__navigation-button__active','adaptive__navigation-list'); humbergerBtn.addEventListener('click', e => { e.currentTarget.classList.toggle('header__hamburger-active'); body.classList.toggle('body__active'); adaptiveTabsMenu.classList.toggle('adaptive__active'); }); }; function openContentWithTab(activeTabSelector, parrentSelector) { const parrentMenuWrap = document.querySelector('.' + parrentSelector); parrentMenuWrap.addEventListener('click', e => { let index = +e.target.getAttribute("data-number"); mainContentBlock.forEach(element => { element.classList.remove('main__content-active'); }); adaptiveNavTabsBtn.forEach(element => { element.classList.remove(activeTabSelector); }); deleteInnerHtml(); createElements(index); adaptiveTabsMenu.classList.toggle('adaptive__active'); body.classList.toggle('body__active'); humbergerBtn.classList.remove('header__hamburger-active'); mainContentBlock[index].classList.add('main__content-active'); adaptiveNavTabsBtn[index].classList.add(activeTabSelector); }); }; }
identifier_name
script.js
'user strict'; const content = [ { title : "Бразилія", subtitle : "Фернандо де Норонья", imgSrc : "images/Brazil.jpg", imgAlt : "Brazil", paragraphs: [ "Головний острів, Фернанду-ді-Норонья, за яким отримав назву весь архіпелаг, має площу 26 км² і розміри 10 км завдовжки і 3,5 км завширшки. Його базою є масивна вулканічна формація за 750 м під рівнем моря. Головний острів забирає 91 % загальної площі архіпелагу, решта припадає на острови Рата, Села-Жинета, Кабелуда, Сан-Жозе і острівці Лену і Віува.", "Клімат тропічний із двома чітко визначеними сезонами: дощовим — з січня по серпень, і посушливим протягом решти року. На жаль, первинна рослинність на островах була в минулому знищена, і зараз вони покриті заростями чагарників і в'юнків. Численних туристів приваблює багата флора і фауна довколишнього моря, зокрема морські черепахи і дельфіни, а також різноманіття морських птахів." ] }, { title : "Гонконг", subtitle : "Будда, острів Лантау", imgSrc : "images/HongKong.jpg", imgAlt : "HongKong Budha photo", paragraphs : [ "Основна визначна пам'ятка китайського району Гонконг – великий Будда. Цьому пам'ятнику в тутешніх місцях приділено особливу увагу. Його будівництво почалося 1990-го року і тривало протягом трьох років. 1993-го відбулося офіційне відкриття статуї, зробленої з бронзи. В даний час вона, як і раніше, зберігає звання найбільшої у всьому світі. Обличчя її вкрите золотом, що додає витонченості до цього створення.", "Висота статуї – 34 метри, а вона знаходиться на 482-метровій горі. Дістатися цього туристичного об'єкта можна, подолавши 268 сходинок. Зробивши це, ви максимально наблизитесь до пам'ятника. Перед самою триповерховою статуєю можна помітити 6 невеликих статуй, що підносять їй підношення. Вважається, що це гонконгське надбання символізує зв'язок між людиною і природою." 
] }, { title : "Китай", subtitle : "Національний парк Цзючжайгоу", imgSrc : "images/China.jpg", imgAlt : "China national park", paragraphs : [ "Національний парк Цзючжайгоу у перекладі. «Долина дев'яти сіл» — заповідник на півночі провінції Сичуань у центральному Китаї. Відомий своїми багаторівневими водоспадами та кольоровими озерами, оголошений ЮНЕСКО всесвітньою спадщиною у 1992 році. Належить до категорії V (заповідні ландшафти) із систематизації заповідних зон IUCN.", "Цзючжайгоу складається з трьох долин, що утворюють разом Y-подібну фігуру. Ущелини Жицзе і Цзечава спрямовані з півдня і сходяться в центрі, де вони утворюють ущелину Шучжен, спрямовану північ до гирла долини. Район цих ущелин пов'язаний 55 км доріг для рейсових автобусів, а також гатями та маленькими павільйонами. Гаті зазвичай перебувають на протилежній автомобільній дорозі стороні озер, що захищає їх від руйнування колесами автобусів." ] }, { title : "США", subtitle : "Національний заповідник Гіффорд Пінчот, Вашингтон", imgSrc : "images/USA_gifford.jpg", imgAlt : "USA gifford nature reserve", paragraphs : [ "Національний Вулканічний Пам'ятник Гіффорд Пінчот (Gifford Pinchot) займає величезну площу – 5537,3 квадратних кілометрів у штаті Вашингтон, США. На території природоохоронного об'єкта знаходяться гори та ліси, озера та річки, але головним об'єктом все ж таки залишається вулкан Сент-Хеленс. Саме Вулкан Святої Олени дав Національному парку таку незвичайну назву.", "На околицях вулкана Сент-Хеленс розташована Лава поляна – величезна територія, що зберегла наслідки багатовікових вивержень. Багатосотметрові лавові труби тут є сусідами з глибокими звивистими печерами. Найвідоміша – Мавпа печера, куди туристи допускаються лише у спеціальному спелеологічному спорядженні. Мавпа печера – має найбільшу протяжність серед лавових печер США – майже 400 метрів." 
] }, { title : "Канада", subtitle : "Озеро Морейн", imgSrc : "images/Moraine_Lake_Canada.jpg", imgAlt : "Moraine Lake Canada", paragraphs : [ "Озеро Морейн підживлюється льодовиком і досягає свого повного наповнення лише у другій половині червня. Головною принадою озера Морейн є синій колір води. Коли воно наповнене, в ньому відбиваються різні відтінки синього кольору через заломлення світла на кам'янистому дні озера.", "Поверхня озера Морейн відображає, як в дзеркалі, десять величезних канадських піків, і це робить його одним з найкрасивіших місць в Канаді. Це одне з найбільш часто фотографованих місць в Канаді, що дуже славиться красою мальовничих місць і своїми пішохідними екскурсіями." ] }, { title : "Франція", subtitle : "Етрета, Нормандія", imgSrc : "images/etretat_normandy_france.jpg", imgAlt : "etretat normandy france", paragraphs : [ "Étretat (Етрета) - невеликий курорт з прекрасними гальковими пляжами на узбережжі Алебастрового Ла Манша, одна з головних визначних пам'яток Нормандії, на півночі Франції. Він розташований біля самого пляжу, де води Англійського каналу сформували дивовижну красу прямовисні скелі з природними арками — один із найкрасивіших краєвидів Франції.", "" ] }, { title : "Гренландія", subtitle : "Льодяний каньйон", imgSrc : "images/Grenland_canyon.jpg", imgAlt : "Greenland canyon", paragraphs : [ "Цей воістину дивовижний острів притягує мандрівників з усього світу, як магніт. Аналогія банальна, але краще вигадати складно. Значна частина Гренландії справді схована під надійним щитом із льоду, який останнім часом починає активно танути. Це результат глобального потепління, на яке вже багато років намагаються звернути увагу багато громадських організацій.", "У льодовиках утворюються численні каньйони - їх краса просто приголомшує. Таке враження, що природа, як найталановитіший художник на цій планеті, малює своїм пензлем хитромудрі, часом навіть сюрреалістичні картини." 
] }, { title : "Норвегія", subtitle : "Місто - Олесунн", imgSrc : "images/Alesund_norway.jpg", imgAlt : "Alesund norway", paragraphs : [ "О́лесунн (норв. Ålesund) — місто і порт на заході Норвегії. Розташоване на північ від гирла фіорду Стур. Місто розкинулося на декількох островах, а саме — Норвьойя, Аспьойя, Гейсса (Гесса) та Окснойя, які є з'єднані мостами. За легендою, поселення бере свій початок у 9 столітті коли Ролло (Рольф) заснував поблизу маєток, де мешкав війт. Статус міста Олесунн отримав в 1848 р. Після того як місто зазнало пожежі в 1904 р., воно було відбудоване у оригінальному стилі арт-нуво.", "Олесунн — регіональний торговий центр і туристична база для маршрутів в регіонах Суннмьоре, долині Норанґ, льодовиках Ойє та островах Рунде та Ґіске. В місті знаходиться найбільша риболовна гавань Норвегії, пристановище для риболовних трейлерів що виловлюють тріску та палтус з виду камбалових. Разом з Тромсьо, Олесунн є осередком ловлі арктичних тюленів." ] } ]; const mainContentBlock = document.querySelectorAll('.main__content-block'); const navigationButton = document.querySelectorAll('.main__navigation-button'); const humbergerBtn = document.querySelector('.header__hamburger'), adaptiveTabsMenu = document.querySelector('.adaptive'), adaptiveNavTabsBtn = document.querySelectorAll('.adaptive__navigation-button'), body = document.querySelector('body'); document.addEventListener('DOMContentLoaded', startEventsForPage()); function startEventsForPage() { createElements(); hamburgerMenu(); currentTabButton('main__navigation-button_active', '.main__navigation-list'); mainContentBlock[0].classList.add('main__content-active'); function currentTabButton(tabSelector, parrentSelector) { const parrentMenuWrap = document.querySelector(parrentSelector); navigationButton[0].classList.add('main__navigation-button_active'); parrentMenuWrap.addEventListener('click', e => { let index = +e.target.getAttribute("data-number"); navigationButton.forEach(item => { 
item.classList.remove('main__navigation-button_active'); }); mainContentBlock.forEach(item => { item.classList.remove('main__content-active'); }); e.target.classList.add(tabSelector); deleteInnerHtml(); createElements(index); mainContentBlock[index].classList.add('main__content-active'); }); }; function createElements (index = 0) { mainContentBlock[index].innerHTML = ` <h2 class="main__content-title">${content[index].title}</h2> <p class="main__content-subtitle">${content[index].subtitle}</p> <img class="main__content-image" src="${content[index].imgSrc}" alt="${content[index].imgAlt}"> <p class="main__content-descr">${content[index].paragraphs[0]}</p> <p class="main__content-descr">${content[index].paragraphs[1]}</p> `; }; function deleteInnerHtml() { mainContentBlock.forEach(element => { element.innerHTML = ""; }); }; function hamburgerMenu() { adaptiveNavTabsBtn[0].classList.add('adaptive__navigation-button__active'); openContentWithTab('adaptive__navigation-button__active','adaptive__navigation-list'); humbergerBtn.addEventListener('click', e => { e.currentTarget.classList.toggle('header__hamburger-active'); body.classList.toggle('body__active'); adaptiveTabsMenu.classList.toggle('adaptive__active'); }); }; function openContentWithTab(activeTabSelector, parrentSelector) { const parrentMenuWrap = document.querySelector('.' + parrentSelector);
mainContentBlock.forEach(element => { element.classList.remove('main__content-active'); }); adaptiveNavTabsBtn.forEach(element => { element.classList.remove(activeTabSelector); }); deleteInnerHtml(); createElements(index); adaptiveTabsMenu.classList.toggle('adaptive__active'); body.classList.toggle('body__active'); humbergerBtn.classList.remove('header__hamburger-active'); mainContentBlock[index].classList.add('main__content-active'); adaptiveNavTabsBtn[index].classList.add(activeTabSelector); }); }; }
parrentMenuWrap.addEventListener('click', e => { let index = +e.target.getAttribute("data-number");
random_line_split
reco_tracks.py
#!/usr/bin/env python from pdb import set_trace as br from operator import itemgetter from numpy.polynomial.polynomial import Polynomial from modules.utils import OUT_CONFIG from modules.geometry.hit import HitManager from modules.geometry.sl import SL from modules.geometry.segment import Segment from modules.geometry import Geometry, COOR_ID from modules.reco import config, plot from modules.analysis import config as CONFIGURATION import os import itertools import bokeh import numpy as np ############################################# INPUT ARGUMENTS import argparse parser = argparse.ArgumentParser(description='Track reconstruction from input hits.') parser.add_argument('-E', '--events', metavar='N', help='Events to process', type=int, default=None, nargs='+') parser.add_argument('-f', '--format', help='Input hits format', default='time_wire') parser.add_argument('-g', '--glance', help='Only show # hits in each event', action='store_true', default=False) parser.add_argument('-m', '--max_hits', dest='max_hits', type=int, help='Maximum number of hits allowed in one event [default: 15]', action='store', default=15) parser.add_argument('-o', '--output', help='Output path', default='plots/hits_ev{0:d}.html') parser.add_argument('-p', '--plot', help='Draw plots', action='store_true', default=False) parser.add_argument('inputs', metavar='FILE', help='Input files with raw hits, 1 event/line', nargs='+') args = parser.parse_args() # Checking validity of the input format if args.format not in OUT_CONFIG: raise ValueError('Wrong input format (-f) specified') # Checking existence of input files for file_path in args.inputs: if not os.path.exists(os.path.expandvars(file_path)): print('--- ERROR ---') print(' \''+file_path+'\' file not found') print(' please provide the correct path to the file containing raw hits' ) print() exit() def
(input_files): """Reconstruct tracks from hits in all events from the provided input files""" n_words_event = len(OUT_CONFIG['event']['fields']) n_words_hit = len(OUT_CONFIG[args.format]['fields']) # Initialising event event = -1 G = Geometry(CONFIGURATION) H = HitManager() SLs = {} for iSL in config.SL_SHIFT.keys(): SLs[iSL] = SL(iSL, config.SL_SHIFT[iSL], config.SL_ROTATION[iSL]) # Defining which SLs should be plotted in which global view GLOBAL_VIEW_SLs = { 'xz': [SLs[0], SLs[2]], 'yz': [SLs[1], SLs[3]] } # Analyzing the hits in each event for file_path in input_files: # Reading input file line by line with open(file_path, 'r') as file_in: file_line_nr = 0 for line in file_in: file_line_nr += 1 if file_line_nr <= 1: continue hits_lst = [] H.reset() words = line.strip().split() event = int(words[0]) # Skipping event if it was not specified in command line if args.events is not None and event not in args.events: continue nhits = int(words[1]) print('Event {0:<5d} # hits: {1:d}'.format(event, nhits)) if args.glance: continue # Skipping event with too many hits (most likely a spark event that will take forever to process) if nhits > args.max_hits: continue # Extracting hit information for iHit in range(nhits): start = n_words_event + iHit*n_words_hit ww = words[start:start+n_words_hit] hits_lst.append([int(ww[0]), int(ww[1]), int(ww[2]), float(ww[3])]) H.add_hits(hits_lst) # Removing hits with time outside the timebox region H.hits.drop(H.hits.loc[(H.hits['time'] < config.TIMEBOX[0]) | (H.hits['time'] > config.TIMEBOX[1])].index, inplace=True) # Calculating local+global hit positions H.calc_pos(SLs) # Creating figures of the chambers figs = {} figs['sl'] = plot.book_chambers_figure(G) figs['global'] = plot.book_global_figure(G, GLOBAL_VIEW_SLs) # Analyzing hits in each SL sl_fit_results = {} for iSL, sl in SLs.items(): # print('- SL', iSL) hits_sl = H.hits.loc[H.hits['sl'] == iSL].sort_values('layer') if args.plot: # Drawing the left and right hits in local frame 
figs['sl'][iSL].square(x=hits_sl['lposx'], y=hits_sl['posz'], size=5, fill_color='red', fill_alpha=0.7, line_width=0) figs['sl'][iSL].square(x=hits_sl['rposx'], y=hits_sl['posz'], size=5, fill_color='blue', fill_alpha=0.7, line_width=0) # Performing track reconstruction in the local frame sl_fit_results[iSL] = [] layer_groups = hits_sl.groupby('layer').groups n_layers = len(layer_groups) # Stopping if lass than 3 layers of hits if n_layers < config.NHITS_MIN_LOCAL: continue hitid_layers = [gr.to_numpy() for gr_name, gr in layer_groups.items()] # Building the list of all possible hit combinations with 1 hit from each layer hits_layered = list(itertools.product(*hitid_layers)) # Building more combinations using only either left or right position of each hit for hit_ids in hits_layered: # print('- -', hit_ids) posz = hits_sl.loc[hits_sl.index.isin(hit_ids), 'posz'].values posx = hits_sl.loc[hits_sl.index.isin(hit_ids), ['lposx', 'rposx']].values posx_combs = list(itertools.product(*posx)) # Fitting each combination fit_results_lr = [] fit_range = (min(posz), max(posz)) for iC, posx_comb in enumerate(posx_combs): pfit, stats = Polynomial.fit(posz, posx_comb, 1, full=True, window=fit_range, domain=fit_range) chi2 = stats[0][0] / n_layers if chi2 < config.FIT_CHI2_MAX: a0, a1 = pfit fit_results_lr.append((chi2, hit_ids, pfit)) # Keeping only the best fit result from the given set of physical hits fit_results_lr.sort(key=itemgetter(0)) if fit_results_lr: sl_fit_results[iSL].append(fit_results_lr[0]) # Sorting the fit results of a SL by Chi2 sl_fit_results[iSL].sort(key=itemgetter(0)) if sl_fit_results[iSL]: # Drawing fitted tracks posz = np.array([G.SL_FRAME['b']+1, G.SL_FRAME['t']-1], dtype=np.float32) for iR, res in enumerate(sl_fit_results[iSL][:5]): col = config.TRACK_COLORS[iR] posx = res[2](posz) figs['sl'][iSL].line(x=posx, y=posz, line_color=col, line_alpha=0.7, line_width=3) if args.plot: # Drawing the left and right hits in global frame for view, sls in 
GLOBAL_VIEW_SLs.items(): sl_ids = [sl.id for sl in sls] hits_sls = H.hits.loc[H.hits['sl'].isin(sl_ids)] figs['global'][view].square(x=hits_sls['glpos'+view[0]], y=hits_sls['glpos'+view[1]], fill_color='red', fill_alpha=0.7, line_width=0) figs['global'][view].square(x=hits_sls['grpos'+view[0]], y=hits_sls['grpos'+view[1]], fill_color='blue', fill_alpha=0.7, line_width=0) # Building 3D segments from the fit results in each SL posz = np.array([G.SL_FRAME['b'], G.SL_FRAME['t']], dtype=np.float32) for sl in sls: for iR, res in enumerate(sl_fit_results[sl.id][:5]): posx = res[2](posz) start = (posx[0], 0, posz[0]) end = (posx[1], 0, posz[1]) segL = Segment(start, end) segG = segL.fromSL(sl) segG.calc_vector() # Extending the global segment to the full height of the view start = segG.pointAtZ(plot.PLOT_RANGE['y'][0]) end = segG.pointAtZ(plot.PLOT_RANGE['y'][1]) # Getting XY coordinates of the global segment for the current view iX = COOR_ID[view[0]] posx = [start[iX], end[iX]] posy = [start[2], end[2]] # Drawing the segment col = config.TRACK_COLORS[sl.id] figs['global'][view].line(x=posx, y=posy, line_color=col, line_alpha=0.7, line_width=3) print(sl.id, iR, posx, posy) # Storing the figures to an HTML file if args.plot: plots = [[figs['sl'][l]] for l in [3, 1, 2, 0]] plots.append([figs['global'][v] for v in ['xz', 'yz']]) bokeh.io.output_file(args.output.format(event), mode='cdn') bokeh.io.save(bokeh.layouts.layout(plots)) process(args.inputs)
process
identifier_name
reco_tracks.py
#!/usr/bin/env python from pdb import set_trace as br from operator import itemgetter from numpy.polynomial.polynomial import Polynomial from modules.utils import OUT_CONFIG from modules.geometry.hit import HitManager from modules.geometry.sl import SL from modules.geometry.segment import Segment from modules.geometry import Geometry, COOR_ID from modules.reco import config, plot from modules.analysis import config as CONFIGURATION import os import itertools import bokeh import numpy as np ############################################# INPUT ARGUMENTS import argparse parser = argparse.ArgumentParser(description='Track reconstruction from input hits.') parser.add_argument('-E', '--events', metavar='N', help='Events to process', type=int, default=None, nargs='+') parser.add_argument('-f', '--format', help='Input hits format', default='time_wire') parser.add_argument('-g', '--glance', help='Only show # hits in each event', action='store_true', default=False) parser.add_argument('-m', '--max_hits', dest='max_hits', type=int, help='Maximum number of hits allowed in one event [default: 15]', action='store', default=15) parser.add_argument('-o', '--output', help='Output path', default='plots/hits_ev{0:d}.html') parser.add_argument('-p', '--plot', help='Draw plots', action='store_true', default=False) parser.add_argument('inputs', metavar='FILE', help='Input files with raw hits, 1 event/line', nargs='+') args = parser.parse_args() # Checking validity of the input format if args.format not in OUT_CONFIG: raise ValueError('Wrong input format (-f) specified') # Checking existence of input files for file_path in args.inputs: if not os.path.exists(os.path.expandvars(file_path)): print('--- ERROR ---') print(' \''+file_path+'\' file not found') print(' please provide the correct path to the file containing raw hits' ) print() exit() def process(input_files):
process(args.inputs)
"""Reconstruct tracks from hits in all events from the provided input files""" n_words_event = len(OUT_CONFIG['event']['fields']) n_words_hit = len(OUT_CONFIG[args.format]['fields']) # Initialising event event = -1 G = Geometry(CONFIGURATION) H = HitManager() SLs = {} for iSL in config.SL_SHIFT.keys(): SLs[iSL] = SL(iSL, config.SL_SHIFT[iSL], config.SL_ROTATION[iSL]) # Defining which SLs should be plotted in which global view GLOBAL_VIEW_SLs = { 'xz': [SLs[0], SLs[2]], 'yz': [SLs[1], SLs[3]] } # Analyzing the hits in each event for file_path in input_files: # Reading input file line by line with open(file_path, 'r') as file_in: file_line_nr = 0 for line in file_in: file_line_nr += 1 if file_line_nr <= 1: continue hits_lst = [] H.reset() words = line.strip().split() event = int(words[0]) # Skipping event if it was not specified in command line if args.events is not None and event not in args.events: continue nhits = int(words[1]) print('Event {0:<5d} # hits: {1:d}'.format(event, nhits)) if args.glance: continue # Skipping event with too many hits (most likely a spark event that will take forever to process) if nhits > args.max_hits: continue # Extracting hit information for iHit in range(nhits): start = n_words_event + iHit*n_words_hit ww = words[start:start+n_words_hit] hits_lst.append([int(ww[0]), int(ww[1]), int(ww[2]), float(ww[3])]) H.add_hits(hits_lst) # Removing hits with time outside the timebox region H.hits.drop(H.hits.loc[(H.hits['time'] < config.TIMEBOX[0]) | (H.hits['time'] > config.TIMEBOX[1])].index, inplace=True) # Calculating local+global hit positions H.calc_pos(SLs) # Creating figures of the chambers figs = {} figs['sl'] = plot.book_chambers_figure(G) figs['global'] = plot.book_global_figure(G, GLOBAL_VIEW_SLs) # Analyzing hits in each SL sl_fit_results = {} for iSL, sl in SLs.items(): # print('- SL', iSL) hits_sl = H.hits.loc[H.hits['sl'] == iSL].sort_values('layer') if args.plot: # Drawing the left and right hits in local frame 
figs['sl'][iSL].square(x=hits_sl['lposx'], y=hits_sl['posz'], size=5, fill_color='red', fill_alpha=0.7, line_width=0) figs['sl'][iSL].square(x=hits_sl['rposx'], y=hits_sl['posz'], size=5, fill_color='blue', fill_alpha=0.7, line_width=0) # Performing track reconstruction in the local frame sl_fit_results[iSL] = [] layer_groups = hits_sl.groupby('layer').groups n_layers = len(layer_groups) # Stopping if lass than 3 layers of hits if n_layers < config.NHITS_MIN_LOCAL: continue hitid_layers = [gr.to_numpy() for gr_name, gr in layer_groups.items()] # Building the list of all possible hit combinations with 1 hit from each layer hits_layered = list(itertools.product(*hitid_layers)) # Building more combinations using only either left or right position of each hit for hit_ids in hits_layered: # print('- -', hit_ids) posz = hits_sl.loc[hits_sl.index.isin(hit_ids), 'posz'].values posx = hits_sl.loc[hits_sl.index.isin(hit_ids), ['lposx', 'rposx']].values posx_combs = list(itertools.product(*posx)) # Fitting each combination fit_results_lr = [] fit_range = (min(posz), max(posz)) for iC, posx_comb in enumerate(posx_combs): pfit, stats = Polynomial.fit(posz, posx_comb, 1, full=True, window=fit_range, domain=fit_range) chi2 = stats[0][0] / n_layers if chi2 < config.FIT_CHI2_MAX: a0, a1 = pfit fit_results_lr.append((chi2, hit_ids, pfit)) # Keeping only the best fit result from the given set of physical hits fit_results_lr.sort(key=itemgetter(0)) if fit_results_lr: sl_fit_results[iSL].append(fit_results_lr[0]) # Sorting the fit results of a SL by Chi2 sl_fit_results[iSL].sort(key=itemgetter(0)) if sl_fit_results[iSL]: # Drawing fitted tracks posz = np.array([G.SL_FRAME['b']+1, G.SL_FRAME['t']-1], dtype=np.float32) for iR, res in enumerate(sl_fit_results[iSL][:5]): col = config.TRACK_COLORS[iR] posx = res[2](posz) figs['sl'][iSL].line(x=posx, y=posz, line_color=col, line_alpha=0.7, line_width=3) if args.plot: # Drawing the left and right hits in global frame for view, sls in 
GLOBAL_VIEW_SLs.items(): sl_ids = [sl.id for sl in sls] hits_sls = H.hits.loc[H.hits['sl'].isin(sl_ids)] figs['global'][view].square(x=hits_sls['glpos'+view[0]], y=hits_sls['glpos'+view[1]], fill_color='red', fill_alpha=0.7, line_width=0) figs['global'][view].square(x=hits_sls['grpos'+view[0]], y=hits_sls['grpos'+view[1]], fill_color='blue', fill_alpha=0.7, line_width=0) # Building 3D segments from the fit results in each SL posz = np.array([G.SL_FRAME['b'], G.SL_FRAME['t']], dtype=np.float32) for sl in sls: for iR, res in enumerate(sl_fit_results[sl.id][:5]): posx = res[2](posz) start = (posx[0], 0, posz[0]) end = (posx[1], 0, posz[1]) segL = Segment(start, end) segG = segL.fromSL(sl) segG.calc_vector() # Extending the global segment to the full height of the view start = segG.pointAtZ(plot.PLOT_RANGE['y'][0]) end = segG.pointAtZ(plot.PLOT_RANGE['y'][1]) # Getting XY coordinates of the global segment for the current view iX = COOR_ID[view[0]] posx = [start[iX], end[iX]] posy = [start[2], end[2]] # Drawing the segment col = config.TRACK_COLORS[sl.id] figs['global'][view].line(x=posx, y=posy, line_color=col, line_alpha=0.7, line_width=3) print(sl.id, iR, posx, posy) # Storing the figures to an HTML file if args.plot: plots = [[figs['sl'][l]] for l in [3, 1, 2, 0]] plots.append([figs['global'][v] for v in ['xz', 'yz']]) bokeh.io.output_file(args.output.format(event), mode='cdn') bokeh.io.save(bokeh.layouts.layout(plots))
identifier_body
reco_tracks.py
#!/usr/bin/env python from pdb import set_trace as br from operator import itemgetter from numpy.polynomial.polynomial import Polynomial from modules.utils import OUT_CONFIG from modules.geometry.hit import HitManager from modules.geometry.sl import SL from modules.geometry.segment import Segment from modules.geometry import Geometry, COOR_ID from modules.reco import config, plot from modules.analysis import config as CONFIGURATION import os import itertools import bokeh import numpy as np ############################################# INPUT ARGUMENTS import argparse parser = argparse.ArgumentParser(description='Track reconstruction from input hits.') parser.add_argument('-E', '--events', metavar='N', help='Events to process', type=int, default=None, nargs='+') parser.add_argument('-f', '--format', help='Input hits format', default='time_wire') parser.add_argument('-g', '--glance', help='Only show # hits in each event', action='store_true', default=False) parser.add_argument('-m', '--max_hits', dest='max_hits', type=int, help='Maximum number of hits allowed in one event [default: 15]', action='store', default=15) parser.add_argument('-o', '--output', help='Output path', default='plots/hits_ev{0:d}.html') parser.add_argument('-p', '--plot', help='Draw plots', action='store_true', default=False) parser.add_argument('inputs', metavar='FILE', help='Input files with raw hits, 1 event/line', nargs='+') args = parser.parse_args() # Checking validity of the input format if args.format not in OUT_CONFIG: raise ValueError('Wrong input format (-f) specified') # Checking existence of input files for file_path in args.inputs: if not os.path.exists(os.path.expandvars(file_path)): print('--- ERROR ---') print(' \''+file_path+'\' file not found') print(' please provide the correct path to the file containing raw hits' ) print() exit() def process(input_files): """Reconstruct tracks from hits in all events from the provided input files""" n_words_event = 
len(OUT_CONFIG['event']['fields']) n_words_hit = len(OUT_CONFIG[args.format]['fields']) # Initialising event event = -1 G = Geometry(CONFIGURATION) H = HitManager() SLs = {} for iSL in config.SL_SHIFT.keys(): SLs[iSL] = SL(iSL, config.SL_SHIFT[iSL], config.SL_ROTATION[iSL]) # Defining which SLs should be plotted in which global view GLOBAL_VIEW_SLs = { 'xz': [SLs[0], SLs[2]], 'yz': [SLs[1], SLs[3]] } # Analyzing the hits in each event for file_path in input_files: # Reading input file line by line
file_line_nr += 1 if file_line_nr <= 1: continue hits_lst = [] H.reset() words = line.strip().split() event = int(words[0]) # Skipping event if it was not specified in command line if args.events is not None and event not in args.events: continue nhits = int(words[1]) print('Event {0:<5d} # hits: {1:d}'.format(event, nhits)) if args.glance: continue # Skipping event with too many hits (most likely a spark event that will take forever to process) if nhits > args.max_hits: continue # Extracting hit information for iHit in range(nhits): start = n_words_event + iHit*n_words_hit ww = words[start:start+n_words_hit] hits_lst.append([int(ww[0]), int(ww[1]), int(ww[2]), float(ww[3])]) H.add_hits(hits_lst) # Removing hits with time outside the timebox region H.hits.drop(H.hits.loc[(H.hits['time'] < config.TIMEBOX[0]) | (H.hits['time'] > config.TIMEBOX[1])].index, inplace=True) # Calculating local+global hit positions H.calc_pos(SLs) # Creating figures of the chambers figs = {} figs['sl'] = plot.book_chambers_figure(G) figs['global'] = plot.book_global_figure(G, GLOBAL_VIEW_SLs) # Analyzing hits in each SL sl_fit_results = {} for iSL, sl in SLs.items(): # print('- SL', iSL) hits_sl = H.hits.loc[H.hits['sl'] == iSL].sort_values('layer') if args.plot: # Drawing the left and right hits in local frame figs['sl'][iSL].square(x=hits_sl['lposx'], y=hits_sl['posz'], size=5, fill_color='red', fill_alpha=0.7, line_width=0) figs['sl'][iSL].square(x=hits_sl['rposx'], y=hits_sl['posz'], size=5, fill_color='blue', fill_alpha=0.7, line_width=0) # Performing track reconstruction in the local frame sl_fit_results[iSL] = [] layer_groups = hits_sl.groupby('layer').groups n_layers = len(layer_groups) # Stopping if lass than 3 layers of hits if n_layers < config.NHITS_MIN_LOCAL: continue hitid_layers = [gr.to_numpy() for gr_name, gr in layer_groups.items()] # Building the list of all possible hit combinations with 1 hit from each layer hits_layered = list(itertools.product(*hitid_layers)) # 
Building more combinations using only either left or right position of each hit for hit_ids in hits_layered: # print('- -', hit_ids) posz = hits_sl.loc[hits_sl.index.isin(hit_ids), 'posz'].values posx = hits_sl.loc[hits_sl.index.isin(hit_ids), ['lposx', 'rposx']].values posx_combs = list(itertools.product(*posx)) # Fitting each combination fit_results_lr = [] fit_range = (min(posz), max(posz)) for iC, posx_comb in enumerate(posx_combs): pfit, stats = Polynomial.fit(posz, posx_comb, 1, full=True, window=fit_range, domain=fit_range) chi2 = stats[0][0] / n_layers if chi2 < config.FIT_CHI2_MAX: a0, a1 = pfit fit_results_lr.append((chi2, hit_ids, pfit)) # Keeping only the best fit result from the given set of physical hits fit_results_lr.sort(key=itemgetter(0)) if fit_results_lr: sl_fit_results[iSL].append(fit_results_lr[0]) # Sorting the fit results of a SL by Chi2 sl_fit_results[iSL].sort(key=itemgetter(0)) if sl_fit_results[iSL]: # Drawing fitted tracks posz = np.array([G.SL_FRAME['b']+1, G.SL_FRAME['t']-1], dtype=np.float32) for iR, res in enumerate(sl_fit_results[iSL][:5]): col = config.TRACK_COLORS[iR] posx = res[2](posz) figs['sl'][iSL].line(x=posx, y=posz, line_color=col, line_alpha=0.7, line_width=3) if args.plot: # Drawing the left and right hits in global frame for view, sls in GLOBAL_VIEW_SLs.items(): sl_ids = [sl.id for sl in sls] hits_sls = H.hits.loc[H.hits['sl'].isin(sl_ids)] figs['global'][view].square(x=hits_sls['glpos'+view[0]], y=hits_sls['glpos'+view[1]], fill_color='red', fill_alpha=0.7, line_width=0) figs['global'][view].square(x=hits_sls['grpos'+view[0]], y=hits_sls['grpos'+view[1]], fill_color='blue', fill_alpha=0.7, line_width=0) # Building 3D segments from the fit results in each SL posz = np.array([G.SL_FRAME['b'], G.SL_FRAME['t']], dtype=np.float32) for sl in sls: for iR, res in enumerate(sl_fit_results[sl.id][:5]): posx = res[2](posz) start = (posx[0], 0, posz[0]) end = (posx[1], 0, posz[1]) segL = Segment(start, end) segG = segL.fromSL(sl) 
segG.calc_vector() # Extending the global segment to the full height of the view start = segG.pointAtZ(plot.PLOT_RANGE['y'][0]) end = segG.pointAtZ(plot.PLOT_RANGE['y'][1]) # Getting XY coordinates of the global segment for the current view iX = COOR_ID[view[0]] posx = [start[iX], end[iX]] posy = [start[2], end[2]] # Drawing the segment col = config.TRACK_COLORS[sl.id] figs['global'][view].line(x=posx, y=posy, line_color=col, line_alpha=0.7, line_width=3) print(sl.id, iR, posx, posy) # Storing the figures to an HTML file if args.plot: plots = [[figs['sl'][l]] for l in [3, 1, 2, 0]] plots.append([figs['global'][v] for v in ['xz', 'yz']]) bokeh.io.output_file(args.output.format(event), mode='cdn') bokeh.io.save(bokeh.layouts.layout(plots)) process(args.inputs)
with open(file_path, 'r') as file_in: file_line_nr = 0 for line in file_in:
random_line_split
reco_tracks.py
#!/usr/bin/env python from pdb import set_trace as br from operator import itemgetter from numpy.polynomial.polynomial import Polynomial from modules.utils import OUT_CONFIG from modules.geometry.hit import HitManager from modules.geometry.sl import SL from modules.geometry.segment import Segment from modules.geometry import Geometry, COOR_ID from modules.reco import config, plot from modules.analysis import config as CONFIGURATION import os import itertools import bokeh import numpy as np ############################################# INPUT ARGUMENTS import argparse parser = argparse.ArgumentParser(description='Track reconstruction from input hits.') parser.add_argument('-E', '--events', metavar='N', help='Events to process', type=int, default=None, nargs='+') parser.add_argument('-f', '--format', help='Input hits format', default='time_wire') parser.add_argument('-g', '--glance', help='Only show # hits in each event', action='store_true', default=False) parser.add_argument('-m', '--max_hits', dest='max_hits', type=int, help='Maximum number of hits allowed in one event [default: 15]', action='store', default=15) parser.add_argument('-o', '--output', help='Output path', default='plots/hits_ev{0:d}.html') parser.add_argument('-p', '--plot', help='Draw plots', action='store_true', default=False) parser.add_argument('inputs', metavar='FILE', help='Input files with raw hits, 1 event/line', nargs='+') args = parser.parse_args() # Checking validity of the input format if args.format not in OUT_CONFIG: raise ValueError('Wrong input format (-f) specified') # Checking existence of input files for file_path in args.inputs: if not os.path.exists(os.path.expandvars(file_path)): print('--- ERROR ---') print(' \''+file_path+'\' file not found') print(' please provide the correct path to the file containing raw hits' ) print() exit() def process(input_files): """Reconstruct tracks from hits in all events from the provided input files""" n_words_event = 
len(OUT_CONFIG['event']['fields']) n_words_hit = len(OUT_CONFIG[args.format]['fields']) # Initialising event event = -1 G = Geometry(CONFIGURATION) H = HitManager() SLs = {} for iSL in config.SL_SHIFT.keys(): SLs[iSL] = SL(iSL, config.SL_SHIFT[iSL], config.SL_ROTATION[iSL]) # Defining which SLs should be plotted in which global view GLOBAL_VIEW_SLs = { 'xz': [SLs[0], SLs[2]], 'yz': [SLs[1], SLs[3]] } # Analyzing the hits in each event for file_path in input_files: # Reading input file line by line with open(file_path, 'r') as file_in: file_line_nr = 0 for line in file_in: file_line_nr += 1 if file_line_nr <= 1: continue hits_lst = [] H.reset() words = line.strip().split() event = int(words[0]) # Skipping event if it was not specified in command line if args.events is not None and event not in args.events: continue nhits = int(words[1]) print('Event {0:<5d} # hits: {1:d}'.format(event, nhits)) if args.glance: continue # Skipping event with too many hits (most likely a spark event that will take forever to process) if nhits > args.max_hits: continue # Extracting hit information for iHit in range(nhits): start = n_words_event + iHit*n_words_hit ww = words[start:start+n_words_hit] hits_lst.append([int(ww[0]), int(ww[1]), int(ww[2]), float(ww[3])]) H.add_hits(hits_lst) # Removing hits with time outside the timebox region H.hits.drop(H.hits.loc[(H.hits['time'] < config.TIMEBOX[0]) | (H.hits['time'] > config.TIMEBOX[1])].index, inplace=True) # Calculating local+global hit positions H.calc_pos(SLs) # Creating figures of the chambers figs = {} figs['sl'] = plot.book_chambers_figure(G) figs['global'] = plot.book_global_figure(G, GLOBAL_VIEW_SLs) # Analyzing hits in each SL sl_fit_results = {} for iSL, sl in SLs.items(): # print('- SL', iSL) hits_sl = H.hits.loc[H.hits['sl'] == iSL].sort_values('layer') if args.plot: # Drawing the left and right hits in local frame figs['sl'][iSL].square(x=hits_sl['lposx'], y=hits_sl['posz'], size=5, fill_color='red', fill_alpha=0.7, 
line_width=0) figs['sl'][iSL].square(x=hits_sl['rposx'], y=hits_sl['posz'], size=5, fill_color='blue', fill_alpha=0.7, line_width=0) # Performing track reconstruction in the local frame sl_fit_results[iSL] = [] layer_groups = hits_sl.groupby('layer').groups n_layers = len(layer_groups) # Stopping if lass than 3 layers of hits if n_layers < config.NHITS_MIN_LOCAL: continue hitid_layers = [gr.to_numpy() for gr_name, gr in layer_groups.items()] # Building the list of all possible hit combinations with 1 hit from each layer hits_layered = list(itertools.product(*hitid_layers)) # Building more combinations using only either left or right position of each hit for hit_ids in hits_layered: # print('- -', hit_ids) posz = hits_sl.loc[hits_sl.index.isin(hit_ids), 'posz'].values posx = hits_sl.loc[hits_sl.index.isin(hit_ids), ['lposx', 'rposx']].values posx_combs = list(itertools.product(*posx)) # Fitting each combination fit_results_lr = [] fit_range = (min(posz), max(posz)) for iC, posx_comb in enumerate(posx_combs): pfit, stats = Polynomial.fit(posz, posx_comb, 1, full=True, window=fit_range, domain=fit_range) chi2 = stats[0][0] / n_layers if chi2 < config.FIT_CHI2_MAX: a0, a1 = pfit fit_results_lr.append((chi2, hit_ids, pfit)) # Keeping only the best fit result from the given set of physical hits fit_results_lr.sort(key=itemgetter(0)) if fit_results_lr: sl_fit_results[iSL].append(fit_results_lr[0]) # Sorting the fit results of a SL by Chi2 sl_fit_results[iSL].sort(key=itemgetter(0)) if sl_fit_results[iSL]: # Drawing fitted tracks posz = np.array([G.SL_FRAME['b']+1, G.SL_FRAME['t']-1], dtype=np.float32) for iR, res in enumerate(sl_fit_results[iSL][:5]): col = config.TRACK_COLORS[iR] posx = res[2](posz) figs['sl'][iSL].line(x=posx, y=posz, line_color=col, line_alpha=0.7, line_width=3) if args.plot: # Drawing the left and right hits in global frame
# Storing the figures to an HTML file if args.plot: plots = [[figs['sl'][l]] for l in [3, 1, 2, 0]] plots.append([figs['global'][v] for v in ['xz', 'yz']]) bokeh.io.output_file(args.output.format(event), mode='cdn') bokeh.io.save(bokeh.layouts.layout(plots)) process(args.inputs)
for view, sls in GLOBAL_VIEW_SLs.items(): sl_ids = [sl.id for sl in sls] hits_sls = H.hits.loc[H.hits['sl'].isin(sl_ids)] figs['global'][view].square(x=hits_sls['glpos'+view[0]], y=hits_sls['glpos'+view[1]], fill_color='red', fill_alpha=0.7, line_width=0) figs['global'][view].square(x=hits_sls['grpos'+view[0]], y=hits_sls['grpos'+view[1]], fill_color='blue', fill_alpha=0.7, line_width=0) # Building 3D segments from the fit results in each SL posz = np.array([G.SL_FRAME['b'], G.SL_FRAME['t']], dtype=np.float32) for sl in sls: for iR, res in enumerate(sl_fit_results[sl.id][:5]): posx = res[2](posz) start = (posx[0], 0, posz[0]) end = (posx[1], 0, posz[1]) segL = Segment(start, end) segG = segL.fromSL(sl) segG.calc_vector() # Extending the global segment to the full height of the view start = segG.pointAtZ(plot.PLOT_RANGE['y'][0]) end = segG.pointAtZ(plot.PLOT_RANGE['y'][1]) # Getting XY coordinates of the global segment for the current view iX = COOR_ID[view[0]] posx = [start[iX], end[iX]] posy = [start[2], end[2]] # Drawing the segment col = config.TRACK_COLORS[sl.id] figs['global'][view].line(x=posx, y=posy, line_color=col, line_alpha=0.7, line_width=3) print(sl.id, iR, posx, posy)
conditional_block
params.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: params.proto package pbs import ( fmt "fmt" proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // generate command-line: protoc -I=protos --gogofaster_out=plugins=grpc:protos/pbs protos/*.proto // connection parameters for server type ConnParameters struct { PingInterval int64 `protobuf:"varint,1,opt,name=pingInterval,proto3" json:"pingInterval,omitempty"` PingTimeout int64 `protobuf:"varint,2,opt,name=pingTimeout,proto3" json:"pingTimeout,omitempty"` SID string `protobuf:"bytes,3,opt,name=sID,proto3" json:"sID,omitempty"` Upgrades []string `protobuf:"bytes,4,rep,name=upgrades,proto3" json:"upgrades,omitempty"` } func (m *ConnParameters) Reset() { *m = ConnParameters{} } func (m *ConnParameters) String() string { return proto.CompactTextString(m) } func (*ConnParameters) ProtoMessage() {} func (*ConnParameters) Descriptor() ([]byte, []int) { return fileDescriptor_8679b07c520418a1, []int{0} } func (m *ConnParameters) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ConnParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic
else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ConnParameters) XXX_Merge(src proto.Message) { xxx_messageInfo_ConnParameters.Merge(m, src) } func (m *ConnParameters) XXX_Size() int { return m.Size() } func (m *ConnParameters) XXX_DiscardUnknown() { xxx_messageInfo_ConnParameters.DiscardUnknown(m) } var xxx_messageInfo_ConnParameters proto.InternalMessageInfo func (m *ConnParameters) GetPingInterval() int64 { if m != nil { return m.PingInterval } return 0 } func (m *ConnParameters) GetPingTimeout() int64 { if m != nil { return m.PingTimeout } return 0 } func (m *ConnParameters) GetSID() string { if m != nil { return m.SID } return "" } func (m *ConnParameters) GetUpgrades() []string { if m != nil { return m.Upgrades } return nil } func init() { proto.RegisterType((*ConnParameters)(nil), "core.go.ConnParameters") } func init() { proto.RegisterFile("params.proto", fileDescriptor_8679b07c520418a1) } var fileDescriptor_8679b07c520418a1 = []byte{ // 182 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x4b, 0xcf, 0x57, 0x6a, 0x61, 0xe4, 0xe2, 0x73, 0xce, 0xcf, 0xcb, 0x0b, 0x00, 0xc9, 0xa6, 0x96, 0xa4, 0x16, 0x15, 0x0b, 0x29, 0x71, 0xf1, 0x14, 0x64, 0xe6, 0xa5, 0x7b, 0xe6, 0x95, 0xa4, 0x16, 0x95, 0x25, 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xa1, 0x88, 0x09, 0x29, 0x70, 0x71, 0x83, 0xf8, 0x21, 0x99, 0xb9, 0xa9, 0xf9, 0xa5, 0x25, 0x12, 0x4c, 0x60, 0x25, 0xc8, 0x42, 0x42, 0x02, 0x5c, 0xcc, 0xc5, 0x9e, 0x2e, 0x12, 0xcc, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x20, 0xa6, 0x90, 0x14, 0x17, 0x47, 0x69, 0x41, 0x7a, 0x51, 0x62, 0x4a, 0x6a, 0xb1, 0x04, 0x8b, 0x02, 0xb3, 0x06, 0x67, 0x10, 0x9c, 0xef, 0x24, 0x7b, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 
0xc7, 0x72, 0x0c, 0x51, 0xcc, 0x05, 0x49, 0xc5, 0x49, 0x6c, 0x60, 0x57, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x52, 0x43, 0xb7, 0x5d, 0xc5, 0x00, 0x00, 0x00, } func (m *ConnParameters) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ConnParameters) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.PingInterval != 0 { dAtA[i] = 0x8 i++ i = encodeVarintParams(dAtA, i, uint64(m.PingInterval)) } if m.PingTimeout != 0 { dAtA[i] = 0x10 i++ i = encodeVarintParams(dAtA, i, uint64(m.PingTimeout)) } if len(m.SID) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintParams(dAtA, i, uint64(len(m.SID))) i += copy(dAtA[i:], m.SID) } if len(m.Upgrades) > 0 { for _, s := range m.Upgrades { dAtA[i] = 0x22 i++ l = len(s) for l >= 1<<7 { dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } dAtA[i] = uint8(l) i++ i += copy(dAtA[i:], s) } } return i, nil } func encodeVarintParams(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func (m *ConnParameters) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.PingInterval != 0 { n += 1 + sovParams(uint64(m.PingInterval)) } if m.PingTimeout != 0 { n += 1 + sovParams(uint64(m.PingTimeout)) } l = len(m.SID) if l > 0 { n += 1 + l + sovParams(uint64(l)) } if len(m.Upgrades) > 0 { for _, s := range m.Upgrades { l = len(s) n += 1 + l + sovParams(uint64(l)) } } return n } func sovParams(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozParams(x uint64) (n int) { return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *ConnParameters) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return 
io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ConnParameters: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ConnParameters: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field PingInterval", wireType) } m.PingInterval = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.PingInterval |= int64(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field PingTimeout", wireType) } m.PingTimeout = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.PingTimeout |= int64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthParams } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthParams } if postIndex > l { return io.ErrUnexpectedEOF } m.SID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Upgrades", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) 
<< shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthParams } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthParams } if postIndex > l { return io.ErrUnexpectedEOF } m.Upgrades = append(m.Upgrades, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipParams(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipParams(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthParams } iNdEx += length if iNdEx < 0 { return 0, ErrInvalidLengthParams } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 
{ break } next, err := skipParams(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next if iNdEx < 0 { return 0, ErrInvalidLengthParams } } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") )
{ return xxx_messageInfo_ConnParameters.Marshal(b, m, deterministic) }
conditional_block
params.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: params.proto package pbs import ( fmt "fmt" proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // generate command-line: protoc -I=protos --gogofaster_out=plugins=grpc:protos/pbs protos/*.proto // connection parameters for server type ConnParameters struct { PingInterval int64 `protobuf:"varint,1,opt,name=pingInterval,proto3" json:"pingInterval,omitempty"` PingTimeout int64 `protobuf:"varint,2,opt,name=pingTimeout,proto3" json:"pingTimeout,omitempty"` SID string `protobuf:"bytes,3,opt,name=sID,proto3" json:"sID,omitempty"` Upgrades []string `protobuf:"bytes,4,rep,name=upgrades,proto3" json:"upgrades,omitempty"` } func (m *ConnParameters) Reset() { *m = ConnParameters{} } func (m *ConnParameters) String() string { return proto.CompactTextString(m) } func (*ConnParameters) ProtoMessage() {} func (*ConnParameters) Descriptor() ([]byte, []int) { return fileDescriptor_8679b07c520418a1, []int{0} } func (m *ConnParameters) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ConnParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ConnParameters.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ConnParameters) XXX_Merge(src proto.Message) { xxx_messageInfo_ConnParameters.Merge(m, src) } func (m *ConnParameters) XXX_Size() int { return m.Size() } func (m *ConnParameters) 
XXX_DiscardUnknown() { xxx_messageInfo_ConnParameters.DiscardUnknown(m) } var xxx_messageInfo_ConnParameters proto.InternalMessageInfo func (m *ConnParameters) GetPingInterval() int64 { if m != nil { return m.PingInterval } return 0 } func (m *ConnParameters) GetPingTimeout() int64 { if m != nil { return m.PingTimeout } return 0 } func (m *ConnParameters) GetSID() string { if m != nil { return m.SID } return "" } func (m *ConnParameters) GetUpgrades() []string { if m != nil { return m.Upgrades } return nil } func init() { proto.RegisterType((*ConnParameters)(nil), "core.go.ConnParameters") } func init() { proto.RegisterFile("params.proto", fileDescriptor_8679b07c520418a1) } var fileDescriptor_8679b07c520418a1 = []byte{ // 182 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x4b, 0xcf, 0x57, 0x6a, 0x61, 0xe4, 0xe2, 0x73, 0xce, 0xcf, 0xcb, 0x0b, 0x00, 0xc9, 0xa6, 0x96, 0xa4, 0x16, 0x15, 0x0b, 0x29, 0x71, 0xf1, 0x14, 0x64, 0xe6, 0xa5, 0x7b, 0xe6, 0x95, 0xa4, 0x16, 0x95, 0x25, 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xa1, 0x88, 0x09, 0x29, 0x70, 0x71, 0x83, 0xf8, 0x21, 0x99, 0xb9, 0xa9, 0xf9, 0xa5, 0x25, 0x12, 0x4c, 0x60, 0x25, 0xc8, 0x42, 0x42, 0x02, 0x5c, 0xcc, 0xc5, 0x9e, 0x2e, 0x12, 0xcc, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x20, 0xa6, 0x90, 0x14, 0x17, 0x47, 0x69, 0x41, 0x7a, 0x51, 0x62, 0x4a, 0x6a, 0xb1, 0x04, 0x8b, 0x02, 0xb3, 0x06, 0x67, 0x10, 0x9c, 0xef, 0x24, 0x7b, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0xcc, 0x05, 0x49, 0xc5, 0x49, 0x6c, 0x60, 0x57, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x52, 0x43, 0xb7, 0x5d, 0xc5, 0x00, 0x00, 0x00, } func (m *ConnParameters) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := 
m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ConnParameters) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.PingInterval != 0 { dAtA[i] = 0x8 i++ i = encodeVarintParams(dAtA, i, uint64(m.PingInterval)) } if m.PingTimeout != 0 { dAtA[i] = 0x10 i++ i = encodeVarintParams(dAtA, i, uint64(m.PingTimeout)) } if len(m.SID) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintParams(dAtA, i, uint64(len(m.SID))) i += copy(dAtA[i:], m.SID) } if len(m.Upgrades) > 0 { for _, s := range m.Upgrades { dAtA[i] = 0x22 i++ l = len(s) for l >= 1<<7 { dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } dAtA[i] = uint8(l) i++ i += copy(dAtA[i:], s) } } return i, nil } func encodeVarintParams(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func (m *ConnParameters)
() (n int) { if m == nil { return 0 } var l int _ = l if m.PingInterval != 0 { n += 1 + sovParams(uint64(m.PingInterval)) } if m.PingTimeout != 0 { n += 1 + sovParams(uint64(m.PingTimeout)) } l = len(m.SID) if l > 0 { n += 1 + l + sovParams(uint64(l)) } if len(m.Upgrades) > 0 { for _, s := range m.Upgrades { l = len(s) n += 1 + l + sovParams(uint64(l)) } } return n } func sovParams(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozParams(x uint64) (n int) { return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *ConnParameters) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ConnParameters: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ConnParameters: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field PingInterval", wireType) } m.PingInterval = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.PingInterval |= int64(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field PingTimeout", wireType) } m.PingTimeout = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.PingTimeout |= int64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SID", wireType) } var stringLen uint64 for shift := 
uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthParams } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthParams } if postIndex > l { return io.ErrUnexpectedEOF } m.SID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Upgrades", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthParams } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthParams } if postIndex > l { return io.ErrUnexpectedEOF } m.Upgrades = append(m.Upgrades, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipParams(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipParams(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 
{ break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthParams } iNdEx += length if iNdEx < 0 { return 0, ErrInvalidLengthParams } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipParams(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next if iNdEx < 0 { return 0, ErrInvalidLengthParams } } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") )
Size
identifier_name
params.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: params.proto package pbs import ( fmt "fmt" proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // generate command-line: protoc -I=protos --gogofaster_out=plugins=grpc:protos/pbs protos/*.proto // connection parameters for server type ConnParameters struct { PingInterval int64 `protobuf:"varint,1,opt,name=pingInterval,proto3" json:"pingInterval,omitempty"` PingTimeout int64 `protobuf:"varint,2,opt,name=pingTimeout,proto3" json:"pingTimeout,omitempty"` SID string `protobuf:"bytes,3,opt,name=sID,proto3" json:"sID,omitempty"` Upgrades []string `protobuf:"bytes,4,rep,name=upgrades,proto3" json:"upgrades,omitempty"` } func (m *ConnParameters) Reset() { *m = ConnParameters{} } func (m *ConnParameters) String() string { return proto.CompactTextString(m) } func (*ConnParameters) ProtoMessage() {} func (*ConnParameters) Descriptor() ([]byte, []int) { return fileDescriptor_8679b07c520418a1, []int{0} } func (m *ConnParameters) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ConnParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ConnParameters.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ConnParameters) XXX_Merge(src proto.Message) { xxx_messageInfo_ConnParameters.Merge(m, src) } func (m *ConnParameters) XXX_Size() int { return m.Size() } func (m *ConnParameters) 
XXX_DiscardUnknown() { xxx_messageInfo_ConnParameters.DiscardUnknown(m) } var xxx_messageInfo_ConnParameters proto.InternalMessageInfo func (m *ConnParameters) GetPingInterval() int64 { if m != nil { return m.PingInterval } return 0 }
return 0 } func (m *ConnParameters) GetSID() string { if m != nil { return m.SID } return "" } func (m *ConnParameters) GetUpgrades() []string { if m != nil { return m.Upgrades } return nil } func init() { proto.RegisterType((*ConnParameters)(nil), "core.go.ConnParameters") } func init() { proto.RegisterFile("params.proto", fileDescriptor_8679b07c520418a1) } var fileDescriptor_8679b07c520418a1 = []byte{ // 182 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x4b, 0xcf, 0x57, 0x6a, 0x61, 0xe4, 0xe2, 0x73, 0xce, 0xcf, 0xcb, 0x0b, 0x00, 0xc9, 0xa6, 0x96, 0xa4, 0x16, 0x15, 0x0b, 0x29, 0x71, 0xf1, 0x14, 0x64, 0xe6, 0xa5, 0x7b, 0xe6, 0x95, 0xa4, 0x16, 0x95, 0x25, 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xa1, 0x88, 0x09, 0x29, 0x70, 0x71, 0x83, 0xf8, 0x21, 0x99, 0xb9, 0xa9, 0xf9, 0xa5, 0x25, 0x12, 0x4c, 0x60, 0x25, 0xc8, 0x42, 0x42, 0x02, 0x5c, 0xcc, 0xc5, 0x9e, 0x2e, 0x12, 0xcc, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x20, 0xa6, 0x90, 0x14, 0x17, 0x47, 0x69, 0x41, 0x7a, 0x51, 0x62, 0x4a, 0x6a, 0xb1, 0x04, 0x8b, 0x02, 0xb3, 0x06, 0x67, 0x10, 0x9c, 0xef, 0x24, 0x7b, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0xcc, 0x05, 0x49, 0xc5, 0x49, 0x6c, 0x60, 0x57, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x52, 0x43, 0xb7, 0x5d, 0xc5, 0x00, 0x00, 0x00, } func (m *ConnParameters) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m *ConnParameters) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.PingInterval != 0 { dAtA[i] = 0x8 i++ i = encodeVarintParams(dAtA, i, uint64(m.PingInterval)) } if m.PingTimeout != 0 { dAtA[i] = 0x10 i++ i = 
encodeVarintParams(dAtA, i, uint64(m.PingTimeout)) } if len(m.SID) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintParams(dAtA, i, uint64(len(m.SID))) i += copy(dAtA[i:], m.SID) } if len(m.Upgrades) > 0 { for _, s := range m.Upgrades { dAtA[i] = 0x22 i++ l = len(s) for l >= 1<<7 { dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } dAtA[i] = uint8(l) i++ i += copy(dAtA[i:], s) } } return i, nil } func encodeVarintParams(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func (m *ConnParameters) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.PingInterval != 0 { n += 1 + sovParams(uint64(m.PingInterval)) } if m.PingTimeout != 0 { n += 1 + sovParams(uint64(m.PingTimeout)) } l = len(m.SID) if l > 0 { n += 1 + l + sovParams(uint64(l)) } if len(m.Upgrades) > 0 { for _, s := range m.Upgrades { l = len(s) n += 1 + l + sovParams(uint64(l)) } } return n } func sovParams(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozParams(x uint64) (n int) { return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *ConnParameters) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ConnParameters: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ConnParameters: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field PingInterval", wireType) } m.PingInterval = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l 
{ return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.PingInterval |= int64(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field PingTimeout", wireType) } m.PingTimeout = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.PingTimeout |= int64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthParams } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthParams } if postIndex > l { return io.ErrUnexpectedEOF } m.SID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Upgrades", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthParams } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthParams } if postIndex > l { return io.ErrUnexpectedEOF } m.Upgrades = append(m.Upgrades, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipParams(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if 
iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipParams(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthParams } iNdEx += length if iNdEx < 0 { return 0, ErrInvalidLengthParams } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipParams(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next if iNdEx < 0 { return 0, ErrInvalidLengthParams } } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") )
func (m *ConnParameters) GetPingTimeout() int64 { if m != nil { return m.PingTimeout }
random_line_split
params.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: params.proto package pbs import ( fmt "fmt" proto "github.com/gogo/protobuf/proto" io "io" math "math" math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // generate command-line: protoc -I=protos --gogofaster_out=plugins=grpc:protos/pbs protos/*.proto // connection parameters for server type ConnParameters struct { PingInterval int64 `protobuf:"varint,1,opt,name=pingInterval,proto3" json:"pingInterval,omitempty"` PingTimeout int64 `protobuf:"varint,2,opt,name=pingTimeout,proto3" json:"pingTimeout,omitempty"` SID string `protobuf:"bytes,3,opt,name=sID,proto3" json:"sID,omitempty"` Upgrades []string `protobuf:"bytes,4,rep,name=upgrades,proto3" json:"upgrades,omitempty"` } func (m *ConnParameters) Reset() { *m = ConnParameters{} } func (m *ConnParameters) String() string { return proto.CompactTextString(m) } func (*ConnParameters) ProtoMessage() {} func (*ConnParameters) Descriptor() ([]byte, []int) { return fileDescriptor_8679b07c520418a1, []int{0} } func (m *ConnParameters) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *ConnParameters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_ConnParameters.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalTo(b) if err != nil { return nil, err } return b[:n], nil } } func (m *ConnParameters) XXX_Merge(src proto.Message) { xxx_messageInfo_ConnParameters.Merge(m, src) } func (m *ConnParameters) XXX_Size() int { return m.Size() } func (m *ConnParameters) 
XXX_DiscardUnknown()
var xxx_messageInfo_ConnParameters proto.InternalMessageInfo func (m *ConnParameters) GetPingInterval() int64 { if m != nil { return m.PingInterval } return 0 } func (m *ConnParameters) GetPingTimeout() int64 { if m != nil { return m.PingTimeout } return 0 } func (m *ConnParameters) GetSID() string { if m != nil { return m.SID } return "" } func (m *ConnParameters) GetUpgrades() []string { if m != nil { return m.Upgrades } return nil } func init() { proto.RegisterType((*ConnParameters)(nil), "core.go.ConnParameters") } func init() { proto.RegisterFile("params.proto", fileDescriptor_8679b07c520418a1) } var fileDescriptor_8679b07c520418a1 = []byte{ // 182 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x48, 0x2c, 0x4a, 0xcc, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4f, 0xce, 0x2f, 0x4a, 0xd5, 0x4b, 0xcf, 0x57, 0x6a, 0x61, 0xe4, 0xe2, 0x73, 0xce, 0xcf, 0xcb, 0x0b, 0x00, 0xc9, 0xa6, 0x96, 0xa4, 0x16, 0x15, 0x0b, 0x29, 0x71, 0xf1, 0x14, 0x64, 0xe6, 0xa5, 0x7b, 0xe6, 0x95, 0xa4, 0x16, 0x95, 0x25, 0xe6, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xa1, 0x88, 0x09, 0x29, 0x70, 0x71, 0x83, 0xf8, 0x21, 0x99, 0xb9, 0xa9, 0xf9, 0xa5, 0x25, 0x12, 0x4c, 0x60, 0x25, 0xc8, 0x42, 0x42, 0x02, 0x5c, 0xcc, 0xc5, 0x9e, 0x2e, 0x12, 0xcc, 0x0a, 0x8c, 0x1a, 0x9c, 0x41, 0x20, 0xa6, 0x90, 0x14, 0x17, 0x47, 0x69, 0x41, 0x7a, 0x51, 0x62, 0x4a, 0x6a, 0xb1, 0x04, 0x8b, 0x02, 0xb3, 0x06, 0x67, 0x10, 0x9c, 0xef, 0x24, 0x7b, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, 0xc7, 0x72, 0x0c, 0x51, 0xcc, 0x05, 0x49, 0xc5, 0x49, 0x6c, 0x60, 0x57, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x52, 0x43, 0xb7, 0x5d, 0xc5, 0x00, 0x00, 0x00, } func (m *ConnParameters) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } return dAtA[:n], nil } func (m 
*ConnParameters) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l if m.PingInterval != 0 { dAtA[i] = 0x8 i++ i = encodeVarintParams(dAtA, i, uint64(m.PingInterval)) } if m.PingTimeout != 0 { dAtA[i] = 0x10 i++ i = encodeVarintParams(dAtA, i, uint64(m.PingTimeout)) } if len(m.SID) > 0 { dAtA[i] = 0x1a i++ i = encodeVarintParams(dAtA, i, uint64(len(m.SID))) i += copy(dAtA[i:], m.SID) } if len(m.Upgrades) > 0 { for _, s := range m.Upgrades { dAtA[i] = 0x22 i++ l = len(s) for l >= 1<<7 { dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } dAtA[i] = uint8(l) i++ i += copy(dAtA[i:], s) } } return i, nil } func encodeVarintParams(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return offset + 1 } func (m *ConnParameters) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.PingInterval != 0 { n += 1 + sovParams(uint64(m.PingInterval)) } if m.PingTimeout != 0 { n += 1 + sovParams(uint64(m.PingTimeout)) } l = len(m.SID) if l > 0 { n += 1 + l + sovParams(uint64(l)) } if len(m.Upgrades) > 0 { for _, s := range m.Upgrades { l = len(s) n += 1 + l + sovParams(uint64(l)) } } return n } func sovParams(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozParams(x uint64) (n int) { return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *ConnParameters) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: ConnParameters: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: ConnParameters: illegal tag %d (wire type %d)", fieldNum, wire) } switch 
fieldNum { case 1: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field PingInterval", wireType) } m.PingInterval = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.PingInterval |= int64(b&0x7F) << shift if b < 0x80 { break } } case 2: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field PingTimeout", wireType) } m.PingTimeout = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ m.PingTimeout |= int64(b&0x7F) << shift if b < 0x80 { break } } case 3: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field SID", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthParams } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthParams } if postIndex > l { return io.ErrUnexpectedEOF } m.SID = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex case 4: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Upgrades", wireType) } var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowParams } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ stringLen |= uint64(b&0x7F) << shift if b < 0x80 { break } } intStringLen := int(stringLen) if intStringLen < 0 { return ErrInvalidLengthParams } postIndex := iNdEx + intStringLen if postIndex < 0 { return ErrInvalidLengthParams } if postIndex > l { return io.ErrUnexpectedEOF } m.Upgrades = append(m.Upgrades, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex skippy, err := 
skipParams(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) < 0 { return ErrInvalidLengthParams } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipParams(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } return iNdEx, nil case 1: iNdEx += 8 return iNdEx, nil case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthParams } iNdEx += length if iNdEx < 0 { return 0, ErrInvalidLengthParams } return iNdEx, nil case 3: for { var innerWire uint64 var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowParams } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } innerWireType := int(innerWire & 0x7) if innerWireType == 4 { break } next, err := skipParams(dAtA[start:]) if err != nil { return 0, err } iNdEx = start + next if iNdEx < 0 { return 0, ErrInvalidLengthParams } } return iNdEx, nil case 4: return iNdEx, nil case 5: iNdEx += 4 return iNdEx, nil default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } } panic("unreachable") } var ( ErrInvalidLengthParams = 
fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") )
{ xxx_messageInfo_ConnParameters.DiscardUnknown(m) }
identifier_body