file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
zkdevice.py
''' This script will do auto-check in/out for ZMM100 fingerprint access control device by ZKSoftware. At my office, the manager uses an application to load data from the fingerprint device. After he loads data, log in device's database is cleared. So in my case, I write this script to automate checking in/out everyday. Device is running linux with busybox, so I have access to ftpput, ftpget and wget commands (ftpd is missing). Data is stored in /mnt/mtdblock/data/ZKDB.db. This is a sqlite3 database file. User info is in USER_INFO, user transactions are in ATT_LOG table. Procedure: - telnet into the device - ftpput database file at /mnt/mtdblock/data/ZKDB.db to a temporary FTP server - edit ZKDB.db file on server - ftpget ZKDB.db from FTP server ''' import argparse import datetime import os import random import sqlite3 import subprocess as spr import sys import telnetlib def get_server_ip(device_ip): import socket s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect((device_ip, 80)) return s.getsockname()[0] def transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'): ''' Transfer file from from_ip to to_ip via telnet. Use ftpput and ftpget. 
''' # ====FTP Server==== try: import pyftpdlib except ImportError: import pip pip.main('install pyftpdlib'.split()) # start pyftpdlib FTP server: anonymous with write permission, port 2121 ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w']) print('Server started') filename = os.path.basename(remote_file_path) s = telnetlib.Telnet(DEVICE_IP) print(s.read_until(b'login: ').decode()) s.write(b'root \n') print(s.read_until(b'Password: ').decode()) s.write(b'solokey\n') if s.read_until(b'#'): s.write(bytes('ls %s\n' % DB_PATH, 'utf-8')) files = s.read_until(b'#').decode() if filename in files: while True: if cmd == 'ftpput': command = bytes('%s -P 2121 %s %s %s\n' % (cmd, server_ip, filename, remote_file_path), 'utf-8') elif cmd == 'ftpget': command = bytes('%s -P 2121 %s %s %s\n' % (cmd, server_ip, remote_file_path, filename), 'utf-8') else: raise ValueError('cmd must be `ftpput` or `ftpget`') s.write(command) ret = s.read_until(b'#').decode() if 'refused' not in ret: print(ret) break # stop pyftpdlib FTP server ftp_server.kill() print('Server killed') def generate_verify_time(status='in', late=False): ''' Generate normal verify time based on status `in` or `out` `in` time will be random 10 mins before 8:00 `out` time will be random 10 mins after 17:00 ''' if status == 'in': status = 0 if not late: hour = 7 minute = random.randint(50, 59) else: hour = 8 minute = random.randint(15, 20) elif status == 'out': status = 1 hour = 17 minute = random.randint(0, 10) else: raise ValueError('status must be `in` or `out`') second = random.randint(0, 59) time = datetime.time(hour, minute, second) return time def add_log(uid, date, status, late=False): ''' Edit ZKDB.db file, ATT_LOG table, insert a row which represents a check in/out log uid: User PIN date: follow format: dd/mm/yyyy - 14/01/2017 status: 'in' is checking in, 'out' is checking out ''' # verify_type: 0 is password, 1 is fingerprint verify_type = 1 if status == 'in': status = 0 time = 
generate_verify_time('in', late=late) elif status == 'out': status = 1 time = generate_verify_time('out') else: raise ValueError('status must be `in` or `out`') date = datetime.datetime.strptime(date, '%d/%m/%Y') combined = datetime.datetime.combine(date, time) verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined) with sqlite3.connect(DB) as conn: query = ('INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, ' 'Status, Work_Code_ID, SEND_FLAG) ' 'VALUES ({}, {}, "{}", {}, 0, 0)').format(uid, verify_type, verify_time, status, 0, 0) cur = conn.execute(query) cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG') r = cur.fetchone() print_log(r, uid, verify_type, verify_time, status) def
(uid, start, end, status, late=False): start_date = datetime.datetime.strptime(start, '%d/%m/%Y') end_date = datetime.datetime.strptime(end, '%d/%m/%Y') day_count = end_date - start_date day_count = day_count.days + 1 for date in (start_date + datetime.timedelta(i) for i in range(day_count)): date = '{:%d/%m/%Y}'.format(date) add_log(uid, date, status, late) def delete_log(log_id): ''' Delete a log row with ID=log_id ''' with sqlite3.connect(DB) as conn: query = ('DELETE FROM ATT_LOG WHERE ID={}'.format(log_id)) conn.execute(query) print('Deleted log {}'.format(log_id)) def get_logs(uid, start_date, end_date): ''' Returns logs of 'uid' from 'start_date' to 'end_date' uid: User PIN start_date: follow format 14/01/2017 end_date: follow format 15/01/2017 Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status) ''' start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y') end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y') with sqlite3.connect(DB) as conn: query = ('SELECT ID, User_PIN, Verify_Type, Verify_Time, Status ' 'FROM ATT_LOG WHERE User_PIN = {}'.format(uid)) cur = conn.execute(query) rows = cur.fetchall() ret = [] for row in rows: log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S') if log_date >= start_date and log_date <= end_date + datetime.timedelta(days=1): ret.append(row) return ret def get_logs_by_date(uid, date): return get_logs(uid, date, date) def print_log(*log_row): ''' Pretty print a log row log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status) ''' id, uid, verify_type, verify_time, status = log_row if status == 1: status = 'Check out' elif status == 0: status = 'Check in' print('{}. {} {} at {}'.format(id, uid, status, verify_time)) def check_log_row(log_row): ''' Each day must have exactly 2 logs. 
One for checking in, before 8:00:00 One for checking out, after 17:00:00 Return True if satisfies all conditions, else False ''' in_time = datetime.time(8, 0, 0) out_time = datetime.time(17, 0, 0) log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S') status = log_row[-1] if status == 1 and log_date.time() < out_time: print('Early log on {}: {}'.format(log_date.date(), log_date)) return False elif status == 0 and log_date.time() > in_time: print('Late log on {}: {}'.format(log_date.date(), log_date)) return False else: return True def check_log_by_date(uid, date): pass def fix_logs(uid, start_date, end_date): ''' Fix logs of uid from start_date to end_date A normalized log contains 2 logs per day One check in log before 8:00 One check out log after 17:00 ''' start_date = '{:%d/%m/%Y}'.format(start_date) end_date = '{:%d/%m/%Y}'.format(end_date) day_count = (end_date - start_date) + 1 for date in (start_date + datetime.timedelta(i) for i in range(day_count)): date = '{:%d/%m/%Y}'.format(date.date) logs = get_logs_by_date(uid, date) if len(logs) == 2: if not check_log_row(logs[0]) or not check_log_row(logs[1]): delete_log(logs[0][0]) delete_log(logs[1][0]) add_log(uid, date, 'in') add_log(uid, date, 'out') elif len(logs) == 0: add_log(uid, date, 'in') add_log(uid, date, 'out') else: for log in logs: delete_log(log[0]) add_log(uid, date, 'in') add_log(uid, date, 'out') def main(): today = '{:%d/%m/%Y}'.format(datetime.date.today()) parser = argparse.ArgumentParser() parser.add_argument('action', help='`get`, `checkin`, `checkout`, ' '`add` or `fix` logs', default='get') parser.add_argument('uids', help='User PINs', type=int, nargs='*') parser.add_argument('-d', '--date', help='Date', default=today) parser.add_argument('-r', '--range', help='Range of date, ex. 
01/01/2017-02/01/2017') parser.add_argument('--log', help='log id to delete') parser.add_argument('--late', help='Checkin late or not', action='store_true') args = parser.parse_args() uids = args.uids date = args.date or today if not args.range: start, end = date, date else: start, end = args.range.split('-') transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput') for uid in uids: if args.action == 'get': logs = get_logs(uid, start, end) for log in logs: print_log(*log) elif args.action == 'checkin': add_logs(uid, start, end, 'in', late=args.late) elif args.action == 'checkout': add_logs(uid, start, end, 'out') elif args.action == 'add': add_log(uid, start, end) elif args.action == 'fix': fix_logs(uid, start, end) elif args.action == 'delete': delete_log(args.log) else: raise ValueError('Action must be `get`, `checkin`, `checkout`, ' '`fix` or `delete`') transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget') if __name__ == '__main__': # ====config==== DEVICE_IP = '10.0.0.204' # todo: find IP, input IP DB_PATH = '/mnt/mtdblock/data/ZKDB.db' DB = os.path.basename(DB_PATH) server_ip = get_server_ip(DEVICE_IP) main()
add_logs
identifier_name
zkdevice.py
''' This script will do auto-check in/out for ZMM100 fingerprint access control device by ZKSoftware. At my office, the manager uses an application to load data from the fingerprint device. After he loads data, log in device's database is cleared. So in my case, I write this script to automate checking in/out everyday. Device is running linux with busybox, so I have access to ftpput, ftpget and wget commands (ftpd is missing). Data is stored in /mnt/mtdblock/data/ZKDB.db. This is a sqlite3 database file. User info is in USER_INFO, user transactions are in ATT_LOG table. Procedure: - telnet into the device - ftpput database file at /mnt/mtdblock/data/ZKDB.db to a temporary FTP server - edit ZKDB.db file on server - ftpget ZKDB.db from FTP server ''' import argparse import datetime import os import random import sqlite3 import subprocess as spr import sys import telnetlib def get_server_ip(device_ip): import socket s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) s.connect((device_ip, 80)) return s.getsockname()[0] def transfer_file(from_ip, to_ip, remote_file_path, cmd='ftpput'): ''' Transfer file from from_ip to to_ip via telnet. Use ftpput and ftpget. 
''' # ====FTP Server==== try: import pyftpdlib except ImportError: import pip pip.main('install pyftpdlib'.split()) # start pyftpdlib FTP server: anonymous with write permission, port 2121 ftp_server = spr.Popen([sys.executable, '-m', 'pyftpdlib', '-w']) print('Server started') filename = os.path.basename(remote_file_path) s = telnetlib.Telnet(DEVICE_IP) print(s.read_until(b'login: ').decode()) s.write(b'root \n') print(s.read_until(b'Password: ').decode()) s.write(b'solokey\n') if s.read_until(b'#'): s.write(bytes('ls %s\n' % DB_PATH, 'utf-8')) files = s.read_until(b'#').decode() if filename in files: while True: if cmd == 'ftpput': command = bytes('%s -P 2121 %s %s %s\n' % (cmd, server_ip, filename, remote_file_path), 'utf-8') elif cmd == 'ftpget': command = bytes('%s -P 2121 %s %s %s\n' % (cmd, server_ip, remote_file_path, filename), 'utf-8') else: raise ValueError('cmd must be `ftpput` or `ftpget`') s.write(command) ret = s.read_until(b'#').decode() if 'refused' not in ret: print(ret) break # stop pyftpdlib FTP server ftp_server.kill() print('Server killed') def generate_verify_time(status='in', late=False): ''' Generate normal verify time based on status `in` or `out` `in` time will be random 10 mins before 8:00 `out` time will be random 10 mins after 17:00 ''' if status == 'in': status = 0 if not late: hour = 7 minute = random.randint(50, 59) else: hour = 8 minute = random.randint(15, 20) elif status == 'out': status = 1 hour = 17 minute = random.randint(0, 10) else: raise ValueError('status must be `in` or `out`') second = random.randint(0, 59) time = datetime.time(hour, minute, second) return time def add_log(uid, date, status, late=False): ''' Edit ZKDB.db file, ATT_LOG table, insert a row which represents a check in/out log uid: User PIN date: follow format: dd/mm/yyyy - 14/01/2017 status: 'in' is checking in, 'out' is checking out ''' # verify_type: 0 is password, 1 is fingerprint verify_type = 1 if status == 'in': status = 0 time = 
generate_verify_time('in', late=late) elif status == 'out': status = 1 time = generate_verify_time('out') else: raise ValueError('status must be `in` or `out`') date = datetime.datetime.strptime(date, '%d/%m/%Y') combined = datetime.datetime.combine(date, time) verify_time = '{:%Y-%m-%dT%H:%M:%S}'.format(combined) with sqlite3.connect(DB) as conn: query = ('INSERT INTO ATT_LOG (User_PIN, Verify_Type, Verify_Time, ' 'Status, Work_Code_ID, SEND_FLAG) ' 'VALUES ({}, {}, "{}", {}, 0, 0)').format(uid, verify_type, verify_time, status, 0, 0) cur = conn.execute(query) cur = conn.execute('SELECT last_insert_rowid() FROM ATT_LOG') r = cur.fetchone() print_log(r, uid, verify_type, verify_time, status) def add_logs(uid, start, end, status, late=False): start_date = datetime.datetime.strptime(start, '%d/%m/%Y') end_date = datetime.datetime.strptime(end, '%d/%m/%Y') day_count = end_date - start_date day_count = day_count.days + 1 for date in (start_date + datetime.timedelta(i) for i in range(day_count)): date = '{:%d/%m/%Y}'.format(date) add_log(uid, date, status, late) def delete_log(log_id): ''' Delete a log row with ID=log_id ''' with sqlite3.connect(DB) as conn: query = ('DELETE FROM ATT_LOG WHERE ID={}'.format(log_id)) conn.execute(query) print('Deleted log {}'.format(log_id)) def get_logs(uid, start_date, end_date): ''' Returns logs of 'uid' from 'start_date' to 'end_date' uid: User PIN start_date: follow format 14/01/2017 end_date: follow format 15/01/2017 Return format: list of (ID, User_PIN, Verify_Type, Verify_Time, Status) ''' start_date = datetime.datetime.strptime(start_date, '%d/%m/%Y') end_date = datetime.datetime.strptime(end_date, '%d/%m/%Y') with sqlite3.connect(DB) as conn: query = ('SELECT ID, User_PIN, Verify_Type, Verify_Time, Status ' 'FROM ATT_LOG WHERE User_PIN = {}'.format(uid)) cur = conn.execute(query) rows = cur.fetchall() ret = [] for row in rows: log_date = datetime.datetime.strptime(row[-2], '%Y-%m-%dT%H:%M:%S') if log_date >= start_date and 
log_date <= end_date + datetime.timedelta(days=1): ret.append(row) return ret def get_logs_by_date(uid, date): return get_logs(uid, date, date) def print_log(*log_row):
def check_log_row(log_row): ''' Each day must have exactly 2 logs. One for checking in, before 8:00:00 One for checking out, after 17:00:00 Return True if satisfies all conditions, else False ''' in_time = datetime.time(8, 0, 0) out_time = datetime.time(17, 0, 0) log_date = datetime.datetime.strptime(log_row[2], '%Y-%m-%dT%H:%M:%S') status = log_row[-1] if status == 1 and log_date.time() < out_time: print('Early log on {}: {}'.format(log_date.date(), log_date)) return False elif status == 0 and log_date.time() > in_time: print('Late log on {}: {}'.format(log_date.date(), log_date)) return False else: return True def check_log_by_date(uid, date): pass def fix_logs(uid, start_date, end_date): ''' Fix logs of uid from start_date to end_date A normalized log contains 2 logs per day One check in log before 8:00 One check out log after 17:00 ''' start_date = '{:%d/%m/%Y}'.format(start_date) end_date = '{:%d/%m/%Y}'.format(end_date) day_count = (end_date - start_date) + 1 for date in (start_date + datetime.timedelta(i) for i in range(day_count)): date = '{:%d/%m/%Y}'.format(date.date) logs = get_logs_by_date(uid, date) if len(logs) == 2: if not check_log_row(logs[0]) or not check_log_row(logs[1]): delete_log(logs[0][0]) delete_log(logs[1][0]) add_log(uid, date, 'in') add_log(uid, date, 'out') elif len(logs) == 0: add_log(uid, date, 'in') add_log(uid, date, 'out') else: for log in logs: delete_log(log[0]) add_log(uid, date, 'in') add_log(uid, date, 'out') def main(): today = '{:%d/%m/%Y}'.format(datetime.date.today()) parser = argparse.ArgumentParser() parser.add_argument('action', help='`get`, `checkin`, `checkout`, ' '`add` or `fix` logs', default='get') parser.add_argument('uids', help='User PINs', type=int, nargs='*') parser.add_argument('-d', '--date', help='Date', default=today) parser.add_argument('-r', '--range', help='Range of date, ex. 
01/01/2017-02/01/2017') parser.add_argument('--log', help='log id to delete') parser.add_argument('--late', help='Checkin late or not', action='store_true') args = parser.parse_args() uids = args.uids date = args.date or today if not args.range: start, end = date, date else: start, end = args.range.split('-') transfer_file(DEVICE_IP, server_ip, DB_PATH, cmd='ftpput') for uid in uids: if args.action == 'get': logs = get_logs(uid, start, end) for log in logs: print_log(*log) elif args.action == 'checkin': add_logs(uid, start, end, 'in', late=args.late) elif args.action == 'checkout': add_logs(uid, start, end, 'out') elif args.action == 'add': add_log(uid, start, end) elif args.action == 'fix': fix_logs(uid, start, end) elif args.action == 'delete': delete_log(args.log) else: raise ValueError('Action must be `get`, `checkin`, `checkout`, ' '`fix` or `delete`') transfer_file(server_ip, DEVICE_IP, DB_PATH, cmd='ftpget') if __name__ == '__main__': # ====config==== DEVICE_IP = '10.0.0.204' # todo: find IP, input IP DB_PATH = '/mnt/mtdblock/data/ZKDB.db' DB = os.path.basename(DB_PATH) server_ip = get_server_ip(DEVICE_IP) main()
''' Pretty print a log row log row format: (ID, User_PIN, Verify_Type, Verify_Time, Status) ''' id, uid, verify_type, verify_time, status = log_row if status == 1: status = 'Check out' elif status == 0: status = 'Check in' print('{}. {} {} at {}'.format(id, uid, status, verify_time))
identifier_body
obj.rs
//! Object file builder. //! //! Creates ELF image based on `Compilation` information. The ELF contains //! functions and trampolines in the ".text" section. It also contains all //! relocation records for the linking stage. If DWARF sections exist, their //! content will be written as well. //! //! The object file has symbols for each function and trampoline, as well as //! symbols that refer to libcalls. //! //! The function symbol names have format "_wasm_function_N", where N is //! `FuncIndex`. The defined wasm function symbols refer to a JIT compiled //! function body, the imported wasm function do not. The trampolines symbol //! names have format "_trampoline_N", where N is `SignatureIndex`. use crate::{CompiledFuncEnv, CompiledFunction, RelocationTarget}; use anyhow::Result; use cranelift_codegen::binemit::Reloc; use cranelift_codegen::ir::LibCall; use cranelift_codegen::isa::unwind::{systemv, UnwindInfo}; use cranelift_codegen::TextSectionBuilder; use cranelift_control::ControlPlane; use gimli::write::{Address, EhFrame, EndianVec, FrameTable, Writer}; use gimli::RunTimeEndian; use object::write::{Object, SectionId, StandardSegment, Symbol, SymbolId, SymbolSection}; use object::{Architecture, SectionKind, SymbolFlags, SymbolKind, SymbolScope}; use std::collections::HashMap; use std::convert::TryFrom; use std::ops::Range; use wasmtime_environ::{Compiler, FuncIndex}; const TEXT_SECTION_NAME: &[u8] = b".text"; /// A helper structure used to assemble the final text section of an exectuable, /// plus unwinding information and other related details. /// /// This builder relies on Cranelift-specific internals but assembles into a /// generic `Object` which will get further appended to in a compiler-agnostic /// fashion later. pub struct ModuleTextBuilder<'a> { /// The target that we're compiling for, used to query target-specific /// information as necessary. compiler: &'a dyn Compiler, /// The object file that we're generating code into. 
obj: &'a mut Object<'static>, /// The WebAssembly module we're generating code for. text_section: SectionId, unwind_info: UnwindInfoBuilder<'a>, /// In-progress text section that we're using cranelift's `MachBuffer` to /// build to resolve relocations (calls) between functions. text: Box<dyn TextSectionBuilder>, /// Symbols defined in the object for libcalls that relocations are applied /// against. /// /// Note that this isn't typically used. It's only used for SSE-disabled /// builds without SIMD on x86_64 right now. libcall_symbols: HashMap<LibCall, SymbolId>, ctrl_plane: ControlPlane, } impl<'a> ModuleTextBuilder<'a> { /// Creates a new builder for the text section of an executable. /// /// The `.text` section will be appended to the specified `obj` along with /// any unwinding or such information as necessary. The `num_funcs` /// parameter indicates the number of times the `append_func` function will /// be called. The `finish` function will panic if this contract is not met. pub fn new( obj: &'a mut Object<'static>, compiler: &'a dyn Compiler, text: Box<dyn TextSectionBuilder>, ) -> Self { // Entire code (functions and trampolines) will be placed // in the ".text" section. let text_section = obj.add_section( obj.segment_name(StandardSegment::Text).to_vec(), TEXT_SECTION_NAME.to_vec(), SectionKind::Text, ); Self { compiler, obj, text_section, unwind_info: Default::default(), text, libcall_symbols: HashMap::default(), ctrl_plane: ControlPlane::default(), } } /// Appends the `func` specified named `name` to this object. /// /// The `resolve_reloc_target` closure is used to resolve a relocation /// target to an adjacent function which has already been added or will be /// added to this object. The argument is the relocation target specified
/// /// Returns the symbol associated with the function as well as the range /// that the function resides within the text section. pub fn append_func( &mut self, name: &str, compiled_func: &'a CompiledFunction<impl CompiledFuncEnv>, resolve_reloc_target: impl Fn(FuncIndex) -> usize, ) -> (SymbolId, Range<u64>) { let body = compiled_func.buffer.data(); let alignment = compiled_func.alignment; let body_len = body.len() as u64; let off = self .text .append(true, &body, alignment, &mut self.ctrl_plane); let symbol_id = self.obj.add_symbol(Symbol { name: name.as_bytes().to_vec(), value: off, size: body_len, kind: SymbolKind::Text, scope: SymbolScope::Compilation, weak: false, section: SymbolSection::Section(self.text_section), flags: SymbolFlags::None, }); if let Some(info) = compiled_func.unwind_info() { self.unwind_info.push(off, body_len, info); } for r in compiled_func.relocations() { match r.reloc_target { // Relocations against user-defined functions means that this is // a relocation against a module-local function, typically a // call between functions. The `text` field is given priority to // resolve this relocation before we actually emit an object // file, but if it can't handle it then we pass through the // relocation. RelocationTarget::UserFunc(index) => { let target = resolve_reloc_target(index); if self .text .resolve_reloc(off + u64::from(r.offset), r.reloc, r.addend, target) { continue; } // At this time it's expected that all relocations are // handled by `text.resolve_reloc`, and anything that isn't // handled is a bug in `text.resolve_reloc` or something // transitively there. If truly necessary, though, then this // loop could also be updated to forward the relocation to // the final object file as well. 
panic!( "unresolved relocation could not be processed against \ {index:?}: {r:?}" ); } // Relocations against libcalls are not common at this time and // are only used in non-default configurations that disable wasm // SIMD, disable SSE features, and for wasm modules that still // use floating point operations. // // Currently these relocations are all expected to be absolute // 8-byte relocations so that's asserted here and then encoded // directly into the object as a normal object relocation. This // is processed at module load time to resolve the relocations. RelocationTarget::LibCall(call) => { let symbol = *self.libcall_symbols.entry(call).or_insert_with(|| { self.obj.add_symbol(Symbol { name: libcall_name(call).as_bytes().to_vec(), value: 0, size: 0, kind: SymbolKind::Text, scope: SymbolScope::Linkage, weak: false, section: SymbolSection::Undefined, flags: SymbolFlags::None, }) }); let (encoding, kind, size) = match r.reloc { Reloc::Abs8 => ( object::RelocationEncoding::Generic, object::RelocationKind::Absolute, 8, ), other => unimplemented!("unimplemented relocation kind {other:?}"), }; self.obj .add_relocation( self.text_section, object::write::Relocation { symbol, size, kind, encoding, offset: off + u64::from(r.offset), addend: r.addend, }, ) .unwrap(); } }; } (symbol_id, off..off + body_len) } /// Forces "veneers" to be used for inter-function calls in the text /// section which means that in-bounds optimized addresses are never used. /// /// This is only useful for debugging cranelift itself and typically this /// option is disabled. pub fn force_veneers(&mut self) { self.text.force_veneers(); } /// Appends the specified amount of bytes of padding into the text section. /// /// This is only useful when fuzzing and/or debugging cranelift itself and /// for production scenarios `padding` is 0 and this function does nothing. 
pub fn append_padding(&mut self, padding: usize) { if padding == 0 { return; } self.text .append(false, &vec![0; padding], 1, &mut self.ctrl_plane); } /// Indicates that the text section has been written completely and this /// will finish appending it to the original object. /// /// Note that this will also write out the unwind information sections if /// necessary. pub fn finish(mut self) { // Finish up the text section now that we're done adding functions. let text = self.text.finish(&mut self.ctrl_plane); self.obj .section_mut(self.text_section) .set_data(text, self.compiler.page_size_align()); // Append the unwind information for all our functions, if necessary. self.unwind_info .append_section(self.compiler, self.obj, self.text_section); } } /// Builder used to create unwind information for a set of functions added to a /// text section. #[derive(Default)] struct UnwindInfoBuilder<'a> { windows_xdata: Vec<u8>, windows_pdata: Vec<RUNTIME_FUNCTION>, systemv_unwind_info: Vec<(u64, &'a systemv::UnwindInfo)>, } // This is a mirror of `RUNTIME_FUNCTION` in the Windows API, but defined here // to ensure everything is always `u32` and to have it available on all // platforms. Note that all of these specifiers here are relative to a "base // address" which we define as the base of where the text section is eventually // loaded. #[allow(non_camel_case_types)] struct RUNTIME_FUNCTION { begin: u32, end: u32, unwind_address: u32, } impl<'a> UnwindInfoBuilder<'a> { /// Pushes the unwind information for a function into this builder. /// /// The function being described must be located at `function_offset` within /// the text section itself, and the function's size is specified by /// `function_len`. /// /// The `info` should come from Cranelift. and is handled here depending on /// its flavor. 
fn push(&mut self, function_offset: u64, function_len: u64, info: &'a UnwindInfo) { match info { // Windows unwind information is stored in two locations: // // * First is the actual unwinding information which is stored // in the `.xdata` section. This is where `info`'s emitted // information will go into. // * Second are pointers to connect all this unwind information, // stored in the `.pdata` section. The `.pdata` section is an // array of `RUNTIME_FUNCTION` structures. // // Due to how these will be loaded at runtime the `.pdata` isn't // actually assembled byte-wise here. Instead that's deferred to // happen later during `write_windows_unwind_info` which will apply // a further offset to `unwind_address`. UnwindInfo::WindowsX64(info) => { let unwind_size = info.emit_size(); let mut unwind_info = vec![0; unwind_size]; info.emit(&mut unwind_info); // `.xdata` entries are always 4-byte aligned // // FIXME: in theory we could "intern" the `unwind_info` value // here within the `.xdata` section. Most of our unwind // information for functions is probably pretty similar in which // case the `.xdata` could be quite small and `.pdata` could // have multiple functions point to the same unwinding // information. while self.windows_xdata.len() % 4 != 0 { self.windows_xdata.push(0x00); } let unwind_address = self.windows_xdata.len(); self.windows_xdata.extend_from_slice(&unwind_info); // Record a `RUNTIME_FUNCTION` which this will point to. self.windows_pdata.push(RUNTIME_FUNCTION { begin: u32::try_from(function_offset).unwrap(), end: u32::try_from(function_offset + function_len).unwrap(), unwind_address: u32::try_from(unwind_address).unwrap(), }); } // System-V is different enough that we just record the unwinding // information to get processed at a later time. 
UnwindInfo::SystemV(info) => { self.systemv_unwind_info.push((function_offset, info)); } _ => panic!("some unwind info isn't handled here"), } } /// Appends the unwind information section, if any, to the `obj` specified. /// /// This function must be called immediately after the text section was /// added to a builder. The unwind information section must trail the text /// section immediately. /// /// The `text_section`'s section identifier is passed into this function. fn append_section( &self, compiler: &dyn Compiler, obj: &mut Object<'_>, text_section: SectionId, ) { // This write will align the text section to a page boundary and then // return the offset at that point. This gives us the full size of the // text section at that point, after alignment. let text_section_size = obj.append_section_data(text_section, &[], compiler.page_size_align()); if self.windows_xdata.len() > 0 { assert!(self.systemv_unwind_info.len() == 0); // The `.xdata` section must come first to be just-after the `.text` // section for the reasons documented in `write_windows_unwind_info` // below. let segment = obj.segment_name(StandardSegment::Data).to_vec(); let xdata_id = obj.add_section(segment, b".xdata".to_vec(), SectionKind::ReadOnlyData); let segment = obj.segment_name(StandardSegment::Data).to_vec(); let pdata_id = obj.add_section(segment, b".pdata".to_vec(), SectionKind::ReadOnlyData); self.write_windows_unwind_info(obj, xdata_id, pdata_id, text_section_size); } if self.systemv_unwind_info.len() > 0 { let segment = obj.segment_name(StandardSegment::Data).to_vec(); let section_id = obj.add_section(segment, b".eh_frame".to_vec(), SectionKind::ReadOnlyData); self.write_systemv_unwind_info(compiler, obj, section_id, text_section_size) } } /// This function appends a nonstandard section to the object which is only /// used during `CodeMemory::publish`. /// /// This custom section effectively stores a `[RUNTIME_FUNCTION; N]` into /// the object file itself. 
This way registration of unwind info can simply /// pass this slice to the OS itself and there's no need to recalculate /// anything on the other end of loading a module from a precompiled object. /// /// Support for reading this is in `crates/jit/src/unwind/winx64.rs`. fn write_windows_unwind_info( &self, obj: &mut Object<'_>, xdata_id: SectionId, pdata_id: SectionId, text_section_size: u64, ) { // Currently the binary format supported here only supports // little-endian for x86_64, or at least that's all where it's tested. // This may need updates for other platforms. assert_eq!(obj.architecture(), Architecture::X86_64); // Append the `.xdata` section, or the actual unwinding information // codes and such which were built as we found unwind information for // functions. obj.append_section_data(xdata_id, &self.windows_xdata, 4); // Next append the `.pdata` section, or the array of `RUNTIME_FUNCTION` // structures stored in the binary. // // This memory will be passed at runtime to `RtlAddFunctionTable` which // takes a "base address" and the entries within `RUNTIME_FUNCTION` are // all relative to this base address. The base address we pass is the // address of the text section itself so all the pointers here must be // text-section-relative. The `begin` and `end` fields for the function // it describes are already text-section-relative, but the // `unwind_address` field needs to be updated here since the value // stored right now is `xdata`-section-relative. We know that the // `xdata` section follows the `.text` section so the // `text_section_size` is added in to calculate the final // `.text`-section-relative address of the unwind information. 
let mut pdata = Vec::with_capacity(self.windows_pdata.len() * 3 * 4); for info in self.windows_pdata.iter() { pdata.extend_from_slice(&info.begin.to_le_bytes()); pdata.extend_from_slice(&info.end.to_le_bytes()); let address = text_section_size + u64::from(info.unwind_address); let address = u32::try_from(address).unwrap(); pdata.extend_from_slice(&address.to_le_bytes()); } obj.append_section_data(pdata_id, &pdata, 4); } /// This function appends a nonstandard section to the object which is only /// used during `CodeMemory::publish`. /// /// This will generate a `.eh_frame` section, but not one that can be /// naively loaded. The goal of this section is that we can create the /// section once here and never again does it need to change. To describe /// dynamically loaded functions though each individual FDE needs to talk /// about the function's absolute address that it's referencing. Naturally /// we don't actually know the function's absolute address when we're /// creating an object here. /// /// To solve this problem the FDE address encoding mode is set to /// `DW_EH_PE_pcrel`. This means that the actual effective address that the /// FDE describes is a relative to the address of the FDE itself. By /// leveraging this relative-ness we can assume that the relative distance /// between the FDE and the function it describes is constant, which should /// allow us to generate an FDE ahead-of-time here. /// /// For now this assumes that all the code of functions will start at a /// page-aligned address when loaded into memory. The eh_frame encoded here /// then assumes that the text section is itself page aligned to its size /// and the eh_frame will follow just after the text section. This means /// that the relative offsets we're using here is the FDE going backwards /// into the text section itself. /// /// Note that the library we're using to create the FDEs, `gimli`, doesn't /// actually encode addresses relative to the FDE itself. 
Instead the /// addresses are encoded relative to the start of the `.eh_frame` section. /// This makes it much easier for us where we provide the relative offset /// from the start of `.eh_frame` to the function in the text section, which /// given our layout basically means the offset of the function in the text /// section from the end of the text section. /// /// A final note is that the reason we page-align the text section's size is /// so the .eh_frame lives on a separate page from the text section itself. /// This allows `.eh_frame` to have different virtual memory permissions, /// such as being purely read-only instead of read/execute like the code /// bits. fn write_systemv_unwind_info( &self, compiler: &dyn Compiler, obj: &mut Object<'_>, section_id: SectionId, text_section_size: u64, ) { let mut cie = compiler .create_systemv_cie() .expect("must be able to create a CIE for system-v unwind info"); let mut table = FrameTable::default(); cie.fde_address_encoding = gimli::constants::DW_EH_PE_pcrel; let cie_id = table.add_cie(cie); for (text_section_off, unwind_info) in self.systemv_unwind_info.iter() { let backwards_off = text_section_size - text_section_off; let actual_offset = -i64::try_from(backwards_off).unwrap(); // Note that gimli wants an unsigned 64-bit integer here, but // unwinders just use this constant for a relative addition with the // address of the FDE, which means that the sign doesn't actually // matter. let fde = unwind_info.to_fde(Address::Constant(actual_offset as u64)); table.add_fde(cie_id, fde); } let endian = match compiler.triple().endianness().unwrap() { target_lexicon::Endianness::Little => RunTimeEndian::Little, target_lexicon::Endianness::Big => RunTimeEndian::Big, }; let mut eh_frame = EhFrame(MyVec(EndianVec::new(endian))); table.write_eh_frame(&mut eh_frame).unwrap(); // Some unwinding implementations expect a terminating "empty" length so // a 0 is written at the end of the table for those implementations. 
let mut endian_vec = (eh_frame.0).0; endian_vec.write_u32(0).unwrap(); obj.append_section_data(section_id, endian_vec.slice(), 1); use gimli::constants; use gimli::write::Error; struct MyVec(EndianVec<RunTimeEndian>); impl Writer for MyVec { type Endian = RunTimeEndian; fn endian(&self) -> RunTimeEndian { self.0.endian() } fn len(&self) -> usize { self.0.len() } fn write(&mut self, buf: &[u8]) -> Result<(), Error> { self.0.write(buf) } fn write_at(&mut self, pos: usize, buf: &[u8]) -> Result<(), Error> { self.0.write_at(pos, buf) } // FIXME(gimli-rs/gimli#576) this is the definition we want for // `write_eh_pointer` but the default implementation, at the time // of this writing, uses `offset - val` instead of `val - offset`. // A PR has been merged to fix this but until that's published we // can't use it. fn write_eh_pointer( &mut self, address: Address, eh_pe: constants::DwEhPe, size: u8, ) -> Result<(), Error> { let val = match address { Address::Constant(val) => val, Address::Symbol { .. } => unreachable!(), }; assert_eq!(eh_pe.application(), constants::DW_EH_PE_pcrel); let offset = self.len() as u64; let val = val.wrapping_sub(offset); self.write_eh_pointer_data(val, eh_pe.format(), size) } } } } fn libcall_name(call: LibCall) -> &'static str { use wasmtime_environ::obj::LibCall as LC; let other = match call { LibCall::FloorF32 => LC::FloorF32, LibCall::FloorF64 => LC::FloorF64, LibCall::NearestF32 => LC::NearestF32, LibCall::NearestF64 => LC::NearestF64, LibCall::CeilF32 => LC::CeilF32, LibCall::CeilF64 => LC::CeilF64, LibCall::TruncF32 => LC::TruncF32, LibCall::TruncF64 => LC::TruncF64, LibCall::FmaF32 => LC::FmaF32, LibCall::FmaF64 => LC::FmaF64, LibCall::X86Pshufb => LC::X86Pshufb, _ => panic!("unknown libcall to give a name to: {call:?}"), }; other.symbol() }
/// within `CompiledFunction` and the return value must be an index where /// the target will be defined by the `n`th call to `append_func`.
random_line_split
obj.rs
//! Object file builder. //! //! Creates ELF image based on `Compilation` information. The ELF contains //! functions and trampolines in the ".text" section. It also contains all //! relocation records for the linking stage. If DWARF sections exist, their //! content will be written as well. //! //! The object file has symbols for each function and trampoline, as well as //! symbols that refer to libcalls. //! //! The function symbol names have format "_wasm_function_N", where N is //! `FuncIndex`. The defined wasm function symbols refer to a JIT compiled //! function body, the imported wasm function do not. The trampolines symbol //! names have format "_trampoline_N", where N is `SignatureIndex`. use crate::{CompiledFuncEnv, CompiledFunction, RelocationTarget}; use anyhow::Result; use cranelift_codegen::binemit::Reloc; use cranelift_codegen::ir::LibCall; use cranelift_codegen::isa::unwind::{systemv, UnwindInfo}; use cranelift_codegen::TextSectionBuilder; use cranelift_control::ControlPlane; use gimli::write::{Address, EhFrame, EndianVec, FrameTable, Writer}; use gimli::RunTimeEndian; use object::write::{Object, SectionId, StandardSegment, Symbol, SymbolId, SymbolSection}; use object::{Architecture, SectionKind, SymbolFlags, SymbolKind, SymbolScope}; use std::collections::HashMap; use std::convert::TryFrom; use std::ops::Range; use wasmtime_environ::{Compiler, FuncIndex}; const TEXT_SECTION_NAME: &[u8] = b".text"; /// A helper structure used to assemble the final text section of an exectuable, /// plus unwinding information and other related details. /// /// This builder relies on Cranelift-specific internals but assembles into a /// generic `Object` which will get further appended to in a compiler-agnostic /// fashion later. pub struct ModuleTextBuilder<'a> { /// The target that we're compiling for, used to query target-specific /// information as necessary. compiler: &'a dyn Compiler, /// The object file that we're generating code into. 
obj: &'a mut Object<'static>, /// The WebAssembly module we're generating code for. text_section: SectionId, unwind_info: UnwindInfoBuilder<'a>, /// In-progress text section that we're using cranelift's `MachBuffer` to /// build to resolve relocations (calls) between functions. text: Box<dyn TextSectionBuilder>, /// Symbols defined in the object for libcalls that relocations are applied /// against. /// /// Note that this isn't typically used. It's only used for SSE-disabled /// builds without SIMD on x86_64 right now. libcall_symbols: HashMap<LibCall, SymbolId>, ctrl_plane: ControlPlane, } impl<'a> ModuleTextBuilder<'a> { /// Creates a new builder for the text section of an executable. /// /// The `.text` section will be appended to the specified `obj` along with /// any unwinding or such information as necessary. The `num_funcs` /// parameter indicates the number of times the `append_func` function will /// be called. The `finish` function will panic if this contract is not met. pub fn new( obj: &'a mut Object<'static>, compiler: &'a dyn Compiler, text: Box<dyn TextSectionBuilder>, ) -> Self { // Entire code (functions and trampolines) will be placed // in the ".text" section. let text_section = obj.add_section( obj.segment_name(StandardSegment::Text).to_vec(), TEXT_SECTION_NAME.to_vec(), SectionKind::Text, ); Self { compiler, obj, text_section, unwind_info: Default::default(), text, libcall_symbols: HashMap::default(), ctrl_plane: ControlPlane::default(), } } /// Appends the `func` specified named `name` to this object. /// /// The `resolve_reloc_target` closure is used to resolve a relocation /// target to an adjacent function which has already been added or will be /// added to this object. The argument is the relocation target specified /// within `CompiledFunction` and the return value must be an index where /// the target will be defined by the `n`th call to `append_func`. 
/// /// Returns the symbol associated with the function as well as the range /// that the function resides within the text section. pub fn append_func( &mut self, name: &str, compiled_func: &'a CompiledFunction<impl CompiledFuncEnv>, resolve_reloc_target: impl Fn(FuncIndex) -> usize, ) -> (SymbolId, Range<u64>)
/// Forces "veneers" to be used for inter-function calls in the text /// section which means that in-bounds optimized addresses are never used. /// /// This is only useful for debugging cranelift itself and typically this /// option is disabled. pub fn force_veneers(&mut self) { self.text.force_veneers(); } /// Appends the specified amount of bytes of padding into the text section. /// /// This is only useful when fuzzing and/or debugging cranelift itself and /// for production scenarios `padding` is 0 and this function does nothing. pub fn append_padding(&mut self, padding: usize) { if padding == 0 { return; } self.text .append(false, &vec![0; padding], 1, &mut self.ctrl_plane); } /// Indicates that the text section has been written completely and this /// will finish appending it to the original object. /// /// Note that this will also write out the unwind information sections if /// necessary. pub fn finish(mut self) { // Finish up the text section now that we're done adding functions. let text = self.text.finish(&mut self.ctrl_plane); self.obj .section_mut(self.text_section) .set_data(text, self.compiler.page_size_align()); // Append the unwind information for all our functions, if necessary. self.unwind_info .append_section(self.compiler, self.obj, self.text_section); } } /// Builder used to create unwind information for a set of functions added to a /// text section. #[derive(Default)] struct UnwindInfoBuilder<'a> { windows_xdata: Vec<u8>, windows_pdata: Vec<RUNTIME_FUNCTION>, systemv_unwind_info: Vec<(u64, &'a systemv::UnwindInfo)>, } // This is a mirror of `RUNTIME_FUNCTION` in the Windows API, but defined here // to ensure everything is always `u32` and to have it available on all // platforms. Note that all of these specifiers here are relative to a "base // address" which we define as the base of where the text section is eventually // loaded. 
#[allow(non_camel_case_types)] struct RUNTIME_FUNCTION { begin: u32, end: u32, unwind_address: u32, } impl<'a> UnwindInfoBuilder<'a> { /// Pushes the unwind information for a function into this builder. /// /// The function being described must be located at `function_offset` within /// the text section itself, and the function's size is specified by /// `function_len`. /// /// The `info` should come from Cranelift. and is handled here depending on /// its flavor. fn push(&mut self, function_offset: u64, function_len: u64, info: &'a UnwindInfo) { match info { // Windows unwind information is stored in two locations: // // * First is the actual unwinding information which is stored // in the `.xdata` section. This is where `info`'s emitted // information will go into. // * Second are pointers to connect all this unwind information, // stored in the `.pdata` section. The `.pdata` section is an // array of `RUNTIME_FUNCTION` structures. // // Due to how these will be loaded at runtime the `.pdata` isn't // actually assembled byte-wise here. Instead that's deferred to // happen later during `write_windows_unwind_info` which will apply // a further offset to `unwind_address`. UnwindInfo::WindowsX64(info) => { let unwind_size = info.emit_size(); let mut unwind_info = vec![0; unwind_size]; info.emit(&mut unwind_info); // `.xdata` entries are always 4-byte aligned // // FIXME: in theory we could "intern" the `unwind_info` value // here within the `.xdata` section. Most of our unwind // information for functions is probably pretty similar in which // case the `.xdata` could be quite small and `.pdata` could // have multiple functions point to the same unwinding // information. while self.windows_xdata.len() % 4 != 0 { self.windows_xdata.push(0x00); } let unwind_address = self.windows_xdata.len(); self.windows_xdata.extend_from_slice(&unwind_info); // Record a `RUNTIME_FUNCTION` which this will point to. 
self.windows_pdata.push(RUNTIME_FUNCTION { begin: u32::try_from(function_offset).unwrap(), end: u32::try_from(function_offset + function_len).unwrap(), unwind_address: u32::try_from(unwind_address).unwrap(), }); } // System-V is different enough that we just record the unwinding // information to get processed at a later time. UnwindInfo::SystemV(info) => { self.systemv_unwind_info.push((function_offset, info)); } _ => panic!("some unwind info isn't handled here"), } } /// Appends the unwind information section, if any, to the `obj` specified. /// /// This function must be called immediately after the text section was /// added to a builder. The unwind information section must trail the text /// section immediately. /// /// The `text_section`'s section identifier is passed into this function. fn append_section( &self, compiler: &dyn Compiler, obj: &mut Object<'_>, text_section: SectionId, ) { // This write will align the text section to a page boundary and then // return the offset at that point. This gives us the full size of the // text section at that point, after alignment. let text_section_size = obj.append_section_data(text_section, &[], compiler.page_size_align()); if self.windows_xdata.len() > 0 { assert!(self.systemv_unwind_info.len() == 0); // The `.xdata` section must come first to be just-after the `.text` // section for the reasons documented in `write_windows_unwind_info` // below. 
let segment = obj.segment_name(StandardSegment::Data).to_vec(); let xdata_id = obj.add_section(segment, b".xdata".to_vec(), SectionKind::ReadOnlyData); let segment = obj.segment_name(StandardSegment::Data).to_vec(); let pdata_id = obj.add_section(segment, b".pdata".to_vec(), SectionKind::ReadOnlyData); self.write_windows_unwind_info(obj, xdata_id, pdata_id, text_section_size); } if self.systemv_unwind_info.len() > 0 { let segment = obj.segment_name(StandardSegment::Data).to_vec(); let section_id = obj.add_section(segment, b".eh_frame".to_vec(), SectionKind::ReadOnlyData); self.write_systemv_unwind_info(compiler, obj, section_id, text_section_size) } } /// This function appends a nonstandard section to the object which is only /// used during `CodeMemory::publish`. /// /// This custom section effectively stores a `[RUNTIME_FUNCTION; N]` into /// the object file itself. This way registration of unwind info can simply /// pass this slice to the OS itself and there's no need to recalculate /// anything on the other end of loading a module from a precompiled object. /// /// Support for reading this is in `crates/jit/src/unwind/winx64.rs`. fn write_windows_unwind_info( &self, obj: &mut Object<'_>, xdata_id: SectionId, pdata_id: SectionId, text_section_size: u64, ) { // Currently the binary format supported here only supports // little-endian for x86_64, or at least that's all where it's tested. // This may need updates for other platforms. assert_eq!(obj.architecture(), Architecture::X86_64); // Append the `.xdata` section, or the actual unwinding information // codes and such which were built as we found unwind information for // functions. obj.append_section_data(xdata_id, &self.windows_xdata, 4); // Next append the `.pdata` section, or the array of `RUNTIME_FUNCTION` // structures stored in the binary. 
// // This memory will be passed at runtime to `RtlAddFunctionTable` which // takes a "base address" and the entries within `RUNTIME_FUNCTION` are // all relative to this base address. The base address we pass is the // address of the text section itself so all the pointers here must be // text-section-relative. The `begin` and `end` fields for the function // it describes are already text-section-relative, but the // `unwind_address` field needs to be updated here since the value // stored right now is `xdata`-section-relative. We know that the // `xdata` section follows the `.text` section so the // `text_section_size` is added in to calculate the final // `.text`-section-relative address of the unwind information. let mut pdata = Vec::with_capacity(self.windows_pdata.len() * 3 * 4); for info in self.windows_pdata.iter() { pdata.extend_from_slice(&info.begin.to_le_bytes()); pdata.extend_from_slice(&info.end.to_le_bytes()); let address = text_section_size + u64::from(info.unwind_address); let address = u32::try_from(address).unwrap(); pdata.extend_from_slice(&address.to_le_bytes()); } obj.append_section_data(pdata_id, &pdata, 4); } /// This function appends a nonstandard section to the object which is only /// used during `CodeMemory::publish`. /// /// This will generate a `.eh_frame` section, but not one that can be /// naively loaded. The goal of this section is that we can create the /// section once here and never again does it need to change. To describe /// dynamically loaded functions though each individual FDE needs to talk /// about the function's absolute address that it's referencing. Naturally /// we don't actually know the function's absolute address when we're /// creating an object here. /// /// To solve this problem the FDE address encoding mode is set to /// `DW_EH_PE_pcrel`. This means that the actual effective address that the /// FDE describes is a relative to the address of the FDE itself. 
By /// leveraging this relative-ness we can assume that the relative distance /// between the FDE and the function it describes is constant, which should /// allow us to generate an FDE ahead-of-time here. /// /// For now this assumes that all the code of functions will start at a /// page-aligned address when loaded into memory. The eh_frame encoded here /// then assumes that the text section is itself page aligned to its size /// and the eh_frame will follow just after the text section. This means /// that the relative offsets we're using here is the FDE going backwards /// into the text section itself. /// /// Note that the library we're using to create the FDEs, `gimli`, doesn't /// actually encode addresses relative to the FDE itself. Instead the /// addresses are encoded relative to the start of the `.eh_frame` section. /// This makes it much easier for us where we provide the relative offset /// from the start of `.eh_frame` to the function in the text section, which /// given our layout basically means the offset of the function in the text /// section from the end of the text section. /// /// A final note is that the reason we page-align the text section's size is /// so the .eh_frame lives on a separate page from the text section itself. /// This allows `.eh_frame` to have different virtual memory permissions, /// such as being purely read-only instead of read/execute like the code /// bits. 
fn write_systemv_unwind_info( &self, compiler: &dyn Compiler, obj: &mut Object<'_>, section_id: SectionId, text_section_size: u64, ) { let mut cie = compiler .create_systemv_cie() .expect("must be able to create a CIE for system-v unwind info"); let mut table = FrameTable::default(); cie.fde_address_encoding = gimli::constants::DW_EH_PE_pcrel; let cie_id = table.add_cie(cie); for (text_section_off, unwind_info) in self.systemv_unwind_info.iter() { let backwards_off = text_section_size - text_section_off; let actual_offset = -i64::try_from(backwards_off).unwrap(); // Note that gimli wants an unsigned 64-bit integer here, but // unwinders just use this constant for a relative addition with the // address of the FDE, which means that the sign doesn't actually // matter. let fde = unwind_info.to_fde(Address::Constant(actual_offset as u64)); table.add_fde(cie_id, fde); } let endian = match compiler.triple().endianness().unwrap() { target_lexicon::Endianness::Little => RunTimeEndian::Little, target_lexicon::Endianness::Big => RunTimeEndian::Big, }; let mut eh_frame = EhFrame(MyVec(EndianVec::new(endian))); table.write_eh_frame(&mut eh_frame).unwrap(); // Some unwinding implementations expect a terminating "empty" length so // a 0 is written at the end of the table for those implementations. 
let mut endian_vec = (eh_frame.0).0; endian_vec.write_u32(0).unwrap(); obj.append_section_data(section_id, endian_vec.slice(), 1); use gimli::constants; use gimli::write::Error; struct MyVec(EndianVec<RunTimeEndian>); impl Writer for MyVec { type Endian = RunTimeEndian; fn endian(&self) -> RunTimeEndian { self.0.endian() } fn len(&self) -> usize { self.0.len() } fn write(&mut self, buf: &[u8]) -> Result<(), Error> { self.0.write(buf) } fn write_at(&mut self, pos: usize, buf: &[u8]) -> Result<(), Error> { self.0.write_at(pos, buf) } // FIXME(gimli-rs/gimli#576) this is the definition we want for // `write_eh_pointer` but the default implementation, at the time // of this writing, uses `offset - val` instead of `val - offset`. // A PR has been merged to fix this but until that's published we // can't use it. fn write_eh_pointer( &mut self, address: Address, eh_pe: constants::DwEhPe, size: u8, ) -> Result<(), Error> { let val = match address { Address::Constant(val) => val, Address::Symbol { .. } => unreachable!(), }; assert_eq!(eh_pe.application(), constants::DW_EH_PE_pcrel); let offset = self.len() as u64; let val = val.wrapping_sub(offset); self.write_eh_pointer_data(val, eh_pe.format(), size) } } } } fn libcall_name(call: LibCall) -> &'static str { use wasmtime_environ::obj::LibCall as LC; let other = match call { LibCall::FloorF32 => LC::FloorF32, LibCall::FloorF64 => LC::FloorF64, LibCall::NearestF32 => LC::NearestF32, LibCall::NearestF64 => LC::NearestF64, LibCall::CeilF32 => LC::CeilF32, LibCall::CeilF64 => LC::CeilF64, LibCall::TruncF32 => LC::TruncF32, LibCall::TruncF64 => LC::TruncF64, LibCall::FmaF32 => LC::FmaF32, LibCall::FmaF64 => LC::FmaF64, LibCall::X86Pshufb => LC::X86Pshufb, _ => panic!("unknown libcall to give a name to: {call:?}"), }; other.symbol() }
{ let body = compiled_func.buffer.data(); let alignment = compiled_func.alignment; let body_len = body.len() as u64; let off = self .text .append(true, &body, alignment, &mut self.ctrl_plane); let symbol_id = self.obj.add_symbol(Symbol { name: name.as_bytes().to_vec(), value: off, size: body_len, kind: SymbolKind::Text, scope: SymbolScope::Compilation, weak: false, section: SymbolSection::Section(self.text_section), flags: SymbolFlags::None, }); if let Some(info) = compiled_func.unwind_info() { self.unwind_info.push(off, body_len, info); } for r in compiled_func.relocations() { match r.reloc_target { // Relocations against user-defined functions means that this is // a relocation against a module-local function, typically a // call between functions. The `text` field is given priority to // resolve this relocation before we actually emit an object // file, but if it can't handle it then we pass through the // relocation. RelocationTarget::UserFunc(index) => { let target = resolve_reloc_target(index); if self .text .resolve_reloc(off + u64::from(r.offset), r.reloc, r.addend, target) { continue; } // At this time it's expected that all relocations are // handled by `text.resolve_reloc`, and anything that isn't // handled is a bug in `text.resolve_reloc` or something // transitively there. If truly necessary, though, then this // loop could also be updated to forward the relocation to // the final object file as well. panic!( "unresolved relocation could not be processed against \ {index:?}: {r:?}" ); } // Relocations against libcalls are not common at this time and // are only used in non-default configurations that disable wasm // SIMD, disable SSE features, and for wasm modules that still // use floating point operations. // // Currently these relocations are all expected to be absolute // 8-byte relocations so that's asserted here and then encoded // directly into the object as a normal object relocation. 
This // is processed at module load time to resolve the relocations. RelocationTarget::LibCall(call) => { let symbol = *self.libcall_symbols.entry(call).or_insert_with(|| { self.obj.add_symbol(Symbol { name: libcall_name(call).as_bytes().to_vec(), value: 0, size: 0, kind: SymbolKind::Text, scope: SymbolScope::Linkage, weak: false, section: SymbolSection::Undefined, flags: SymbolFlags::None, }) }); let (encoding, kind, size) = match r.reloc { Reloc::Abs8 => ( object::RelocationEncoding::Generic, object::RelocationKind::Absolute, 8, ), other => unimplemented!("unimplemented relocation kind {other:?}"), }; self.obj .add_relocation( self.text_section, object::write::Relocation { symbol, size, kind, encoding, offset: off + u64::from(r.offset), addend: r.addend, }, ) .unwrap(); } }; } (symbol_id, off..off + body_len) }
identifier_body
obj.rs
//! Object file builder. //! //! Creates ELF image based on `Compilation` information. The ELF contains //! functions and trampolines in the ".text" section. It also contains all //! relocation records for the linking stage. If DWARF sections exist, their //! content will be written as well. //! //! The object file has symbols for each function and trampoline, as well as //! symbols that refer to libcalls. //! //! The function symbol names have format "_wasm_function_N", where N is //! `FuncIndex`. The defined wasm function symbols refer to a JIT compiled //! function body, the imported wasm function do not. The trampolines symbol //! names have format "_trampoline_N", where N is `SignatureIndex`. use crate::{CompiledFuncEnv, CompiledFunction, RelocationTarget}; use anyhow::Result; use cranelift_codegen::binemit::Reloc; use cranelift_codegen::ir::LibCall; use cranelift_codegen::isa::unwind::{systemv, UnwindInfo}; use cranelift_codegen::TextSectionBuilder; use cranelift_control::ControlPlane; use gimli::write::{Address, EhFrame, EndianVec, FrameTable, Writer}; use gimli::RunTimeEndian; use object::write::{Object, SectionId, StandardSegment, Symbol, SymbolId, SymbolSection}; use object::{Architecture, SectionKind, SymbolFlags, SymbolKind, SymbolScope}; use std::collections::HashMap; use std::convert::TryFrom; use std::ops::Range; use wasmtime_environ::{Compiler, FuncIndex}; const TEXT_SECTION_NAME: &[u8] = b".text"; /// A helper structure used to assemble the final text section of an exectuable, /// plus unwinding information and other related details. /// /// This builder relies on Cranelift-specific internals but assembles into a /// generic `Object` which will get further appended to in a compiler-agnostic /// fashion later. pub struct ModuleTextBuilder<'a> { /// The target that we're compiling for, used to query target-specific /// information as necessary. compiler: &'a dyn Compiler, /// The object file that we're generating code into. 
obj: &'a mut Object<'static>, /// The WebAssembly module we're generating code for. text_section: SectionId, unwind_info: UnwindInfoBuilder<'a>, /// In-progress text section that we're using cranelift's `MachBuffer` to /// build to resolve relocations (calls) between functions. text: Box<dyn TextSectionBuilder>, /// Symbols defined in the object for libcalls that relocations are applied /// against. /// /// Note that this isn't typically used. It's only used for SSE-disabled /// builds without SIMD on x86_64 right now. libcall_symbols: HashMap<LibCall, SymbolId>, ctrl_plane: ControlPlane, } impl<'a> ModuleTextBuilder<'a> { /// Creates a new builder for the text section of an executable. /// /// The `.text` section will be appended to the specified `obj` along with /// any unwinding or such information as necessary. The `num_funcs` /// parameter indicates the number of times the `append_func` function will /// be called. The `finish` function will panic if this contract is not met. pub fn new( obj: &'a mut Object<'static>, compiler: &'a dyn Compiler, text: Box<dyn TextSectionBuilder>, ) -> Self { // Entire code (functions and trampolines) will be placed // in the ".text" section. let text_section = obj.add_section( obj.segment_name(StandardSegment::Text).to_vec(), TEXT_SECTION_NAME.to_vec(), SectionKind::Text, ); Self { compiler, obj, text_section, unwind_info: Default::default(), text, libcall_symbols: HashMap::default(), ctrl_plane: ControlPlane::default(), } } /// Appends the `func` specified named `name` to this object. /// /// The `resolve_reloc_target` closure is used to resolve a relocation /// target to an adjacent function which has already been added or will be /// added to this object. The argument is the relocation target specified /// within `CompiledFunction` and the return value must be an index where /// the target will be defined by the `n`th call to `append_func`. 
/// /// Returns the symbol associated with the function as well as the range /// that the function resides within the text section. pub fn append_func( &mut self, name: &str, compiled_func: &'a CompiledFunction<impl CompiledFuncEnv>, resolve_reloc_target: impl Fn(FuncIndex) -> usize, ) -> (SymbolId, Range<u64>) { let body = compiled_func.buffer.data(); let alignment = compiled_func.alignment; let body_len = body.len() as u64; let off = self .text .append(true, &body, alignment, &mut self.ctrl_plane); let symbol_id = self.obj.add_symbol(Symbol { name: name.as_bytes().to_vec(), value: off, size: body_len, kind: SymbolKind::Text, scope: SymbolScope::Compilation, weak: false, section: SymbolSection::Section(self.text_section), flags: SymbolFlags::None, }); if let Some(info) = compiled_func.unwind_info() { self.unwind_info.push(off, body_len, info); } for r in compiled_func.relocations() { match r.reloc_target { // Relocations against user-defined functions means that this is // a relocation against a module-local function, typically a // call between functions. The `text` field is given priority to // resolve this relocation before we actually emit an object // file, but if it can't handle it then we pass through the // relocation. RelocationTarget::UserFunc(index) => { let target = resolve_reloc_target(index); if self .text .resolve_reloc(off + u64::from(r.offset), r.reloc, r.addend, target) { continue; } // At this time it's expected that all relocations are // handled by `text.resolve_reloc`, and anything that isn't // handled is a bug in `text.resolve_reloc` or something // transitively there. If truly necessary, though, then this // loop could also be updated to forward the relocation to // the final object file as well. 
panic!( "unresolved relocation could not be processed against \ {index:?}: {r:?}" ); } // Relocations against libcalls are not common at this time and // are only used in non-default configurations that disable wasm // SIMD, disable SSE features, and for wasm modules that still // use floating point operations. // // Currently these relocations are all expected to be absolute // 8-byte relocations so that's asserted here and then encoded // directly into the object as a normal object relocation. This // is processed at module load time to resolve the relocations. RelocationTarget::LibCall(call) => { let symbol = *self.libcall_symbols.entry(call).or_insert_with(|| { self.obj.add_symbol(Symbol { name: libcall_name(call).as_bytes().to_vec(), value: 0, size: 0, kind: SymbolKind::Text, scope: SymbolScope::Linkage, weak: false, section: SymbolSection::Undefined, flags: SymbolFlags::None, }) }); let (encoding, kind, size) = match r.reloc { Reloc::Abs8 => ( object::RelocationEncoding::Generic, object::RelocationKind::Absolute, 8, ), other => unimplemented!("unimplemented relocation kind {other:?}"), }; self.obj .add_relocation( self.text_section, object::write::Relocation { symbol, size, kind, encoding, offset: off + u64::from(r.offset), addend: r.addend, }, ) .unwrap(); } }; } (symbol_id, off..off + body_len) } /// Forces "veneers" to be used for inter-function calls in the text /// section which means that in-bounds optimized addresses are never used. /// /// This is only useful for debugging cranelift itself and typically this /// option is disabled. pub fn
(&mut self) { self.text.force_veneers(); } /// Appends the specified amount of bytes of padding into the text section. /// /// This is only useful when fuzzing and/or debugging cranelift itself and /// for production scenarios `padding` is 0 and this function does nothing. pub fn append_padding(&mut self, padding: usize) { if padding == 0 { return; } self.text .append(false, &vec![0; padding], 1, &mut self.ctrl_plane); } /// Indicates that the text section has been written completely and this /// will finish appending it to the original object. /// /// Note that this will also write out the unwind information sections if /// necessary. pub fn finish(mut self) { // Finish up the text section now that we're done adding functions. let text = self.text.finish(&mut self.ctrl_plane); self.obj .section_mut(self.text_section) .set_data(text, self.compiler.page_size_align()); // Append the unwind information for all our functions, if necessary. self.unwind_info .append_section(self.compiler, self.obj, self.text_section); } } /// Builder used to create unwind information for a set of functions added to a /// text section. #[derive(Default)] struct UnwindInfoBuilder<'a> { windows_xdata: Vec<u8>, windows_pdata: Vec<RUNTIME_FUNCTION>, systemv_unwind_info: Vec<(u64, &'a systemv::UnwindInfo)>, } // This is a mirror of `RUNTIME_FUNCTION` in the Windows API, but defined here // to ensure everything is always `u32` and to have it available on all // platforms. Note that all of these specifiers here are relative to a "base // address" which we define as the base of where the text section is eventually // loaded. #[allow(non_camel_case_types)] struct RUNTIME_FUNCTION { begin: u32, end: u32, unwind_address: u32, } impl<'a> UnwindInfoBuilder<'a> { /// Pushes the unwind information for a function into this builder. /// /// The function being described must be located at `function_offset` within /// the text section itself, and the function's size is specified by /// `function_len`. 
/// /// The `info` should come from Cranelift. and is handled here depending on /// its flavor. fn push(&mut self, function_offset: u64, function_len: u64, info: &'a UnwindInfo) { match info { // Windows unwind information is stored in two locations: // // * First is the actual unwinding information which is stored // in the `.xdata` section. This is where `info`'s emitted // information will go into. // * Second are pointers to connect all this unwind information, // stored in the `.pdata` section. The `.pdata` section is an // array of `RUNTIME_FUNCTION` structures. // // Due to how these will be loaded at runtime the `.pdata` isn't // actually assembled byte-wise here. Instead that's deferred to // happen later during `write_windows_unwind_info` which will apply // a further offset to `unwind_address`. UnwindInfo::WindowsX64(info) => { let unwind_size = info.emit_size(); let mut unwind_info = vec![0; unwind_size]; info.emit(&mut unwind_info); // `.xdata` entries are always 4-byte aligned // // FIXME: in theory we could "intern" the `unwind_info` value // here within the `.xdata` section. Most of our unwind // information for functions is probably pretty similar in which // case the `.xdata` could be quite small and `.pdata` could // have multiple functions point to the same unwinding // information. while self.windows_xdata.len() % 4 != 0 { self.windows_xdata.push(0x00); } let unwind_address = self.windows_xdata.len(); self.windows_xdata.extend_from_slice(&unwind_info); // Record a `RUNTIME_FUNCTION` which this will point to. self.windows_pdata.push(RUNTIME_FUNCTION { begin: u32::try_from(function_offset).unwrap(), end: u32::try_from(function_offset + function_len).unwrap(), unwind_address: u32::try_from(unwind_address).unwrap(), }); } // System-V is different enough that we just record the unwinding // information to get processed at a later time. 
UnwindInfo::SystemV(info) => { self.systemv_unwind_info.push((function_offset, info)); } _ => panic!("some unwind info isn't handled here"), } } /// Appends the unwind information section, if any, to the `obj` specified. /// /// This function must be called immediately after the text section was /// added to a builder. The unwind information section must trail the text /// section immediately. /// /// The `text_section`'s section identifier is passed into this function. fn append_section( &self, compiler: &dyn Compiler, obj: &mut Object<'_>, text_section: SectionId, ) { // This write will align the text section to a page boundary and then // return the offset at that point. This gives us the full size of the // text section at that point, after alignment. let text_section_size = obj.append_section_data(text_section, &[], compiler.page_size_align()); if self.windows_xdata.len() > 0 { assert!(self.systemv_unwind_info.len() == 0); // The `.xdata` section must come first to be just-after the `.text` // section for the reasons documented in `write_windows_unwind_info` // below. let segment = obj.segment_name(StandardSegment::Data).to_vec(); let xdata_id = obj.add_section(segment, b".xdata".to_vec(), SectionKind::ReadOnlyData); let segment = obj.segment_name(StandardSegment::Data).to_vec(); let pdata_id = obj.add_section(segment, b".pdata".to_vec(), SectionKind::ReadOnlyData); self.write_windows_unwind_info(obj, xdata_id, pdata_id, text_section_size); } if self.systemv_unwind_info.len() > 0 { let segment = obj.segment_name(StandardSegment::Data).to_vec(); let section_id = obj.add_section(segment, b".eh_frame".to_vec(), SectionKind::ReadOnlyData); self.write_systemv_unwind_info(compiler, obj, section_id, text_section_size) } } /// This function appends a nonstandard section to the object which is only /// used during `CodeMemory::publish`. /// /// This custom section effectively stores a `[RUNTIME_FUNCTION; N]` into /// the object file itself. 
This way registration of unwind info can simply /// pass this slice to the OS itself and there's no need to recalculate /// anything on the other end of loading a module from a precompiled object. /// /// Support for reading this is in `crates/jit/src/unwind/winx64.rs`. fn write_windows_unwind_info( &self, obj: &mut Object<'_>, xdata_id: SectionId, pdata_id: SectionId, text_section_size: u64, ) { // Currently the binary format supported here only supports // little-endian for x86_64, or at least that's all where it's tested. // This may need updates for other platforms. assert_eq!(obj.architecture(), Architecture::X86_64); // Append the `.xdata` section, or the actual unwinding information // codes and such which were built as we found unwind information for // functions. obj.append_section_data(xdata_id, &self.windows_xdata, 4); // Next append the `.pdata` section, or the array of `RUNTIME_FUNCTION` // structures stored in the binary. // // This memory will be passed at runtime to `RtlAddFunctionTable` which // takes a "base address" and the entries within `RUNTIME_FUNCTION` are // all relative to this base address. The base address we pass is the // address of the text section itself so all the pointers here must be // text-section-relative. The `begin` and `end` fields for the function // it describes are already text-section-relative, but the // `unwind_address` field needs to be updated here since the value // stored right now is `xdata`-section-relative. We know that the // `xdata` section follows the `.text` section so the // `text_section_size` is added in to calculate the final // `.text`-section-relative address of the unwind information. 
let mut pdata = Vec::with_capacity(self.windows_pdata.len() * 3 * 4); for info in self.windows_pdata.iter() { pdata.extend_from_slice(&info.begin.to_le_bytes()); pdata.extend_from_slice(&info.end.to_le_bytes()); let address = text_section_size + u64::from(info.unwind_address); let address = u32::try_from(address).unwrap(); pdata.extend_from_slice(&address.to_le_bytes()); } obj.append_section_data(pdata_id, &pdata, 4); } /// This function appends a nonstandard section to the object which is only /// used during `CodeMemory::publish`. /// /// This will generate a `.eh_frame` section, but not one that can be /// naively loaded. The goal of this section is that we can create the /// section once here and never again does it need to change. To describe /// dynamically loaded functions though each individual FDE needs to talk /// about the function's absolute address that it's referencing. Naturally /// we don't actually know the function's absolute address when we're /// creating an object here. /// /// To solve this problem the FDE address encoding mode is set to /// `DW_EH_PE_pcrel`. This means that the actual effective address that the /// FDE describes is a relative to the address of the FDE itself. By /// leveraging this relative-ness we can assume that the relative distance /// between the FDE and the function it describes is constant, which should /// allow us to generate an FDE ahead-of-time here. /// /// For now this assumes that all the code of functions will start at a /// page-aligned address when loaded into memory. The eh_frame encoded here /// then assumes that the text section is itself page aligned to its size /// and the eh_frame will follow just after the text section. This means /// that the relative offsets we're using here is the FDE going backwards /// into the text section itself. /// /// Note that the library we're using to create the FDEs, `gimli`, doesn't /// actually encode addresses relative to the FDE itself. 
Instead the /// addresses are encoded relative to the start of the `.eh_frame` section. /// This makes it much easier for us where we provide the relative offset /// from the start of `.eh_frame` to the function in the text section, which /// given our layout basically means the offset of the function in the text /// section from the end of the text section. /// /// A final note is that the reason we page-align the text section's size is /// so the .eh_frame lives on a separate page from the text section itself. /// This allows `.eh_frame` to have different virtual memory permissions, /// such as being purely read-only instead of read/execute like the code /// bits. fn write_systemv_unwind_info( &self, compiler: &dyn Compiler, obj: &mut Object<'_>, section_id: SectionId, text_section_size: u64, ) { let mut cie = compiler .create_systemv_cie() .expect("must be able to create a CIE for system-v unwind info"); let mut table = FrameTable::default(); cie.fde_address_encoding = gimli::constants::DW_EH_PE_pcrel; let cie_id = table.add_cie(cie); for (text_section_off, unwind_info) in self.systemv_unwind_info.iter() { let backwards_off = text_section_size - text_section_off; let actual_offset = -i64::try_from(backwards_off).unwrap(); // Note that gimli wants an unsigned 64-bit integer here, but // unwinders just use this constant for a relative addition with the // address of the FDE, which means that the sign doesn't actually // matter. let fde = unwind_info.to_fde(Address::Constant(actual_offset as u64)); table.add_fde(cie_id, fde); } let endian = match compiler.triple().endianness().unwrap() { target_lexicon::Endianness::Little => RunTimeEndian::Little, target_lexicon::Endianness::Big => RunTimeEndian::Big, }; let mut eh_frame = EhFrame(MyVec(EndianVec::new(endian))); table.write_eh_frame(&mut eh_frame).unwrap(); // Some unwinding implementations expect a terminating "empty" length so // a 0 is written at the end of the table for those implementations. 
let mut endian_vec = (eh_frame.0).0; endian_vec.write_u32(0).unwrap(); obj.append_section_data(section_id, endian_vec.slice(), 1); use gimli::constants; use gimli::write::Error; struct MyVec(EndianVec<RunTimeEndian>); impl Writer for MyVec { type Endian = RunTimeEndian; fn endian(&self) -> RunTimeEndian { self.0.endian() } fn len(&self) -> usize { self.0.len() } fn write(&mut self, buf: &[u8]) -> Result<(), Error> { self.0.write(buf) } fn write_at(&mut self, pos: usize, buf: &[u8]) -> Result<(), Error> { self.0.write_at(pos, buf) } // FIXME(gimli-rs/gimli#576) this is the definition we want for // `write_eh_pointer` but the default implementation, at the time // of this writing, uses `offset - val` instead of `val - offset`. // A PR has been merged to fix this but until that's published we // can't use it. fn write_eh_pointer( &mut self, address: Address, eh_pe: constants::DwEhPe, size: u8, ) -> Result<(), Error> { let val = match address { Address::Constant(val) => val, Address::Symbol { .. } => unreachable!(), }; assert_eq!(eh_pe.application(), constants::DW_EH_PE_pcrel); let offset = self.len() as u64; let val = val.wrapping_sub(offset); self.write_eh_pointer_data(val, eh_pe.format(), size) } } } } fn libcall_name(call: LibCall) -> &'static str { use wasmtime_environ::obj::LibCall as LC; let other = match call { LibCall::FloorF32 => LC::FloorF32, LibCall::FloorF64 => LC::FloorF64, LibCall::NearestF32 => LC::NearestF32, LibCall::NearestF64 => LC::NearestF64, LibCall::CeilF32 => LC::CeilF32, LibCall::CeilF64 => LC::CeilF64, LibCall::TruncF32 => LC::TruncF32, LibCall::TruncF64 => LC::TruncF64, LibCall::FmaF32 => LC::FmaF32, LibCall::FmaF64 => LC::FmaF64, LibCall::X86Pshufb => LC::X86Pshufb, _ => panic!("unknown libcall to give a name to: {call:?}"), }; other.symbol() }
force_veneers
identifier_name
obj.rs
//! Object file builder. //! //! Creates ELF image based on `Compilation` information. The ELF contains //! functions and trampolines in the ".text" section. It also contains all //! relocation records for the linking stage. If DWARF sections exist, their //! content will be written as well. //! //! The object file has symbols for each function and trampoline, as well as //! symbols that refer to libcalls. //! //! The function symbol names have format "_wasm_function_N", where N is //! `FuncIndex`. The defined wasm function symbols refer to a JIT compiled //! function body, the imported wasm function do not. The trampolines symbol //! names have format "_trampoline_N", where N is `SignatureIndex`. use crate::{CompiledFuncEnv, CompiledFunction, RelocationTarget}; use anyhow::Result; use cranelift_codegen::binemit::Reloc; use cranelift_codegen::ir::LibCall; use cranelift_codegen::isa::unwind::{systemv, UnwindInfo}; use cranelift_codegen::TextSectionBuilder; use cranelift_control::ControlPlane; use gimli::write::{Address, EhFrame, EndianVec, FrameTable, Writer}; use gimli::RunTimeEndian; use object::write::{Object, SectionId, StandardSegment, Symbol, SymbolId, SymbolSection}; use object::{Architecture, SectionKind, SymbolFlags, SymbolKind, SymbolScope}; use std::collections::HashMap; use std::convert::TryFrom; use std::ops::Range; use wasmtime_environ::{Compiler, FuncIndex}; const TEXT_SECTION_NAME: &[u8] = b".text"; /// A helper structure used to assemble the final text section of an exectuable, /// plus unwinding information and other related details. /// /// This builder relies on Cranelift-specific internals but assembles into a /// generic `Object` which will get further appended to in a compiler-agnostic /// fashion later. pub struct ModuleTextBuilder<'a> { /// The target that we're compiling for, used to query target-specific /// information as necessary. compiler: &'a dyn Compiler, /// The object file that we're generating code into. 
obj: &'a mut Object<'static>, /// The WebAssembly module we're generating code for. text_section: SectionId, unwind_info: UnwindInfoBuilder<'a>, /// In-progress text section that we're using cranelift's `MachBuffer` to /// build to resolve relocations (calls) between functions. text: Box<dyn TextSectionBuilder>, /// Symbols defined in the object for libcalls that relocations are applied /// against. /// /// Note that this isn't typically used. It's only used for SSE-disabled /// builds without SIMD on x86_64 right now. libcall_symbols: HashMap<LibCall, SymbolId>, ctrl_plane: ControlPlane, } impl<'a> ModuleTextBuilder<'a> { /// Creates a new builder for the text section of an executable. /// /// The `.text` section will be appended to the specified `obj` along with /// any unwinding or such information as necessary. The `num_funcs` /// parameter indicates the number of times the `append_func` function will /// be called. The `finish` function will panic if this contract is not met. pub fn new( obj: &'a mut Object<'static>, compiler: &'a dyn Compiler, text: Box<dyn TextSectionBuilder>, ) -> Self { // Entire code (functions and trampolines) will be placed // in the ".text" section. let text_section = obj.add_section( obj.segment_name(StandardSegment::Text).to_vec(), TEXT_SECTION_NAME.to_vec(), SectionKind::Text, ); Self { compiler, obj, text_section, unwind_info: Default::default(), text, libcall_symbols: HashMap::default(), ctrl_plane: ControlPlane::default(), } } /// Appends the `func` specified named `name` to this object. /// /// The `resolve_reloc_target` closure is used to resolve a relocation /// target to an adjacent function which has already been added or will be /// added to this object. The argument is the relocation target specified /// within `CompiledFunction` and the return value must be an index where /// the target will be defined by the `n`th call to `append_func`. 
/// /// Returns the symbol associated with the function as well as the range /// that the function resides within the text section. pub fn append_func( &mut self, name: &str, compiled_func: &'a CompiledFunction<impl CompiledFuncEnv>, resolve_reloc_target: impl Fn(FuncIndex) -> usize, ) -> (SymbolId, Range<u64>) { let body = compiled_func.buffer.data(); let alignment = compiled_func.alignment; let body_len = body.len() as u64; let off = self .text .append(true, &body, alignment, &mut self.ctrl_plane); let symbol_id = self.obj.add_symbol(Symbol { name: name.as_bytes().to_vec(), value: off, size: body_len, kind: SymbolKind::Text, scope: SymbolScope::Compilation, weak: false, section: SymbolSection::Section(self.text_section), flags: SymbolFlags::None, }); if let Some(info) = compiled_func.unwind_info()
for r in compiled_func.relocations() { match r.reloc_target { // Relocations against user-defined functions means that this is // a relocation against a module-local function, typically a // call between functions. The `text` field is given priority to // resolve this relocation before we actually emit an object // file, but if it can't handle it then we pass through the // relocation. RelocationTarget::UserFunc(index) => { let target = resolve_reloc_target(index); if self .text .resolve_reloc(off + u64::from(r.offset), r.reloc, r.addend, target) { continue; } // At this time it's expected that all relocations are // handled by `text.resolve_reloc`, and anything that isn't // handled is a bug in `text.resolve_reloc` or something // transitively there. If truly necessary, though, then this // loop could also be updated to forward the relocation to // the final object file as well. panic!( "unresolved relocation could not be processed against \ {index:?}: {r:?}" ); } // Relocations against libcalls are not common at this time and // are only used in non-default configurations that disable wasm // SIMD, disable SSE features, and for wasm modules that still // use floating point operations. // // Currently these relocations are all expected to be absolute // 8-byte relocations so that's asserted here and then encoded // directly into the object as a normal object relocation. This // is processed at module load time to resolve the relocations. 
RelocationTarget::LibCall(call) => { let symbol = *self.libcall_symbols.entry(call).or_insert_with(|| { self.obj.add_symbol(Symbol { name: libcall_name(call).as_bytes().to_vec(), value: 0, size: 0, kind: SymbolKind::Text, scope: SymbolScope::Linkage, weak: false, section: SymbolSection::Undefined, flags: SymbolFlags::None, }) }); let (encoding, kind, size) = match r.reloc { Reloc::Abs8 => ( object::RelocationEncoding::Generic, object::RelocationKind::Absolute, 8, ), other => unimplemented!("unimplemented relocation kind {other:?}"), }; self.obj .add_relocation( self.text_section, object::write::Relocation { symbol, size, kind, encoding, offset: off + u64::from(r.offset), addend: r.addend, }, ) .unwrap(); } }; } (symbol_id, off..off + body_len) } /// Forces "veneers" to be used for inter-function calls in the text /// section which means that in-bounds optimized addresses are never used. /// /// This is only useful for debugging cranelift itself and typically this /// option is disabled. pub fn force_veneers(&mut self) { self.text.force_veneers(); } /// Appends the specified amount of bytes of padding into the text section. /// /// This is only useful when fuzzing and/or debugging cranelift itself and /// for production scenarios `padding` is 0 and this function does nothing. pub fn append_padding(&mut self, padding: usize) { if padding == 0 { return; } self.text .append(false, &vec![0; padding], 1, &mut self.ctrl_plane); } /// Indicates that the text section has been written completely and this /// will finish appending it to the original object. /// /// Note that this will also write out the unwind information sections if /// necessary. pub fn finish(mut self) { // Finish up the text section now that we're done adding functions. let text = self.text.finish(&mut self.ctrl_plane); self.obj .section_mut(self.text_section) .set_data(text, self.compiler.page_size_align()); // Append the unwind information for all our functions, if necessary. 
self.unwind_info .append_section(self.compiler, self.obj, self.text_section); } } /// Builder used to create unwind information for a set of functions added to a /// text section. #[derive(Default)] struct UnwindInfoBuilder<'a> { windows_xdata: Vec<u8>, windows_pdata: Vec<RUNTIME_FUNCTION>, systemv_unwind_info: Vec<(u64, &'a systemv::UnwindInfo)>, } // This is a mirror of `RUNTIME_FUNCTION` in the Windows API, but defined here // to ensure everything is always `u32` and to have it available on all // platforms. Note that all of these specifiers here are relative to a "base // address" which we define as the base of where the text section is eventually // loaded. #[allow(non_camel_case_types)] struct RUNTIME_FUNCTION { begin: u32, end: u32, unwind_address: u32, } impl<'a> UnwindInfoBuilder<'a> { /// Pushes the unwind information for a function into this builder. /// /// The function being described must be located at `function_offset` within /// the text section itself, and the function's size is specified by /// `function_len`. /// /// The `info` should come from Cranelift. and is handled here depending on /// its flavor. fn push(&mut self, function_offset: u64, function_len: u64, info: &'a UnwindInfo) { match info { // Windows unwind information is stored in two locations: // // * First is the actual unwinding information which is stored // in the `.xdata` section. This is where `info`'s emitted // information will go into. // * Second are pointers to connect all this unwind information, // stored in the `.pdata` section. The `.pdata` section is an // array of `RUNTIME_FUNCTION` structures. // // Due to how these will be loaded at runtime the `.pdata` isn't // actually assembled byte-wise here. Instead that's deferred to // happen later during `write_windows_unwind_info` which will apply // a further offset to `unwind_address`. 
UnwindInfo::WindowsX64(info) => { let unwind_size = info.emit_size(); let mut unwind_info = vec![0; unwind_size]; info.emit(&mut unwind_info); // `.xdata` entries are always 4-byte aligned // // FIXME: in theory we could "intern" the `unwind_info` value // here within the `.xdata` section. Most of our unwind // information for functions is probably pretty similar in which // case the `.xdata` could be quite small and `.pdata` could // have multiple functions point to the same unwinding // information. while self.windows_xdata.len() % 4 != 0 { self.windows_xdata.push(0x00); } let unwind_address = self.windows_xdata.len(); self.windows_xdata.extend_from_slice(&unwind_info); // Record a `RUNTIME_FUNCTION` which this will point to. self.windows_pdata.push(RUNTIME_FUNCTION { begin: u32::try_from(function_offset).unwrap(), end: u32::try_from(function_offset + function_len).unwrap(), unwind_address: u32::try_from(unwind_address).unwrap(), }); } // System-V is different enough that we just record the unwinding // information to get processed at a later time. UnwindInfo::SystemV(info) => { self.systemv_unwind_info.push((function_offset, info)); } _ => panic!("some unwind info isn't handled here"), } } /// Appends the unwind information section, if any, to the `obj` specified. /// /// This function must be called immediately after the text section was /// added to a builder. The unwind information section must trail the text /// section immediately. /// /// The `text_section`'s section identifier is passed into this function. fn append_section( &self, compiler: &dyn Compiler, obj: &mut Object<'_>, text_section: SectionId, ) { // This write will align the text section to a page boundary and then // return the offset at that point. This gives us the full size of the // text section at that point, after alignment. 
let text_section_size = obj.append_section_data(text_section, &[], compiler.page_size_align()); if self.windows_xdata.len() > 0 { assert!(self.systemv_unwind_info.len() == 0); // The `.xdata` section must come first to be just-after the `.text` // section for the reasons documented in `write_windows_unwind_info` // below. let segment = obj.segment_name(StandardSegment::Data).to_vec(); let xdata_id = obj.add_section(segment, b".xdata".to_vec(), SectionKind::ReadOnlyData); let segment = obj.segment_name(StandardSegment::Data).to_vec(); let pdata_id = obj.add_section(segment, b".pdata".to_vec(), SectionKind::ReadOnlyData); self.write_windows_unwind_info(obj, xdata_id, pdata_id, text_section_size); } if self.systemv_unwind_info.len() > 0 { let segment = obj.segment_name(StandardSegment::Data).to_vec(); let section_id = obj.add_section(segment, b".eh_frame".to_vec(), SectionKind::ReadOnlyData); self.write_systemv_unwind_info(compiler, obj, section_id, text_section_size) } } /// This function appends a nonstandard section to the object which is only /// used during `CodeMemory::publish`. /// /// This custom section effectively stores a `[RUNTIME_FUNCTION; N]` into /// the object file itself. This way registration of unwind info can simply /// pass this slice to the OS itself and there's no need to recalculate /// anything on the other end of loading a module from a precompiled object. /// /// Support for reading this is in `crates/jit/src/unwind/winx64.rs`. fn write_windows_unwind_info( &self, obj: &mut Object<'_>, xdata_id: SectionId, pdata_id: SectionId, text_section_size: u64, ) { // Currently the binary format supported here only supports // little-endian for x86_64, or at least that's all where it's tested. // This may need updates for other platforms. assert_eq!(obj.architecture(), Architecture::X86_64); // Append the `.xdata` section, or the actual unwinding information // codes and such which were built as we found unwind information for // functions. 
obj.append_section_data(xdata_id, &self.windows_xdata, 4); // Next append the `.pdata` section, or the array of `RUNTIME_FUNCTION` // structures stored in the binary. // // This memory will be passed at runtime to `RtlAddFunctionTable` which // takes a "base address" and the entries within `RUNTIME_FUNCTION` are // all relative to this base address. The base address we pass is the // address of the text section itself so all the pointers here must be // text-section-relative. The `begin` and `end` fields for the function // it describes are already text-section-relative, but the // `unwind_address` field needs to be updated here since the value // stored right now is `xdata`-section-relative. We know that the // `xdata` section follows the `.text` section so the // `text_section_size` is added in to calculate the final // `.text`-section-relative address of the unwind information. let mut pdata = Vec::with_capacity(self.windows_pdata.len() * 3 * 4); for info in self.windows_pdata.iter() { pdata.extend_from_slice(&info.begin.to_le_bytes()); pdata.extend_from_slice(&info.end.to_le_bytes()); let address = text_section_size + u64::from(info.unwind_address); let address = u32::try_from(address).unwrap(); pdata.extend_from_slice(&address.to_le_bytes()); } obj.append_section_data(pdata_id, &pdata, 4); } /// This function appends a nonstandard section to the object which is only /// used during `CodeMemory::publish`. /// /// This will generate a `.eh_frame` section, but not one that can be /// naively loaded. The goal of this section is that we can create the /// section once here and never again does it need to change. To describe /// dynamically loaded functions though each individual FDE needs to talk /// about the function's absolute address that it's referencing. Naturally /// we don't actually know the function's absolute address when we're /// creating an object here. /// /// To solve this problem the FDE address encoding mode is set to /// `DW_EH_PE_pcrel`. 
This means that the actual effective address that the /// FDE describes is a relative to the address of the FDE itself. By /// leveraging this relative-ness we can assume that the relative distance /// between the FDE and the function it describes is constant, which should /// allow us to generate an FDE ahead-of-time here. /// /// For now this assumes that all the code of functions will start at a /// page-aligned address when loaded into memory. The eh_frame encoded here /// then assumes that the text section is itself page aligned to its size /// and the eh_frame will follow just after the text section. This means /// that the relative offsets we're using here is the FDE going backwards /// into the text section itself. /// /// Note that the library we're using to create the FDEs, `gimli`, doesn't /// actually encode addresses relative to the FDE itself. Instead the /// addresses are encoded relative to the start of the `.eh_frame` section. /// This makes it much easier for us where we provide the relative offset /// from the start of `.eh_frame` to the function in the text section, which /// given our layout basically means the offset of the function in the text /// section from the end of the text section. /// /// A final note is that the reason we page-align the text section's size is /// so the .eh_frame lives on a separate page from the text section itself. /// This allows `.eh_frame` to have different virtual memory permissions, /// such as being purely read-only instead of read/execute like the code /// bits. 
fn write_systemv_unwind_info( &self, compiler: &dyn Compiler, obj: &mut Object<'_>, section_id: SectionId, text_section_size: u64, ) { let mut cie = compiler .create_systemv_cie() .expect("must be able to create a CIE for system-v unwind info"); let mut table = FrameTable::default(); cie.fde_address_encoding = gimli::constants::DW_EH_PE_pcrel; let cie_id = table.add_cie(cie); for (text_section_off, unwind_info) in self.systemv_unwind_info.iter() { let backwards_off = text_section_size - text_section_off; let actual_offset = -i64::try_from(backwards_off).unwrap(); // Note that gimli wants an unsigned 64-bit integer here, but // unwinders just use this constant for a relative addition with the // address of the FDE, which means that the sign doesn't actually // matter. let fde = unwind_info.to_fde(Address::Constant(actual_offset as u64)); table.add_fde(cie_id, fde); } let endian = match compiler.triple().endianness().unwrap() { target_lexicon::Endianness::Little => RunTimeEndian::Little, target_lexicon::Endianness::Big => RunTimeEndian::Big, }; let mut eh_frame = EhFrame(MyVec(EndianVec::new(endian))); table.write_eh_frame(&mut eh_frame).unwrap(); // Some unwinding implementations expect a terminating "empty" length so // a 0 is written at the end of the table for those implementations. 
let mut endian_vec = (eh_frame.0).0; endian_vec.write_u32(0).unwrap(); obj.append_section_data(section_id, endian_vec.slice(), 1); use gimli::constants; use gimli::write::Error; struct MyVec(EndianVec<RunTimeEndian>); impl Writer for MyVec { type Endian = RunTimeEndian; fn endian(&self) -> RunTimeEndian { self.0.endian() } fn len(&self) -> usize { self.0.len() } fn write(&mut self, buf: &[u8]) -> Result<(), Error> { self.0.write(buf) } fn write_at(&mut self, pos: usize, buf: &[u8]) -> Result<(), Error> { self.0.write_at(pos, buf) } // FIXME(gimli-rs/gimli#576) this is the definition we want for // `write_eh_pointer` but the default implementation, at the time // of this writing, uses `offset - val` instead of `val - offset`. // A PR has been merged to fix this but until that's published we // can't use it. fn write_eh_pointer( &mut self, address: Address, eh_pe: constants::DwEhPe, size: u8, ) -> Result<(), Error> { let val = match address { Address::Constant(val) => val, Address::Symbol { .. } => unreachable!(), }; assert_eq!(eh_pe.application(), constants::DW_EH_PE_pcrel); let offset = self.len() as u64; let val = val.wrapping_sub(offset); self.write_eh_pointer_data(val, eh_pe.format(), size) } } } } fn libcall_name(call: LibCall) -> &'static str { use wasmtime_environ::obj::LibCall as LC; let other = match call { LibCall::FloorF32 => LC::FloorF32, LibCall::FloorF64 => LC::FloorF64, LibCall::NearestF32 => LC::NearestF32, LibCall::NearestF64 => LC::NearestF64, LibCall::CeilF32 => LC::CeilF32, LibCall::CeilF64 => LC::CeilF64, LibCall::TruncF32 => LC::TruncF32, LibCall::TruncF64 => LC::TruncF64, LibCall::FmaF32 => LC::FmaF32, LibCall::FmaF64 => LC::FmaF64, LibCall::X86Pshufb => LC::X86Pshufb, _ => panic!("unknown libcall to give a name to: {call:?}"), }; other.symbol() }
{ self.unwind_info.push(off, body_len, info); }
conditional_block
util.rs
use libc; use std::ffi::CStr; use std::io; use std::net::SocketAddr; use std::net::TcpStream as StdTcpStream; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; use bytes::{BufMut, BytesMut}; use futures::future::Either; use futures::sync::mpsc::Sender; use futures::{Async, Future, IntoFuture, Poll, Sink, Stream}; use net2::TcpBuilder; use resolve::resolver; use slog::{info, o, warn, Drain, Logger}; use tokio::executor::current_thread::spawn; use tokio::net::TcpListener; use tokio::timer::{Delay, Interval}; use crate::task::Task; use crate::Float; use crate::{AGG_ERRORS, DROPS, EGRESS, INGRESS, INGRESS_METRICS, PARSE_ERRORS, PEER_ERRORS}; use bioyino_metric::{name::MetricName, Metric, MetricType}; use crate::{ConsensusState, CONSENSUS_STATE, IS_LEADER}; pub fn prepare_log(root: &'static str) -> Logger { // Set logging let decorator = slog_term::TermDecorator::new().build(); let drain = slog_term::FullFormat::new(decorator).build().fuse(); let filter = slog::LevelFilter::new(drain, slog::Level::Trace).fuse(); let drain = slog_async::Async::new(filter).build().fuse(); slog::Logger::root(drain, o!("program"=>"test", "test"=>root)) } pub fn try_resolve(s: &str) -> SocketAddr { s.parse().unwrap_or_else(|_| { // for name that have failed to be parsed we try to resolve it via DNS let mut split = s.split(':'); let host = split.next().unwrap(); // Split always has first element let port = split.next().expect("port not found"); let port = port.parse().expect("bad port value"); let first_ip = resolver::resolve_host(host) .unwrap_or_else(|_| panic!("failed resolving {:}", &host)) .next() .expect("at least one IP address required"); SocketAddr::new(first_ip, port) }) } pub fn bound_stream(addr: &SocketAddr) -> Result<StdTcpStream, io::Error> { let builder = TcpBuilder::new_v4()?; builder.bind(addr)?; builder.to_tcp_stream() } pub fn reusing_listener(addr: &SocketAddr) -> Result<TcpListener, io::Error> { let builder = TcpBuilder::new_v4()?; 
builder.reuse_address(true)?; builder.bind(addr)?; // backlog parameter will be limited by SOMAXCONN on Linux, which is usually set to 128 let listener = builder.listen(65536)?; listener.set_nonblocking(true)?; TcpListener::from_std(listener, &tokio::reactor::Handle::default()) } // TODO impl this correctly and use instead of try_resolve // PROFIT: gives libnss-aware behaviour /* fn _try_resolve_nss(name: &str) { use std::io; use std::ffi::CString; use std::ptr::{null_mut, null}; use libc::*; let domain= CString::new(Vec::from(name)).unwrap().into_raw(); let mut result: *mut addrinfo = null_mut(); let success = unsafe { getaddrinfo(domain, null_mut(), null(), &mut result) }; if success != 0 { // let errno = unsafe { *__errno_location() }; println!("{:?}", io::Error::last_os_error()); } else { let mut cur = result; while cur != null_mut() { unsafe{ println!("LEN {:?}", (*result).ai_addrlen); println!("DATA {:?}", (*(*result).ai_addr).sa_data); cur = (*result).ai_next; } } } } */ /// Get hostname. 
Copypasted from some crate pub fn get_hostname() -> Option<String> { let len = 255; let mut buf = Vec::<u8>::with_capacity(len); let ptr = buf.as_mut_ptr() as *mut libc::c_char; unsafe { if libc::gethostname(ptr, len as libc::size_t) != 0 { return None; } Some(CStr::from_ptr(ptr).to_string_lossy().into_owned()) } } pub fn switch_leader(acquired: bool, log: &Logger) { let should_set = { let state = &*CONSENSUS_STATE.lock().unwrap(); // only set leader when consensus is enabled state == &ConsensusState::Enabled }; if should_set { let is_leader = IS_LEADER.load(Ordering::SeqCst); if is_leader != acquired { warn!(log, "leader state change: {} -> {}", is_leader, acquired); } IS_LEADER.store(acquired, Ordering::SeqCst); } } #[cfg(test)] pub(crate) fn new_test_graphite_name(s: &'static str) -> MetricName { let mut intermediate = Vec::new(); intermediate.resize(9000, 0u8); let mode = bioyino_metric::name::TagFormat::Graphite; MetricName::new(s.into(), mode, &mut intermediate).unwrap() } // A future to send own stats. Never gets ready. pub struct OwnStats { interval: u64, prefix: String, timer: Interval, chan: Sender<Task>, log: Logger, } impl OwnStats { pub fn new(interval: u64, prefix: String, chan: Sender<Task>, log: Logger) -> Self { let log = log.new(o!("source"=>"stats")); let now = Instant::now(); let dur = Duration::from_millis(if interval < 100 { 1000 } else { interval }); // exclude too small intervals Self { interval, prefix, timer: Interval::new(now + dur, dur), chan, log, } } pub fn get_stats(&mut self) { let mut buf = BytesMut::with_capacity((self.prefix.len() + 10) * 7); // 10 is suffix len, 7 is number of metrics macro_rules! 
add_metric { ($global:ident, $value:ident, $suffix:expr) => { let $value = $global.swap(0, Ordering::Relaxed) as Float; if self.interval > 0 { buf.put(&self.prefix); buf.put("."); buf.put(&$suffix); let name = MetricName::new_untagged(buf.take()); let metric = Metric::new($value, MetricType::Counter, None, None).unwrap(); let log = self.log.clone(); let sender = self .chan .clone() .send(Task::AddMetric(name, metric)) .map(|_| ()) .map_err(move |_| warn!(log, "stats future could not send metric to task")); spawn(sender); } }; }; add_metric!(EGRESS, egress, "egress"); add_metric!(INGRESS, ingress, "ingress"); add_metric!(INGRESS_METRICS, ingress_m, "ingress-metric"); add_metric!(AGG_ERRORS, agr_errors, "agg-error"); add_metric!(PARSE_ERRORS, parse_errors, "parse-error"); add_metric!(PEER_ERRORS, peer_errors, "peer-error"); add_metric!(DROPS, drops, "drop"); if self.interval > 0 { let s_interval = self.interval as f64 / 1000f64; info!(self.log, "stats"; "egress" => format!("{:2}", egress / s_interval), "ingress" => format!("{:2}", ingress / s_interval), "ingress-m" => format!("{:2}", ingress_m / s_interval), "a-err" => format!("{:2}", agr_errors / s_interval), "p-err" => format!("{:2}", parse_errors / s_interval), "pe-err" => format!("{:2}", peer_errors / s_interval), "drops" => format!("{:2}", drops / s_interval), ); } } } impl Future for OwnStats { type Item = (); type Error = (); fn poll(&mut self) -> Poll<Self::Item, Self::Error> { loop { match self.timer.poll() { Ok(Async::Ready(Some(_))) => { self.get_stats(); } Ok(Async::Ready(None)) => unreachable!(), Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => return Err(()), } } } } #[derive(Clone, Debug)] /// Builder for `BackoffRetry`, delays are specified in milliseconds pub struct BackoffRetryBuilder { pub delay: u64, pub delay_mul: f32, pub delay_max: u64, pub retries: usize, } impl Default for BackoffRetryBuilder { fn default() -> Self { Self { delay: 500, delay_mul: 2f32, delay_max: 10000, retries: 
25, } } } impl BackoffRetryBuilder { pub fn spawn<F>(self, action: F) -> BackoffRetry<F> where F: IntoFuture + Clone, { let inner = Either::A(action.clone().into_future()); BackoffRetry { action, inner, options: self } } } /// TCP client that is able to reconnect with customizable settings pub struct
<F: IntoFuture> { action: F, inner: Either<F::Future, Delay>, options: BackoffRetryBuilder, } impl<F> Future for BackoffRetry<F> where F: IntoFuture + Clone, { type Item = F::Item; type Error = Option<F::Error>; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { loop { let (rotate_f, rotate_t) = match self.inner { // we are polling a future currently Either::A(ref mut future) => match future.poll() { Ok(Async::Ready(item)) => { return Ok(Async::Ready(item)); } Ok(Async::NotReady) => return Ok(Async::NotReady), Err(e) => { if self.options.retries == 0 { return Err(Some(e)); } else { (true, false) } } }, Either::B(ref mut timer) => { match timer.poll() { // we are waiting for the delay Ok(Async::Ready(())) => (false, true), Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => unreachable!(), // timer should not return error } } }; if rotate_f { self.options.retries -= 1; let delay = self.options.delay as f32 * self.options.delay_mul; let delay = if delay <= self.options.delay_max as f32 { delay as u64 } else { self.options.delay_max as u64 }; let delay = Delay::new(Instant::now() + Duration::from_millis(delay)); self.inner = Either::B(delay); } else if rotate_t { self.inner = Either::A(self.action.clone().into_future()); } } } }
BackoffRetry
identifier_name
util.rs
use libc; use std::ffi::CStr; use std::io; use std::net::SocketAddr; use std::net::TcpStream as StdTcpStream; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; use bytes::{BufMut, BytesMut}; use futures::future::Either; use futures::sync::mpsc::Sender; use futures::{Async, Future, IntoFuture, Poll, Sink, Stream}; use net2::TcpBuilder; use resolve::resolver; use slog::{info, o, warn, Drain, Logger}; use tokio::executor::current_thread::spawn; use tokio::net::TcpListener; use tokio::timer::{Delay, Interval}; use crate::task::Task; use crate::Float; use crate::{AGG_ERRORS, DROPS, EGRESS, INGRESS, INGRESS_METRICS, PARSE_ERRORS, PEER_ERRORS}; use bioyino_metric::{name::MetricName, Metric, MetricType}; use crate::{ConsensusState, CONSENSUS_STATE, IS_LEADER}; pub fn prepare_log(root: &'static str) -> Logger { // Set logging let decorator = slog_term::TermDecorator::new().build(); let drain = slog_term::FullFormat::new(decorator).build().fuse(); let filter = slog::LevelFilter::new(drain, slog::Level::Trace).fuse(); let drain = slog_async::Async::new(filter).build().fuse(); slog::Logger::root(drain, o!("program"=>"test", "test"=>root)) } pub fn try_resolve(s: &str) -> SocketAddr { s.parse().unwrap_or_else(|_| { // for name that have failed to be parsed we try to resolve it via DNS let mut split = s.split(':'); let host = split.next().unwrap(); // Split always has first element let port = split.next().expect("port not found"); let port = port.parse().expect("bad port value"); let first_ip = resolver::resolve_host(host) .unwrap_or_else(|_| panic!("failed resolving {:}", &host)) .next() .expect("at least one IP address required"); SocketAddr::new(first_ip, port) }) } pub fn bound_stream(addr: &SocketAddr) -> Result<StdTcpStream, io::Error> { let builder = TcpBuilder::new_v4()?; builder.bind(addr)?; builder.to_tcp_stream() } pub fn reusing_listener(addr: &SocketAddr) -> Result<TcpListener, io::Error> { let builder = TcpBuilder::new_v4()?; 
builder.reuse_address(true)?; builder.bind(addr)?; // backlog parameter will be limited by SOMAXCONN on Linux, which is usually set to 128 let listener = builder.listen(65536)?; listener.set_nonblocking(true)?; TcpListener::from_std(listener, &tokio::reactor::Handle::default()) } // TODO impl this correctly and use instead of try_resolve // PROFIT: gives libnss-aware behaviour /* fn _try_resolve_nss(name: &str) { use std::io; use std::ffi::CString; use std::ptr::{null_mut, null}; use libc::*; let domain= CString::new(Vec::from(name)).unwrap().into_raw(); let mut result: *mut addrinfo = null_mut(); let success = unsafe { getaddrinfo(domain, null_mut(), null(), &mut result) }; if success != 0 { // let errno = unsafe { *__errno_location() }; println!("{:?}", io::Error::last_os_error()); } else { let mut cur = result; while cur != null_mut() { unsafe{ println!("LEN {:?}", (*result).ai_addrlen); println!("DATA {:?}", (*(*result).ai_addr).sa_data); cur = (*result).ai_next; } } } } */ /// Get hostname. 
Copypasted from some crate pub fn get_hostname() -> Option<String> { let len = 255; let mut buf = Vec::<u8>::with_capacity(len); let ptr = buf.as_mut_ptr() as *mut libc::c_char; unsafe { if libc::gethostname(ptr, len as libc::size_t) != 0 { return None; } Some(CStr::from_ptr(ptr).to_string_lossy().into_owned()) } } pub fn switch_leader(acquired: bool, log: &Logger) { let should_set = { let state = &*CONSENSUS_STATE.lock().unwrap(); // only set leader when consensus is enabled state == &ConsensusState::Enabled }; if should_set { let is_leader = IS_LEADER.load(Ordering::SeqCst); if is_leader != acquired { warn!(log, "leader state change: {} -> {}", is_leader, acquired); } IS_LEADER.store(acquired, Ordering::SeqCst); } } #[cfg(test)] pub(crate) fn new_test_graphite_name(s: &'static str) -> MetricName { let mut intermediate = Vec::new(); intermediate.resize(9000, 0u8); let mode = bioyino_metric::name::TagFormat::Graphite; MetricName::new(s.into(), mode, &mut intermediate).unwrap() } // A future to send own stats. Never gets ready. pub struct OwnStats { interval: u64, prefix: String, timer: Interval, chan: Sender<Task>, log: Logger, } impl OwnStats { pub fn new(interval: u64, prefix: String, chan: Sender<Task>, log: Logger) -> Self { let log = log.new(o!("source"=>"stats")); let now = Instant::now(); let dur = Duration::from_millis(if interval < 100 { 1000 } else { interval }); // exclude too small intervals Self { interval, prefix, timer: Interval::new(now + dur, dur), chan, log, } } pub fn get_stats(&mut self) { let mut buf = BytesMut::with_capacity((self.prefix.len() + 10) * 7); // 10 is suffix len, 7 is number of metrics macro_rules! 
add_metric { ($global:ident, $value:ident, $suffix:expr) => { let $value = $global.swap(0, Ordering::Relaxed) as Float; if self.interval > 0 { buf.put(&self.prefix); buf.put("."); buf.put(&$suffix); let name = MetricName::new_untagged(buf.take()); let metric = Metric::new($value, MetricType::Counter, None, None).unwrap(); let log = self.log.clone(); let sender = self .chan .clone() .send(Task::AddMetric(name, metric)) .map(|_| ()) .map_err(move |_| warn!(log, "stats future could not send metric to task")); spawn(sender); } }; }; add_metric!(EGRESS, egress, "egress"); add_metric!(INGRESS, ingress, "ingress");
if self.interval > 0 { let s_interval = self.interval as f64 / 1000f64; info!(self.log, "stats"; "egress" => format!("{:2}", egress / s_interval), "ingress" => format!("{:2}", ingress / s_interval), "ingress-m" => format!("{:2}", ingress_m / s_interval), "a-err" => format!("{:2}", agr_errors / s_interval), "p-err" => format!("{:2}", parse_errors / s_interval), "pe-err" => format!("{:2}", peer_errors / s_interval), "drops" => format!("{:2}", drops / s_interval), ); } } } impl Future for OwnStats { type Item = (); type Error = (); fn poll(&mut self) -> Poll<Self::Item, Self::Error> { loop { match self.timer.poll() { Ok(Async::Ready(Some(_))) => { self.get_stats(); } Ok(Async::Ready(None)) => unreachable!(), Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => return Err(()), } } } } #[derive(Clone, Debug)] /// Builder for `BackoffRetry`, delays are specified in milliseconds pub struct BackoffRetryBuilder { pub delay: u64, pub delay_mul: f32, pub delay_max: u64, pub retries: usize, } impl Default for BackoffRetryBuilder { fn default() -> Self { Self { delay: 500, delay_mul: 2f32, delay_max: 10000, retries: 25, } } } impl BackoffRetryBuilder { pub fn spawn<F>(self, action: F) -> BackoffRetry<F> where F: IntoFuture + Clone, { let inner = Either::A(action.clone().into_future()); BackoffRetry { action, inner, options: self } } } /// TCP client that is able to reconnect with customizable settings pub struct BackoffRetry<F: IntoFuture> { action: F, inner: Either<F::Future, Delay>, options: BackoffRetryBuilder, } impl<F> Future for BackoffRetry<F> where F: IntoFuture + Clone, { type Item = F::Item; type Error = Option<F::Error>; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { loop { let (rotate_f, rotate_t) = match self.inner { // we are polling a future currently Either::A(ref mut future) => match future.poll() { Ok(Async::Ready(item)) => { return Ok(Async::Ready(item)); } Ok(Async::NotReady) => return Ok(Async::NotReady), Err(e) => { if self.options.retries 
== 0 { return Err(Some(e)); } else { (true, false) } } }, Either::B(ref mut timer) => { match timer.poll() { // we are waiting for the delay Ok(Async::Ready(())) => (false, true), Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => unreachable!(), // timer should not return error } } }; if rotate_f { self.options.retries -= 1; let delay = self.options.delay as f32 * self.options.delay_mul; let delay = if delay <= self.options.delay_max as f32 { delay as u64 } else { self.options.delay_max as u64 }; let delay = Delay::new(Instant::now() + Duration::from_millis(delay)); self.inner = Either::B(delay); } else if rotate_t { self.inner = Either::A(self.action.clone().into_future()); } } } }
add_metric!(INGRESS_METRICS, ingress_m, "ingress-metric"); add_metric!(AGG_ERRORS, agr_errors, "agg-error"); add_metric!(PARSE_ERRORS, parse_errors, "parse-error"); add_metric!(PEER_ERRORS, peer_errors, "peer-error"); add_metric!(DROPS, drops, "drop");
random_line_split
util.rs
use libc; use std::ffi::CStr; use std::io; use std::net::SocketAddr; use std::net::TcpStream as StdTcpStream; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; use bytes::{BufMut, BytesMut}; use futures::future::Either; use futures::sync::mpsc::Sender; use futures::{Async, Future, IntoFuture, Poll, Sink, Stream}; use net2::TcpBuilder; use resolve::resolver; use slog::{info, o, warn, Drain, Logger}; use tokio::executor::current_thread::spawn; use tokio::net::TcpListener; use tokio::timer::{Delay, Interval}; use crate::task::Task; use crate::Float; use crate::{AGG_ERRORS, DROPS, EGRESS, INGRESS, INGRESS_METRICS, PARSE_ERRORS, PEER_ERRORS}; use bioyino_metric::{name::MetricName, Metric, MetricType}; use crate::{ConsensusState, CONSENSUS_STATE, IS_LEADER}; pub fn prepare_log(root: &'static str) -> Logger { // Set logging let decorator = slog_term::TermDecorator::new().build(); let drain = slog_term::FullFormat::new(decorator).build().fuse(); let filter = slog::LevelFilter::new(drain, slog::Level::Trace).fuse(); let drain = slog_async::Async::new(filter).build().fuse(); slog::Logger::root(drain, o!("program"=>"test", "test"=>root)) } pub fn try_resolve(s: &str) -> SocketAddr { s.parse().unwrap_or_else(|_| { // for name that have failed to be parsed we try to resolve it via DNS let mut split = s.split(':'); let host = split.next().unwrap(); // Split always has first element let port = split.next().expect("port not found"); let port = port.parse().expect("bad port value"); let first_ip = resolver::resolve_host(host) .unwrap_or_else(|_| panic!("failed resolving {:}", &host)) .next() .expect("at least one IP address required"); SocketAddr::new(first_ip, port) }) } pub fn bound_stream(addr: &SocketAddr) -> Result<StdTcpStream, io::Error> { let builder = TcpBuilder::new_v4()?; builder.bind(addr)?; builder.to_tcp_stream() } pub fn reusing_listener(addr: &SocketAddr) -> Result<TcpListener, io::Error> { let builder = TcpBuilder::new_v4()?; 
builder.reuse_address(true)?; builder.bind(addr)?; // backlog parameter will be limited by SOMAXCONN on Linux, which is usually set to 128 let listener = builder.listen(65536)?; listener.set_nonblocking(true)?; TcpListener::from_std(listener, &tokio::reactor::Handle::default()) } // TODO impl this correctly and use instead of try_resolve // PROFIT: gives libnss-aware behaviour /* fn _try_resolve_nss(name: &str) { use std::io; use std::ffi::CString; use std::ptr::{null_mut, null}; use libc::*; let domain= CString::new(Vec::from(name)).unwrap().into_raw(); let mut result: *mut addrinfo = null_mut(); let success = unsafe { getaddrinfo(domain, null_mut(), null(), &mut result) }; if success != 0 { // let errno = unsafe { *__errno_location() }; println!("{:?}", io::Error::last_os_error()); } else { let mut cur = result; while cur != null_mut() { unsafe{ println!("LEN {:?}", (*result).ai_addrlen); println!("DATA {:?}", (*(*result).ai_addr).sa_data); cur = (*result).ai_next; } } } } */ /// Get hostname. Copypasted from some crate pub fn get_hostname() -> Option<String> { let len = 255; let mut buf = Vec::<u8>::with_capacity(len); let ptr = buf.as_mut_ptr() as *mut libc::c_char; unsafe { if libc::gethostname(ptr, len as libc::size_t) != 0 { return None; } Some(CStr::from_ptr(ptr).to_string_lossy().into_owned()) } } pub fn switch_leader(acquired: bool, log: &Logger) { let should_set = { let state = &*CONSENSUS_STATE.lock().unwrap(); // only set leader when consensus is enabled state == &ConsensusState::Enabled }; if should_set
} #[cfg(test)] pub(crate) fn new_test_graphite_name(s: &'static str) -> MetricName { let mut intermediate = Vec::new(); intermediate.resize(9000, 0u8); let mode = bioyino_metric::name::TagFormat::Graphite; MetricName::new(s.into(), mode, &mut intermediate).unwrap() } // A future to send own stats. Never gets ready. pub struct OwnStats { interval: u64, prefix: String, timer: Interval, chan: Sender<Task>, log: Logger, } impl OwnStats { pub fn new(interval: u64, prefix: String, chan: Sender<Task>, log: Logger) -> Self { let log = log.new(o!("source"=>"stats")); let now = Instant::now(); let dur = Duration::from_millis(if interval < 100 { 1000 } else { interval }); // exclude too small intervals Self { interval, prefix, timer: Interval::new(now + dur, dur), chan, log, } } pub fn get_stats(&mut self) { let mut buf = BytesMut::with_capacity((self.prefix.len() + 10) * 7); // 10 is suffix len, 7 is number of metrics macro_rules! add_metric { ($global:ident, $value:ident, $suffix:expr) => { let $value = $global.swap(0, Ordering::Relaxed) as Float; if self.interval > 0 { buf.put(&self.prefix); buf.put("."); buf.put(&$suffix); let name = MetricName::new_untagged(buf.take()); let metric = Metric::new($value, MetricType::Counter, None, None).unwrap(); let log = self.log.clone(); let sender = self .chan .clone() .send(Task::AddMetric(name, metric)) .map(|_| ()) .map_err(move |_| warn!(log, "stats future could not send metric to task")); spawn(sender); } }; }; add_metric!(EGRESS, egress, "egress"); add_metric!(INGRESS, ingress, "ingress"); add_metric!(INGRESS_METRICS, ingress_m, "ingress-metric"); add_metric!(AGG_ERRORS, agr_errors, "agg-error"); add_metric!(PARSE_ERRORS, parse_errors, "parse-error"); add_metric!(PEER_ERRORS, peer_errors, "peer-error"); add_metric!(DROPS, drops, "drop"); if self.interval > 0 { let s_interval = self.interval as f64 / 1000f64; info!(self.log, "stats"; "egress" => format!("{:2}", egress / s_interval), "ingress" => format!("{:2}", ingress / 
s_interval), "ingress-m" => format!("{:2}", ingress_m / s_interval), "a-err" => format!("{:2}", agr_errors / s_interval), "p-err" => format!("{:2}", parse_errors / s_interval), "pe-err" => format!("{:2}", peer_errors / s_interval), "drops" => format!("{:2}", drops / s_interval), ); } } } impl Future for OwnStats { type Item = (); type Error = (); fn poll(&mut self) -> Poll<Self::Item, Self::Error> { loop { match self.timer.poll() { Ok(Async::Ready(Some(_))) => { self.get_stats(); } Ok(Async::Ready(None)) => unreachable!(), Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => return Err(()), } } } } #[derive(Clone, Debug)] /// Builder for `BackoffRetry`, delays are specified in milliseconds pub struct BackoffRetryBuilder { pub delay: u64, pub delay_mul: f32, pub delay_max: u64, pub retries: usize, } impl Default for BackoffRetryBuilder { fn default() -> Self { Self { delay: 500, delay_mul: 2f32, delay_max: 10000, retries: 25, } } } impl BackoffRetryBuilder { pub fn spawn<F>(self, action: F) -> BackoffRetry<F> where F: IntoFuture + Clone, { let inner = Either::A(action.clone().into_future()); BackoffRetry { action, inner, options: self } } } /// TCP client that is able to reconnect with customizable settings pub struct BackoffRetry<F: IntoFuture> { action: F, inner: Either<F::Future, Delay>, options: BackoffRetryBuilder, } impl<F> Future for BackoffRetry<F> where F: IntoFuture + Clone, { type Item = F::Item; type Error = Option<F::Error>; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { loop { let (rotate_f, rotate_t) = match self.inner { // we are polling a future currently Either::A(ref mut future) => match future.poll() { Ok(Async::Ready(item)) => { return Ok(Async::Ready(item)); } Ok(Async::NotReady) => return Ok(Async::NotReady), Err(e) => { if self.options.retries == 0 { return Err(Some(e)); } else { (true, false) } } }, Either::B(ref mut timer) => { match timer.poll() { // we are waiting for the delay Ok(Async::Ready(())) => (false, true), 
Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => unreachable!(), // timer should not return error } } }; if rotate_f { self.options.retries -= 1; let delay = self.options.delay as f32 * self.options.delay_mul; let delay = if delay <= self.options.delay_max as f32 { delay as u64 } else { self.options.delay_max as u64 }; let delay = Delay::new(Instant::now() + Duration::from_millis(delay)); self.inner = Either::B(delay); } else if rotate_t { self.inner = Either::A(self.action.clone().into_future()); } } } }
{ let is_leader = IS_LEADER.load(Ordering::SeqCst); if is_leader != acquired { warn!(log, "leader state change: {} -> {}", is_leader, acquired); } IS_LEADER.store(acquired, Ordering::SeqCst); }
conditional_block
util.rs
use libc; use std::ffi::CStr; use std::io; use std::net::SocketAddr; use std::net::TcpStream as StdTcpStream; use std::sync::atomic::Ordering; use std::time::{Duration, Instant}; use bytes::{BufMut, BytesMut}; use futures::future::Either; use futures::sync::mpsc::Sender; use futures::{Async, Future, IntoFuture, Poll, Sink, Stream}; use net2::TcpBuilder; use resolve::resolver; use slog::{info, o, warn, Drain, Logger}; use tokio::executor::current_thread::spawn; use tokio::net::TcpListener; use tokio::timer::{Delay, Interval}; use crate::task::Task; use crate::Float; use crate::{AGG_ERRORS, DROPS, EGRESS, INGRESS, INGRESS_METRICS, PARSE_ERRORS, PEER_ERRORS}; use bioyino_metric::{name::MetricName, Metric, MetricType}; use crate::{ConsensusState, CONSENSUS_STATE, IS_LEADER}; pub fn prepare_log(root: &'static str) -> Logger { // Set logging let decorator = slog_term::TermDecorator::new().build(); let drain = slog_term::FullFormat::new(decorator).build().fuse(); let filter = slog::LevelFilter::new(drain, slog::Level::Trace).fuse(); let drain = slog_async::Async::new(filter).build().fuse(); slog::Logger::root(drain, o!("program"=>"test", "test"=>root)) } pub fn try_resolve(s: &str) -> SocketAddr { s.parse().unwrap_or_else(|_| { // for name that have failed to be parsed we try to resolve it via DNS let mut split = s.split(':'); let host = split.next().unwrap(); // Split always has first element let port = split.next().expect("port not found"); let port = port.parse().expect("bad port value"); let first_ip = resolver::resolve_host(host) .unwrap_or_else(|_| panic!("failed resolving {:}", &host)) .next() .expect("at least one IP address required"); SocketAddr::new(first_ip, port) }) } pub fn bound_stream(addr: &SocketAddr) -> Result<StdTcpStream, io::Error> { let builder = TcpBuilder::new_v4()?; builder.bind(addr)?; builder.to_tcp_stream() } pub fn reusing_listener(addr: &SocketAddr) -> Result<TcpListener, io::Error> { let builder = TcpBuilder::new_v4()?; 
builder.reuse_address(true)?; builder.bind(addr)?; // backlog parameter will be limited by SOMAXCONN on Linux, which is usually set to 128 let listener = builder.listen(65536)?; listener.set_nonblocking(true)?; TcpListener::from_std(listener, &tokio::reactor::Handle::default()) } // TODO impl this correctly and use instead of try_resolve // PROFIT: gives libnss-aware behaviour /* fn _try_resolve_nss(name: &str) { use std::io; use std::ffi::CString; use std::ptr::{null_mut, null}; use libc::*; let domain= CString::new(Vec::from(name)).unwrap().into_raw(); let mut result: *mut addrinfo = null_mut(); let success = unsafe { getaddrinfo(domain, null_mut(), null(), &mut result) }; if success != 0 { // let errno = unsafe { *__errno_location() }; println!("{:?}", io::Error::last_os_error()); } else { let mut cur = result; while cur != null_mut() { unsafe{ println!("LEN {:?}", (*result).ai_addrlen); println!("DATA {:?}", (*(*result).ai_addr).sa_data); cur = (*result).ai_next; } } } } */ /// Get hostname. 
Copypasted from some crate pub fn get_hostname() -> Option<String> { let len = 255; let mut buf = Vec::<u8>::with_capacity(len); let ptr = buf.as_mut_ptr() as *mut libc::c_char; unsafe { if libc::gethostname(ptr, len as libc::size_t) != 0 { return None; } Some(CStr::from_ptr(ptr).to_string_lossy().into_owned()) } } pub fn switch_leader(acquired: bool, log: &Logger) { let should_set = { let state = &*CONSENSUS_STATE.lock().unwrap(); // only set leader when consensus is enabled state == &ConsensusState::Enabled }; if should_set { let is_leader = IS_LEADER.load(Ordering::SeqCst); if is_leader != acquired { warn!(log, "leader state change: {} -> {}", is_leader, acquired); } IS_LEADER.store(acquired, Ordering::SeqCst); } } #[cfg(test)] pub(crate) fn new_test_graphite_name(s: &'static str) -> MetricName { let mut intermediate = Vec::new(); intermediate.resize(9000, 0u8); let mode = bioyino_metric::name::TagFormat::Graphite; MetricName::new(s.into(), mode, &mut intermediate).unwrap() } // A future to send own stats. Never gets ready. pub struct OwnStats { interval: u64, prefix: String, timer: Interval, chan: Sender<Task>, log: Logger, } impl OwnStats { pub fn new(interval: u64, prefix: String, chan: Sender<Task>, log: Logger) -> Self { let log = log.new(o!("source"=>"stats")); let now = Instant::now(); let dur = Duration::from_millis(if interval < 100 { 1000 } else { interval }); // exclude too small intervals Self { interval, prefix, timer: Interval::new(now + dur, dur), chan, log, } } pub fn get_stats(&mut self) { let mut buf = BytesMut::with_capacity((self.prefix.len() + 10) * 7); // 10 is suffix len, 7 is number of metrics macro_rules! 
add_metric { ($global:ident, $value:ident, $suffix:expr) => { let $value = $global.swap(0, Ordering::Relaxed) as Float; if self.interval > 0 { buf.put(&self.prefix); buf.put("."); buf.put(&$suffix); let name = MetricName::new_untagged(buf.take()); let metric = Metric::new($value, MetricType::Counter, None, None).unwrap(); let log = self.log.clone(); let sender = self .chan .clone() .send(Task::AddMetric(name, metric)) .map(|_| ()) .map_err(move |_| warn!(log, "stats future could not send metric to task")); spawn(sender); } }; }; add_metric!(EGRESS, egress, "egress"); add_metric!(INGRESS, ingress, "ingress"); add_metric!(INGRESS_METRICS, ingress_m, "ingress-metric"); add_metric!(AGG_ERRORS, agr_errors, "agg-error"); add_metric!(PARSE_ERRORS, parse_errors, "parse-error"); add_metric!(PEER_ERRORS, peer_errors, "peer-error"); add_metric!(DROPS, drops, "drop"); if self.interval > 0 { let s_interval = self.interval as f64 / 1000f64; info!(self.log, "stats"; "egress" => format!("{:2}", egress / s_interval), "ingress" => format!("{:2}", ingress / s_interval), "ingress-m" => format!("{:2}", ingress_m / s_interval), "a-err" => format!("{:2}", agr_errors / s_interval), "p-err" => format!("{:2}", parse_errors / s_interval), "pe-err" => format!("{:2}", peer_errors / s_interval), "drops" => format!("{:2}", drops / s_interval), ); } } } impl Future for OwnStats { type Item = (); type Error = (); fn poll(&mut self) -> Poll<Self::Item, Self::Error> { loop { match self.timer.poll() { Ok(Async::Ready(Some(_))) => { self.get_stats(); } Ok(Async::Ready(None)) => unreachable!(), Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => return Err(()), } } } } #[derive(Clone, Debug)] /// Builder for `BackoffRetry`, delays are specified in milliseconds pub struct BackoffRetryBuilder { pub delay: u64, pub delay_mul: f32, pub delay_max: u64, pub retries: usize, } impl Default for BackoffRetryBuilder { fn default() -> Self
} impl BackoffRetryBuilder { pub fn spawn<F>(self, action: F) -> BackoffRetry<F> where F: IntoFuture + Clone, { let inner = Either::A(action.clone().into_future()); BackoffRetry { action, inner, options: self } } } /// TCP client that is able to reconnect with customizable settings pub struct BackoffRetry<F: IntoFuture> { action: F, inner: Either<F::Future, Delay>, options: BackoffRetryBuilder, } impl<F> Future for BackoffRetry<F> where F: IntoFuture + Clone, { type Item = F::Item; type Error = Option<F::Error>; fn poll(&mut self) -> Poll<Self::Item, Self::Error> { loop { let (rotate_f, rotate_t) = match self.inner { // we are polling a future currently Either::A(ref mut future) => match future.poll() { Ok(Async::Ready(item)) => { return Ok(Async::Ready(item)); } Ok(Async::NotReady) => return Ok(Async::NotReady), Err(e) => { if self.options.retries == 0 { return Err(Some(e)); } else { (true, false) } } }, Either::B(ref mut timer) => { match timer.poll() { // we are waiting for the delay Ok(Async::Ready(())) => (false, true), Ok(Async::NotReady) => return Ok(Async::NotReady), Err(_) => unreachable!(), // timer should not return error } } }; if rotate_f { self.options.retries -= 1; let delay = self.options.delay as f32 * self.options.delay_mul; let delay = if delay <= self.options.delay_max as f32 { delay as u64 } else { self.options.delay_max as u64 }; let delay = Delay::new(Instant::now() + Duration::from_millis(delay)); self.inner = Either::B(delay); } else if rotate_t { self.inner = Either::A(self.action.clone().into_future()); } } } }
{ Self { delay: 500, delay_mul: 2f32, delay_max: 10000, retries: 25, } }
identifier_body
listing.rs
use actix_web::http::StatusCode; use actix_web::{fs, http, Body, FromRequest, HttpRequest, HttpResponse, Query, Result}; use bytesize::ByteSize; use futures::stream::once; use htmlescape::encode_minimal as escape_html_entity; use percent_encoding::{utf8_percent_encode, DEFAULT_ENCODE_SET}; use serde::Deserialize; use std::io; use std::path::{Path, PathBuf}; use std::time::SystemTime; use strum_macros::{Display, EnumString}; use crate::archive::CompressionMethod; use crate::errors::{self, ContextualError}; use crate::renderer; use crate::themes::ColorScheme; /// Query parameters #[derive(Deserialize)] pub struct QueryParameters { pub path: Option<PathBuf>, pub sort: Option<SortingMethod>, pub order: Option<SortingOrder>, pub theme: Option<ColorScheme>, download: Option<CompressionMethod>, } /// Available sorting methods #[derive(Deserialize, Clone, EnumString, Display, Copy)] #[serde(rename_all = "snake_case")] #[strum(serialize_all = "snake_case")] pub enum SortingMethod { /// Sort by name Name, /// Sort by size Size, /// Sort by last modification date (natural sort: follows alphanumerical order) Date, } /// Available sorting orders #[derive(Deserialize, Clone, EnumString, Display, Copy)] pub enum SortingOrder { /// Ascending order #[serde(alias = "asc")] #[strum(serialize = "asc")] Ascending, /// Descending order #[serde(alias = "desc")] #[strum(serialize = "desc")] Descending, } #[derive(PartialEq)] /// Possible entry types pub enum EntryType { /// Entry is a directory Directory, /// Entry is a file File, /// Entry is a symlink Symlink, } /// Entry pub struct Entry { /// Name of the entry pub name: String, /// Type of the entry pub entry_type: EntryType, /// URL of the entry pub link: String, /// Size in byte of the entry. 
Only available for EntryType::File pub size: Option<bytesize::ByteSize>, /// Last modification date pub last_modification_date: Option<SystemTime>, } impl Entry { fn new( name: String, entry_type: EntryType, link: String, size: Option<bytesize::ByteSize>, last_modification_date: Option<SystemTime>, ) -> Self { Entry { name, entry_type, link, size, last_modification_date, } } /// Returns whether the entry is a directory pub fn
(&self) -> bool { self.entry_type == EntryType::Directory } /// Returns whether the entry is a file pub fn is_file(&self) -> bool { self.entry_type == EntryType::File } /// Returns whether the entry is a symlink pub fn is_symlink(&self) -> bool { self.entry_type == EntryType::Symlink } // Returns whether the entry is a video pub fn is_video(&self) -> bool { let video_extensions = vec!["mp4", "ogv", "avi", "mkv"]; self.entry_type == EntryType::File && self.extension() .map(|ext| video_extensions.contains(&ext.as_str())) .unwrap_or(false) } // Returns whether the entry is an audio file pub fn is_audio(&self) -> bool { let audio_extensions = vec!["ogg", "mp3", "aac", "flac", "wav", "m4a"]; self.entry_type == EntryType::File && self.extension() .map(|ext| audio_extensions.contains(&ext.as_str())) .unwrap_or(false) } fn extension(&self) -> Option<String> { std::path::PathBuf::from(&self.name).extension().and_then(|s| s.to_str()).map(|s| s.to_string()) } } pub fn file_handler(req: &HttpRequest<crate::MiniserveConfig>) -> Result<fs::NamedFile> { let path = &req.state().path; Ok(fs::NamedFile::open(path)?) 
} /// List a directory and renders a HTML file accordingly /// Adapted from https://docs.rs/actix-web/0.7.13/src/actix_web/fs.rs.html#564 #[allow(clippy::identity_conversion)] pub fn directory_listing<S>( dir: &fs::Directory, req: &HttpRequest<S>, skip_symlinks: bool, file_upload: bool, random_route: Option<String>, default_color_scheme: ColorScheme, upload_route: String, ) -> Result<HttpResponse, io::Error> { let serve_path = req.path(); let base = Path::new(serve_path); let random_route = format!("/{}", random_route.unwrap_or_default()); let is_root = base.parent().is_none() || req.path() == random_route; let page_parent = base.parent().map(|p| p.display().to_string()); let current_dir = match base.strip_prefix(random_route) { Ok(c_d) => Path::new("/").join(c_d), Err(_) => base.to_path_buf(), }; let query_params = extract_query_parameters(req); let mut entries: Vec<Entry> = Vec::new(); for entry in dir.path.read_dir()? { if dir.is_visible(&entry) { let entry = entry?; let p = match entry.path().strip_prefix(&dir.path) { Ok(p) => base.join(p), Err(_) => continue, }; // show file url as relative to static path let file_url = utf8_percent_encode(&p.to_string_lossy(), DEFAULT_ENCODE_SET).to_string(); // " -- &quot; & -- &amp; ' -- &#x27; < -- &lt; > -- &gt; let file_name = escape_html_entity(&entry.file_name().to_string_lossy()); // if file is a directory, add '/' to the end of the name if let Ok(metadata) = entry.metadata() { if skip_symlinks && metadata.file_type().is_symlink() { continue; } let last_modification_date = match metadata.modified() { Ok(date) => Some(date), Err(_) => None, }; if metadata.file_type().is_symlink() { entries.push(Entry::new( file_name, EntryType::Symlink, file_url, None, last_modification_date, )); } else if metadata.is_dir() { entries.push(Entry::new( file_name, EntryType::Directory, file_url, None, last_modification_date, )); } else { entries.push(Entry::new( file_name, EntryType::File, file_url, Some(ByteSize::b(metadata.len())), 
last_modification_date, )); } } else { continue; } } } if let Some(sorting_method) = query_params.sort { match sorting_method { SortingMethod::Name => entries .sort_by(|e1, e2| alphanumeric_sort::compare_str(e1.name.clone(), e2.name.clone())), SortingMethod::Size => entries.sort_by(|e1, e2| { // If we can't get the size of the entry (directory for instance) // let's consider it's 0b e2.size .unwrap_or_else(|| ByteSize::b(0)) .cmp(&e1.size.unwrap_or_else(|| ByteSize::b(0))) }), SortingMethod::Date => entries.sort_by(|e1, e2| { // If, for some reason, we can't get the last modification date of an entry // let's consider it was modified on UNIX_EPOCH (01/01/19270 00:00:00) e2.last_modification_date .unwrap_or(SystemTime::UNIX_EPOCH) .cmp(&e1.last_modification_date.unwrap_or(SystemTime::UNIX_EPOCH)) }), }; } else { // Sort in alphanumeric order by default entries.sort_by(|e1, e2| alphanumeric_sort::compare_str(e1.name.clone(), e2.name.clone())) } if let Some(sorting_order) = query_params.order { if let SortingOrder::Descending = sorting_order { entries.reverse() } } let color_scheme = query_params.theme.unwrap_or(default_color_scheme); if let Some(compression_method) = &query_params.download { log::info!( "Creating an archive ({extension}) of {path}...", extension = compression_method.extension(), path = &dir.path.display().to_string() ); match compression_method.create_archive(&dir.path, skip_symlinks) { Ok((filename, content)) => { log::info!("{file} successfully created !", file = &filename); Ok(HttpResponse::Ok() .content_type(compression_method.content_type()) .content_encoding(compression_method.content_encoding()) .header("Content-Transfer-Encoding", "binary") .header( "Content-Disposition", format!("attachment; filename={:?}", filename), ) .chunked() .body(Body::Streaming(Box::new(once(Ok(content)))))) } Err(err) => { errors::log_error_chain(err.to_string()); Ok(HttpResponse::Ok() .status(http::StatusCode::INTERNAL_SERVER_ERROR) .body( renderer::render_error( 
&err.to_string(), StatusCode::INTERNAL_SERVER_ERROR, serve_path, query_params.sort, query_params.order, color_scheme, default_color_scheme, false, true, ) .into_string(), )) } } } else { Ok(HttpResponse::Ok() .content_type("text/html; charset=utf-8") .body( renderer::page( serve_path, entries, is_root, page_parent, query_params.sort, query_params.order, default_color_scheme, color_scheme, file_upload, &upload_route, &current_dir.display().to_string(), ) .into_string(), )) } } pub fn extract_query_parameters<S>(req: &HttpRequest<S>) -> QueryParameters { match Query::<QueryParameters>::extract(req) { Ok(query) => QueryParameters { sort: query.sort, order: query.order, download: query.download.clone(), theme: query.theme, path: query.path.clone(), }, Err(e) => { let err = ContextualError::ParseError("query parameters".to_string(), e.to_string()); errors::log_error_chain(err.to_string()); QueryParameters { sort: None, order: None, download: None, theme: None, path: None, } } } }
is_dir
identifier_name
listing.rs
use actix_web::http::StatusCode; use actix_web::{fs, http, Body, FromRequest, HttpRequest, HttpResponse, Query, Result}; use bytesize::ByteSize; use futures::stream::once; use htmlescape::encode_minimal as escape_html_entity; use percent_encoding::{utf8_percent_encode, DEFAULT_ENCODE_SET}; use serde::Deserialize; use std::io; use std::path::{Path, PathBuf}; use std::time::SystemTime; use strum_macros::{Display, EnumString}; use crate::archive::CompressionMethod; use crate::errors::{self, ContextualError}; use crate::renderer; use crate::themes::ColorScheme; /// Query parameters #[derive(Deserialize)] pub struct QueryParameters { pub path: Option<PathBuf>, pub sort: Option<SortingMethod>, pub order: Option<SortingOrder>, pub theme: Option<ColorScheme>, download: Option<CompressionMethod>, } /// Available sorting methods #[derive(Deserialize, Clone, EnumString, Display, Copy)] #[serde(rename_all = "snake_case")] #[strum(serialize_all = "snake_case")] pub enum SortingMethod { /// Sort by name Name, /// Sort by size Size, /// Sort by last modification date (natural sort: follows alphanumerical order) Date, } /// Available sorting orders #[derive(Deserialize, Clone, EnumString, Display, Copy)] pub enum SortingOrder { /// Ascending order #[serde(alias = "asc")] #[strum(serialize = "asc")] Ascending, /// Descending order #[serde(alias = "desc")] #[strum(serialize = "desc")] Descending, } #[derive(PartialEq)] /// Possible entry types pub enum EntryType { /// Entry is a directory Directory, /// Entry is a file File, /// Entry is a symlink Symlink, }
/// Type of the entry pub entry_type: EntryType, /// URL of the entry pub link: String, /// Size in byte of the entry. Only available for EntryType::File pub size: Option<bytesize::ByteSize>, /// Last modification date pub last_modification_date: Option<SystemTime>, } impl Entry { fn new( name: String, entry_type: EntryType, link: String, size: Option<bytesize::ByteSize>, last_modification_date: Option<SystemTime>, ) -> Self { Entry { name, entry_type, link, size, last_modification_date, } } /// Returns whether the entry is a directory pub fn is_dir(&self) -> bool { self.entry_type == EntryType::Directory } /// Returns whether the entry is a file pub fn is_file(&self) -> bool { self.entry_type == EntryType::File } /// Returns whether the entry is a symlink pub fn is_symlink(&self) -> bool { self.entry_type == EntryType::Symlink } // Returns whether the entry is a video pub fn is_video(&self) -> bool { let video_extensions = vec!["mp4", "ogv", "avi", "mkv"]; self.entry_type == EntryType::File && self.extension() .map(|ext| video_extensions.contains(&ext.as_str())) .unwrap_or(false) } // Returns whether the entry is an audio file pub fn is_audio(&self) -> bool { let audio_extensions = vec!["ogg", "mp3", "aac", "flac", "wav", "m4a"]; self.entry_type == EntryType::File && self.extension() .map(|ext| audio_extensions.contains(&ext.as_str())) .unwrap_or(false) } fn extension(&self) -> Option<String> { std::path::PathBuf::from(&self.name).extension().and_then(|s| s.to_str()).map(|s| s.to_string()) } } pub fn file_handler(req: &HttpRequest<crate::MiniserveConfig>) -> Result<fs::NamedFile> { let path = &req.state().path; Ok(fs::NamedFile::open(path)?) 
} /// List a directory and renders a HTML file accordingly /// Adapted from https://docs.rs/actix-web/0.7.13/src/actix_web/fs.rs.html#564 #[allow(clippy::identity_conversion)] pub fn directory_listing<S>( dir: &fs::Directory, req: &HttpRequest<S>, skip_symlinks: bool, file_upload: bool, random_route: Option<String>, default_color_scheme: ColorScheme, upload_route: String, ) -> Result<HttpResponse, io::Error> { let serve_path = req.path(); let base = Path::new(serve_path); let random_route = format!("/{}", random_route.unwrap_or_default()); let is_root = base.parent().is_none() || req.path() == random_route; let page_parent = base.parent().map(|p| p.display().to_string()); let current_dir = match base.strip_prefix(random_route) { Ok(c_d) => Path::new("/").join(c_d), Err(_) => base.to_path_buf(), }; let query_params = extract_query_parameters(req); let mut entries: Vec<Entry> = Vec::new(); for entry in dir.path.read_dir()? { if dir.is_visible(&entry) { let entry = entry?; let p = match entry.path().strip_prefix(&dir.path) { Ok(p) => base.join(p), Err(_) => continue, }; // show file url as relative to static path let file_url = utf8_percent_encode(&p.to_string_lossy(), DEFAULT_ENCODE_SET).to_string(); // " -- &quot; & -- &amp; ' -- &#x27; < -- &lt; > -- &gt; let file_name = escape_html_entity(&entry.file_name().to_string_lossy()); // if file is a directory, add '/' to the end of the name if let Ok(metadata) = entry.metadata() { if skip_symlinks && metadata.file_type().is_symlink() { continue; } let last_modification_date = match metadata.modified() { Ok(date) => Some(date), Err(_) => None, }; if metadata.file_type().is_symlink() { entries.push(Entry::new( file_name, EntryType::Symlink, file_url, None, last_modification_date, )); } else if metadata.is_dir() { entries.push(Entry::new( file_name, EntryType::Directory, file_url, None, last_modification_date, )); } else { entries.push(Entry::new( file_name, EntryType::File, file_url, Some(ByteSize::b(metadata.len())), 
last_modification_date, )); } } else { continue; } } } if let Some(sorting_method) = query_params.sort { match sorting_method { SortingMethod::Name => entries .sort_by(|e1, e2| alphanumeric_sort::compare_str(e1.name.clone(), e2.name.clone())), SortingMethod::Size => entries.sort_by(|e1, e2| { // If we can't get the size of the entry (directory for instance) // let's consider it's 0b e2.size .unwrap_or_else(|| ByteSize::b(0)) .cmp(&e1.size.unwrap_or_else(|| ByteSize::b(0))) }), SortingMethod::Date => entries.sort_by(|e1, e2| { // If, for some reason, we can't get the last modification date of an entry // let's consider it was modified on UNIX_EPOCH (01/01/19270 00:00:00) e2.last_modification_date .unwrap_or(SystemTime::UNIX_EPOCH) .cmp(&e1.last_modification_date.unwrap_or(SystemTime::UNIX_EPOCH)) }), }; } else { // Sort in alphanumeric order by default entries.sort_by(|e1, e2| alphanumeric_sort::compare_str(e1.name.clone(), e2.name.clone())) } if let Some(sorting_order) = query_params.order { if let SortingOrder::Descending = sorting_order { entries.reverse() } } let color_scheme = query_params.theme.unwrap_or(default_color_scheme); if let Some(compression_method) = &query_params.download { log::info!( "Creating an archive ({extension}) of {path}...", extension = compression_method.extension(), path = &dir.path.display().to_string() ); match compression_method.create_archive(&dir.path, skip_symlinks) { Ok((filename, content)) => { log::info!("{file} successfully created !", file = &filename); Ok(HttpResponse::Ok() .content_type(compression_method.content_type()) .content_encoding(compression_method.content_encoding()) .header("Content-Transfer-Encoding", "binary") .header( "Content-Disposition", format!("attachment; filename={:?}", filename), ) .chunked() .body(Body::Streaming(Box::new(once(Ok(content)))))) } Err(err) => { errors::log_error_chain(err.to_string()); Ok(HttpResponse::Ok() .status(http::StatusCode::INTERNAL_SERVER_ERROR) .body( renderer::render_error( 
&err.to_string(), StatusCode::INTERNAL_SERVER_ERROR, serve_path, query_params.sort, query_params.order, color_scheme, default_color_scheme, false, true, ) .into_string(), )) } } } else { Ok(HttpResponse::Ok() .content_type("text/html; charset=utf-8") .body( renderer::page( serve_path, entries, is_root, page_parent, query_params.sort, query_params.order, default_color_scheme, color_scheme, file_upload, &upload_route, &current_dir.display().to_string(), ) .into_string(), )) } } pub fn extract_query_parameters<S>(req: &HttpRequest<S>) -> QueryParameters { match Query::<QueryParameters>::extract(req) { Ok(query) => QueryParameters { sort: query.sort, order: query.order, download: query.download.clone(), theme: query.theme, path: query.path.clone(), }, Err(e) => { let err = ContextualError::ParseError("query parameters".to_string(), e.to_string()); errors::log_error_chain(err.to_string()); QueryParameters { sort: None, order: None, download: None, theme: None, path: None, } } } }
/// Entry pub struct Entry { /// Name of the entry pub name: String,
random_line_split
masking.py
# # Copyright (c) 2021, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from dataclasses import dataclass from typing import Any, Dict import tensorflow as tf from tensorflow.keras import backend from tensorflow.python.ops import array_ops from merlin_standard_lib import Registry from merlin_standard_lib.utils.doc_utils import docstring_parameter masking_registry = Registry("tf.masking") MASK_SEQUENCE_PARAMETERS_DOCSTRING = """ hidden_size: int The hidden dimension of input tensors, needed to initialize trainable vector of masked positions. padding_idx: int, default = 0 Index of padding item used for getting batch of sequences with the same length eval_on_last_item_seq_only: bool, default = True Predict only last item during evaluation """ @dataclass class MaskingInfo: schema: tf.Tensor targets: tf.Tensor @docstring_parameter(mask_sequence_parameters=MASK_SEQUENCE_PARAMETERS_DOCSTRING) class MaskSequence(tf.keras.layers.Layer): """Base class to prepare masked items inputs/labels for language modeling tasks. Transformer architectures can be trained in different ways. Depending of the training method, there is a specific masking schema. The masking schema sets the items to be predicted (labels) and mask (hide) their positions in the sequence so that they are not used by the Transformer layers for prediction. 
We currently provide 4 different masking schemes out of the box: - Causal LM (clm) - Masked LM (mlm) - Permutation LM (plm) - Replacement Token Detection (rtd) This class can be extended to add different a masking scheme. Parameters ---------- {mask_sequence_parameters} """ # TODO: Link to masking-class in the doc-string. def __init__(self, padding_idx: int = 0, eval_on_last_item_seq_only: bool = True, **kwargs): super(MaskSequence, self).__init__(**kwargs) self.padding_idx = padding_idx self.eval_on_last_item_seq_only = eval_on_last_item_seq_only self.mask_schema = None self.masked_targets = None def _compute_masked_targets(self, item_ids: tf.Tensor, training=False) -> MaskingInfo: """ Method to prepare masked labels based on the sequence of item ids. It returns The true labels of masked positions and the related boolean mask. Parameters ---------- item_ids: tf.Tensor The sequence of input item ids used for deriving labels of next item prediction task. training: bool Flag to indicate whether we are in `Training` mode or not. During training, the labels can be any items within the sequence based on the selected masking task. During evaluation, we are predicting all next items or last item only in the sequence based on the param `eval_on_last_item_seq_only`. """ raise NotImplementedError def compute_masked_targets(self, item_ids: tf.Tensor, training=False) -> MaskingInfo: """ Method to prepare masked labels based on the sequence of item ids. It returns The true labels of masked positions and the related boolean mask. And the attributes of the class `mask_schema` and `masked_targets` are updated to be re-used in other modules. Parameters ---------- item_ids: tf.Tensor The sequence of input item ids used for deriving labels of next item prediction task. training: bool Flag to indicate whether we are in `Training` mode or not. During training, the labels can be any items within the sequence based on the selected masking task. 
During evaluation, we are predicting the last item in the sequence. Returns ------- Tuple[MaskingSchema, MaskedTargets] """ assert item_ids.ndim == 2, "`item_ids` must have 2 dimensions." masking_info = self._compute_masked_targets(item_ids, training=training) self.mask_schema, self.masked_targets = masking_info.schema, masking_info.targets return masking_info def apply_mask_to_inputs(self, inputs: tf.Tensor, schema: tf.Tensor) -> tf.Tensor: """ Control the masked positions in the inputs by replacing the true interaction by a learnable masked embedding. Parameters ---------- inputs: tf.Tensor The 3-D tensor of interaction embeddings resulting from the ops: TabularFeatures + aggregation + projection(optional) schema: MaskingSchema The boolean mask indicating masked positions. """ inputs = tf.where( tf.cast(tf.expand_dims(schema, -1), tf.bool), inputs, tf.cast(self.masked_item_embedding, dtype=inputs.dtype), ) return inputs def predict_all(self, item_ids: tf.Tensor) -> MaskingInfo: """ Prepare labels for all next item predictions instead of last-item predictions in a user's sequence. Parameters ---------- item_ids: tf.Tensor The sequence of input item ids used for deriving labels of next item prediction task. Returns ------- Tuple[MaskingSchema, MaskedTargets] """ # TODO : Add option to predict N-last items # shift sequence of item-ids labels = item_ids[:, 1:] # As after shifting the sequence length will be subtracted by one, adding a masked item in # the sequence to return to the initial sequence. 
# This is important for ReformerModel(), for example labels = tf.concat( [ labels, tf.zeros((labels.shape[0], 1), dtype=labels.dtype), ], axis=-1, ) # apply mask on input where target is on padding index mask_labels = labels != self.padding_idx return MaskingInfo(mask_labels, labels) def call(self, inputs: tf.Tensor, item_ids: tf.Tensor, training=False) -> tf.Tensor: _ = self.compute_masked_targets(item_ids=item_ids, training=training) return self.apply_mask_to_inputs(inputs, self.mask_schema) def transformer_required_arguments(self) -> Dict[str, Any]: return {} def transformer_optional_arguments(self) -> Dict[str, Any]: return {} @property def transformer_arguments(self) -> Dict[str, Any]: """ Prepare additional arguments to pass to the Transformer forward methods. """ return {**self.transformer_required_arguments(), **self.transformer_optional_arguments()} def build(self, input_shape): self.hidden_size = input_shape[-1] # Create a trainable embedding to replace masked interactions initializer = tf.random_normal_initializer(mean=0.0, stddev=0.001) self.masked_item_embedding = tf.Variable( initializer(shape=[self.hidden_size], dtype=tf.float32) ) return super().build(input_shape) @masking_registry.register_with_multiple_names("clm", "causal") @docstring_parameter(mask_sequence_parameters=MASK_SEQUENCE_PARAMETERS_DOCSTRING) class CausalLanguageModeling(MaskSequence): """ In Causal Language Modeling (clm) you predict the next item based on past positions of the sequence. Future positions are masked. 
Parameters ---------- {mask_sequence_parameters} train_on_last_item_seq_only: predict only last item during training """ def __init__( self, padding_idx: int = 0, eval_on_last_item_seq_only: bool = True, train_on_last_item_seq_only: bool = False, **kwargs ): super(CausalLanguageModeling, self).__init__( padding_idx=padding_idx, eval_on_last_item_seq_only=eval_on_last_item_seq_only, **kwargs ) self.train_on_last_item_seq_only = train_on_last_item_seq_only def _compute_masked_targets(self, item_ids: tf.Tensor, training=False) -> MaskingInfo: masking_info: MaskingInfo = self.predict_all(item_ids) mask_labels, labels = masking_info.schema, masking_info.targets if (self.eval_on_last_item_seq_only and not training) or ( self.train_on_last_item_seq_only and training ): rows_ids = tf.range(labels.shape[0], dtype=item_ids.dtype) label_seq_trg_eval = tf.zeros(labels.shape, dtype=labels.dtype) last_item_sessions = tf.reduce_sum(tf.cast(mask_labels, labels.dtype), axis=1) - 1 indices = tf.concat( [tf.expand_dims(rows_ids, 1), tf.expand_dims(last_item_sessions, 1)], axis=1 ) label_seq_trg_eval = tf.tensor_scatter_nd_update( label_seq_trg_eval, indices=indices, updates=tf.gather_nd(labels, indices) ) # Updating labels and mask mask_labels = label_seq_trg_eval != self.padding_idx labels = label_seq_trg_eval return MaskingInfo(mask_labels, labels) def apply_mask_to_inputs(self, inputs: tf.Tensor, mask_schema: tf.Tensor) -> tf.Tensor: # shift sequence of interaction embeddings pos_emb_inp = inputs[:, :-1] # Adding a masked item in the sequence to return to the initial sequence. 
pos_emb_inp = tf.concat( [ pos_emb_inp, tf.zeros((pos_emb_inp.shape[0], 1, pos_emb_inp.shape[2]), dtype=pos_emb_inp.dtype), ], axis=1, ) # Replacing the inputs corresponding to masked label with a trainable embedding pos_emb_inp = tf.where( tf.cast(tf.expand_dims(mask_schema, -1), tf.bool), pos_emb_inp, tf.cast(self.masked_item_embedding, dtype=inputs.dtype), ) return pos_emb_inp @masking_registry.register_with_multiple_names("mlm", "masked") class MaskedLanguageModeling(MaskSequence): """ In Masked Language Modeling (mlm) you randomly select some positions of the sequence to be predicted, which are masked. During training, the Transformer layer is allowed to use positions on the right (future info). During inference, all past items are visible for the Transformer layer, which tries to predict the next item. Parameters ---------- {mask_sequence_parameters} mlm_probability: Optional[float], default = 0.15 Probability of an item to be selected (masked) as a label of the given sequence. p.s. We enforce that at least one item is masked for each sequence, so that the network can learn something with it. """ def __init__( self, padding_idx: int = 0, eval_on_last_item_seq_only: bool = True, mlm_probability: float = 0.15, **kwargs ): super(MaskedLanguageModeling, self).__init__( padding_idx=padding_idx, eval_on_last_item_seq_only=eval_on_last_item_seq_only, **kwargs ) self.mlm_probability = mlm_probability def _compute_masked_targets(self, item_ids: tf.Tensor, training: bool = False) -> MaskingInfo: """ Prepare sequence with mask schema for masked language modeling prediction the function is based on HuggingFace's transformers/data/data_collator.py Parameters ---------- item_ids: tf.Tensor Sequence of input itemid (target) column Returns ------- labels: tf.Tensor Sequence of masked item ids. mask_labels: tf.Tensor Masking schema for masked targets positions. 
""" labels = tf.cast(tf.fill(item_ids.shape, self.padding_idx), dtype=item_ids.dtype) non_padded_mask = tf.cast(item_ids != self.padding_idx, labels.dtype) rows_ids = tf.range(labels.shape[0], dtype=tf.int64) # During training, masks labels to be predicted according to a probability, ensuring that # each session has at least one label to predict if training: # Selects a percentage of items to be masked (selected as labels) probability_matrix = tf.cast( backend.random_bernoulli(array_ops.shape(labels), p=self.mlm_probability), labels.dtype, ) mask_labels = probability_matrix * non_padded_mask labels = tf.where( tf.cast(mask_labels, tf.bool), item_ids, tf.cast(tf.fill(item_ids.shape, self.padding_idx), dtype=item_ids.dtype), ) # Set at least one item in the sequence to mask, so that the network # can learn something with this session one_random_index_by_session = tf.random.categorical( tf.math.log(tf.cast(non_padded_mask, tf.float32)), num_samples=1 ) indices = tf.concat([tf.expand_dims(rows_ids, 1), one_random_index_by_session], axis=1) labels = tf.tensor_scatter_nd_update( labels, indices=indices, updates=tf.gather_nd(item_ids, indices) ) mask_labels = tf.cast(labels != self.padding_idx, labels.dtype) # If a sequence has only masked labels, unmask one of the labels sequences_with_only_labels = tf.reduce_sum(mask_labels, axis=1) == tf.reduce_sum( non_padded_mask, axis=1 ) sampled_labels_to_unmask = tf.random.categorical( tf.math.log(tf.cast(mask_labels, tf.float32)), num_samples=1 ) labels_to_unmask = tf.boolean_mask(sampled_labels_to_unmask, sequences_with_only_labels) rows_to_unmask = tf.boolean_mask(rows_ids, sequences_with_only_labels) indices = tf.concat([tf.expand_dims(rows_to_unmask, 1), labels_to_unmask], axis=1) num_updates, _ = indices.shape.as_list() labels = tf.tensor_scatter_nd_update( labels, indices, tf.cast(tf.fill(num_updates, self.padding_idx), labels.dtype) ) mask_labels = labels != self.padding_idx else:
return MaskingInfo(mask_labels, labels) # @masking_registry.register_with_multiple_names("plm", "permutation") # class PermutationLanguageModeling(MaskSequence): # pass # # # @masking_registry.register_with_multiple_names("rtd", "replacement") # class ReplacementLanguageModeling(MaskSequence): # pass
if self.eval_on_last_item_seq_only: last_item_sessions = tf.reduce_sum(non_padded_mask, axis=1) - 1 indices = tf.concat( [ tf.expand_dims(rows_ids, 1), tf.cast(tf.expand_dims(last_item_sessions, 1), tf.int64), ], axis=1, ) labels = tf.tensor_scatter_nd_update( labels, indices=indices, updates=tf.gather_nd(item_ids, indices) ) mask_labels = labels != self.padding_idx else: masking_info = self.predict_all(item_ids) mask_labels, labels = masking_info.schema, masking_info.targets
conditional_block
masking.py
# # Copyright (c) 2021, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from dataclasses import dataclass from typing import Any, Dict import tensorflow as tf from tensorflow.keras import backend from tensorflow.python.ops import array_ops from merlin_standard_lib import Registry from merlin_standard_lib.utils.doc_utils import docstring_parameter masking_registry = Registry("tf.masking") MASK_SEQUENCE_PARAMETERS_DOCSTRING = """ hidden_size: int The hidden dimension of input tensors, needed to initialize trainable vector of masked positions. padding_idx: int, default = 0 Index of padding item used for getting batch of sequences with the same length eval_on_last_item_seq_only: bool, default = True Predict only last item during evaluation """ @dataclass class MaskingInfo: schema: tf.Tensor targets: tf.Tensor @docstring_parameter(mask_sequence_parameters=MASK_SEQUENCE_PARAMETERS_DOCSTRING) class MaskSequence(tf.keras.layers.Layer): """Base class to prepare masked items inputs/labels for language modeling tasks. Transformer architectures can be trained in different ways. Depending of the training method, there is a specific masking schema. The masking schema sets the items to be predicted (labels) and mask (hide) their positions in the sequence so that they are not used by the Transformer layers for prediction. 
We currently provide 4 different masking schemes out of the box: - Causal LM (clm) - Masked LM (mlm) - Permutation LM (plm) - Replacement Token Detection (rtd) This class can be extended to add different a masking scheme. Parameters ---------- {mask_sequence_parameters} """ # TODO: Link to masking-class in the doc-string. def __init__(self, padding_idx: int = 0, eval_on_last_item_seq_only: bool = True, **kwargs): super(MaskSequence, self).__init__(**kwargs) self.padding_idx = padding_idx self.eval_on_last_item_seq_only = eval_on_last_item_seq_only self.mask_schema = None self.masked_targets = None def _compute_masked_targets(self, item_ids: tf.Tensor, training=False) -> MaskingInfo: """ Method to prepare masked labels based on the sequence of item ids. It returns The true labels of masked positions and the related boolean mask. Parameters ---------- item_ids: tf.Tensor The sequence of input item ids used for deriving labels of next item prediction task. training: bool Flag to indicate whether we are in `Training` mode or not. During training, the labels can be any items within the sequence based on the selected masking task. During evaluation, we are predicting all next items or last item only in the sequence based on the param `eval_on_last_item_seq_only`. """ raise NotImplementedError def
(self, item_ids: tf.Tensor, training=False) -> MaskingInfo: """ Method to prepare masked labels based on the sequence of item ids. It returns The true labels of masked positions and the related boolean mask. And the attributes of the class `mask_schema` and `masked_targets` are updated to be re-used in other modules. Parameters ---------- item_ids: tf.Tensor The sequence of input item ids used for deriving labels of next item prediction task. training: bool Flag to indicate whether we are in `Training` mode or not. During training, the labels can be any items within the sequence based on the selected masking task. During evaluation, we are predicting the last item in the sequence. Returns ------- Tuple[MaskingSchema, MaskedTargets] """ assert item_ids.ndim == 2, "`item_ids` must have 2 dimensions." masking_info = self._compute_masked_targets(item_ids, training=training) self.mask_schema, self.masked_targets = masking_info.schema, masking_info.targets return masking_info def apply_mask_to_inputs(self, inputs: tf.Tensor, schema: tf.Tensor) -> tf.Tensor: """ Control the masked positions in the inputs by replacing the true interaction by a learnable masked embedding. Parameters ---------- inputs: tf.Tensor The 3-D tensor of interaction embeddings resulting from the ops: TabularFeatures + aggregation + projection(optional) schema: MaskingSchema The boolean mask indicating masked positions. """ inputs = tf.where( tf.cast(tf.expand_dims(schema, -1), tf.bool), inputs, tf.cast(self.masked_item_embedding, dtype=inputs.dtype), ) return inputs def predict_all(self, item_ids: tf.Tensor) -> MaskingInfo: """ Prepare labels for all next item predictions instead of last-item predictions in a user's sequence. Parameters ---------- item_ids: tf.Tensor The sequence of input item ids used for deriving labels of next item prediction task. 
Returns ------- Tuple[MaskingSchema, MaskedTargets] """ # TODO : Add option to predict N-last items # shift sequence of item-ids labels = item_ids[:, 1:] # As after shifting the sequence length will be subtracted by one, adding a masked item in # the sequence to return to the initial sequence. # This is important for ReformerModel(), for example labels = tf.concat( [ labels, tf.zeros((labels.shape[0], 1), dtype=labels.dtype), ], axis=-1, ) # apply mask on input where target is on padding index mask_labels = labels != self.padding_idx return MaskingInfo(mask_labels, labels) def call(self, inputs: tf.Tensor, item_ids: tf.Tensor, training=False) -> tf.Tensor: _ = self.compute_masked_targets(item_ids=item_ids, training=training) return self.apply_mask_to_inputs(inputs, self.mask_schema) def transformer_required_arguments(self) -> Dict[str, Any]: return {} def transformer_optional_arguments(self) -> Dict[str, Any]: return {} @property def transformer_arguments(self) -> Dict[str, Any]: """ Prepare additional arguments to pass to the Transformer forward methods. """ return {**self.transformer_required_arguments(), **self.transformer_optional_arguments()} def build(self, input_shape): self.hidden_size = input_shape[-1] # Create a trainable embedding to replace masked interactions initializer = tf.random_normal_initializer(mean=0.0, stddev=0.001) self.masked_item_embedding = tf.Variable( initializer(shape=[self.hidden_size], dtype=tf.float32) ) return super().build(input_shape) @masking_registry.register_with_multiple_names("clm", "causal") @docstring_parameter(mask_sequence_parameters=MASK_SEQUENCE_PARAMETERS_DOCSTRING) class CausalLanguageModeling(MaskSequence): """ In Causal Language Modeling (clm) you predict the next item based on past positions of the sequence. Future positions are masked. 
Parameters ---------- {mask_sequence_parameters} train_on_last_item_seq_only: predict only last item during training """ def __init__( self, padding_idx: int = 0, eval_on_last_item_seq_only: bool = True, train_on_last_item_seq_only: bool = False, **kwargs ): super(CausalLanguageModeling, self).__init__( padding_idx=padding_idx, eval_on_last_item_seq_only=eval_on_last_item_seq_only, **kwargs ) self.train_on_last_item_seq_only = train_on_last_item_seq_only def _compute_masked_targets(self, item_ids: tf.Tensor, training=False) -> MaskingInfo: masking_info: MaskingInfo = self.predict_all(item_ids) mask_labels, labels = masking_info.schema, masking_info.targets if (self.eval_on_last_item_seq_only and not training) or ( self.train_on_last_item_seq_only and training ): rows_ids = tf.range(labels.shape[0], dtype=item_ids.dtype) label_seq_trg_eval = tf.zeros(labels.shape, dtype=labels.dtype) last_item_sessions = tf.reduce_sum(tf.cast(mask_labels, labels.dtype), axis=1) - 1 indices = tf.concat( [tf.expand_dims(rows_ids, 1), tf.expand_dims(last_item_sessions, 1)], axis=1 ) label_seq_trg_eval = tf.tensor_scatter_nd_update( label_seq_trg_eval, indices=indices, updates=tf.gather_nd(labels, indices) ) # Updating labels and mask mask_labels = label_seq_trg_eval != self.padding_idx labels = label_seq_trg_eval return MaskingInfo(mask_labels, labels) def apply_mask_to_inputs(self, inputs: tf.Tensor, mask_schema: tf.Tensor) -> tf.Tensor: # shift sequence of interaction embeddings pos_emb_inp = inputs[:, :-1] # Adding a masked item in the sequence to return to the initial sequence. 
pos_emb_inp = tf.concat( [ pos_emb_inp, tf.zeros((pos_emb_inp.shape[0], 1, pos_emb_inp.shape[2]), dtype=pos_emb_inp.dtype), ], axis=1, ) # Replacing the inputs corresponding to masked label with a trainable embedding pos_emb_inp = tf.where( tf.cast(tf.expand_dims(mask_schema, -1), tf.bool), pos_emb_inp, tf.cast(self.masked_item_embedding, dtype=inputs.dtype), ) return pos_emb_inp @masking_registry.register_with_multiple_names("mlm", "masked") class MaskedLanguageModeling(MaskSequence): """ In Masked Language Modeling (mlm) you randomly select some positions of the sequence to be predicted, which are masked. During training, the Transformer layer is allowed to use positions on the right (future info). During inference, all past items are visible for the Transformer layer, which tries to predict the next item. Parameters ---------- {mask_sequence_parameters} mlm_probability: Optional[float], default = 0.15 Probability of an item to be selected (masked) as a label of the given sequence. p.s. We enforce that at least one item is masked for each sequence, so that the network can learn something with it. """ def __init__( self, padding_idx: int = 0, eval_on_last_item_seq_only: bool = True, mlm_probability: float = 0.15, **kwargs ): super(MaskedLanguageModeling, self).__init__( padding_idx=padding_idx, eval_on_last_item_seq_only=eval_on_last_item_seq_only, **kwargs ) self.mlm_probability = mlm_probability def _compute_masked_targets(self, item_ids: tf.Tensor, training: bool = False) -> MaskingInfo: """ Prepare sequence with mask schema for masked language modeling prediction the function is based on HuggingFace's transformers/data/data_collator.py Parameters ---------- item_ids: tf.Tensor Sequence of input itemid (target) column Returns ------- labels: tf.Tensor Sequence of masked item ids. mask_labels: tf.Tensor Masking schema for masked targets positions. 
""" labels = tf.cast(tf.fill(item_ids.shape, self.padding_idx), dtype=item_ids.dtype) non_padded_mask = tf.cast(item_ids != self.padding_idx, labels.dtype) rows_ids = tf.range(labels.shape[0], dtype=tf.int64) # During training, masks labels to be predicted according to a probability, ensuring that # each session has at least one label to predict if training: # Selects a percentage of items to be masked (selected as labels) probability_matrix = tf.cast( backend.random_bernoulli(array_ops.shape(labels), p=self.mlm_probability), labels.dtype, ) mask_labels = probability_matrix * non_padded_mask labels = tf.where( tf.cast(mask_labels, tf.bool), item_ids, tf.cast(tf.fill(item_ids.shape, self.padding_idx), dtype=item_ids.dtype), ) # Set at least one item in the sequence to mask, so that the network # can learn something with this session one_random_index_by_session = tf.random.categorical( tf.math.log(tf.cast(non_padded_mask, tf.float32)), num_samples=1 ) indices = tf.concat([tf.expand_dims(rows_ids, 1), one_random_index_by_session], axis=1) labels = tf.tensor_scatter_nd_update( labels, indices=indices, updates=tf.gather_nd(item_ids, indices) ) mask_labels = tf.cast(labels != self.padding_idx, labels.dtype) # If a sequence has only masked labels, unmask one of the labels sequences_with_only_labels = tf.reduce_sum(mask_labels, axis=1) == tf.reduce_sum( non_padded_mask, axis=1 ) sampled_labels_to_unmask = tf.random.categorical( tf.math.log(tf.cast(mask_labels, tf.float32)), num_samples=1 ) labels_to_unmask = tf.boolean_mask(sampled_labels_to_unmask, sequences_with_only_labels) rows_to_unmask = tf.boolean_mask(rows_ids, sequences_with_only_labels) indices = tf.concat([tf.expand_dims(rows_to_unmask, 1), labels_to_unmask], axis=1) num_updates, _ = indices.shape.as_list() labels = tf.tensor_scatter_nd_update( labels, indices, tf.cast(tf.fill(num_updates, self.padding_idx), labels.dtype) ) mask_labels = labels != self.padding_idx else: if self.eval_on_last_item_seq_only: 
last_item_sessions = tf.reduce_sum(non_padded_mask, axis=1) - 1 indices = tf.concat( [ tf.expand_dims(rows_ids, 1), tf.cast(tf.expand_dims(last_item_sessions, 1), tf.int64), ], axis=1, ) labels = tf.tensor_scatter_nd_update( labels, indices=indices, updates=tf.gather_nd(item_ids, indices) ) mask_labels = labels != self.padding_idx else: masking_info = self.predict_all(item_ids) mask_labels, labels = masking_info.schema, masking_info.targets return MaskingInfo(mask_labels, labels) # @masking_registry.register_with_multiple_names("plm", "permutation") # class PermutationLanguageModeling(MaskSequence): # pass # # # @masking_registry.register_with_multiple_names("rtd", "replacement") # class ReplacementLanguageModeling(MaskSequence): # pass
compute_masked_targets
identifier_name
masking.py
# # Copyright (c) 2021, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from dataclasses import dataclass from typing import Any, Dict import tensorflow as tf from tensorflow.keras import backend from tensorflow.python.ops import array_ops from merlin_standard_lib import Registry from merlin_standard_lib.utils.doc_utils import docstring_parameter masking_registry = Registry("tf.masking") MASK_SEQUENCE_PARAMETERS_DOCSTRING = """ hidden_size: int The hidden dimension of input tensors, needed to initialize trainable vector of masked positions. padding_idx: int, default = 0 Index of padding item used for getting batch of sequences with the same length eval_on_last_item_seq_only: bool, default = True Predict only last item during evaluation """ @dataclass class MaskingInfo: schema: tf.Tensor targets: tf.Tensor @docstring_parameter(mask_sequence_parameters=MASK_SEQUENCE_PARAMETERS_DOCSTRING) class MaskSequence(tf.keras.layers.Layer): """Base class to prepare masked items inputs/labels for language modeling tasks. Transformer architectures can be trained in different ways. Depending of the training method, there is a specific masking schema. The masking schema sets the items to be predicted (labels) and mask (hide) their positions in the sequence so that they are not used by the Transformer layers for prediction. 
We currently provide 4 different masking schemes out of the box: - Causal LM (clm) - Masked LM (mlm) - Permutation LM (plm) - Replacement Token Detection (rtd) This class can be extended to add different a masking scheme. Parameters ---------- {mask_sequence_parameters} """ # TODO: Link to masking-class in the doc-string. def __init__(self, padding_idx: int = 0, eval_on_last_item_seq_only: bool = True, **kwargs):
def _compute_masked_targets(self, item_ids: tf.Tensor, training=False) -> MaskingInfo: """ Method to prepare masked labels based on the sequence of item ids. It returns The true labels of masked positions and the related boolean mask. Parameters ---------- item_ids: tf.Tensor The sequence of input item ids used for deriving labels of next item prediction task. training: bool Flag to indicate whether we are in `Training` mode or not. During training, the labels can be any items within the sequence based on the selected masking task. During evaluation, we are predicting all next items or last item only in the sequence based on the param `eval_on_last_item_seq_only`. """ raise NotImplementedError def compute_masked_targets(self, item_ids: tf.Tensor, training=False) -> MaskingInfo: """ Method to prepare masked labels based on the sequence of item ids. It returns The true labels of masked positions and the related boolean mask. And the attributes of the class `mask_schema` and `masked_targets` are updated to be re-used in other modules. Parameters ---------- item_ids: tf.Tensor The sequence of input item ids used for deriving labels of next item prediction task. training: bool Flag to indicate whether we are in `Training` mode or not. During training, the labels can be any items within the sequence based on the selected masking task. During evaluation, we are predicting the last item in the sequence. Returns ------- Tuple[MaskingSchema, MaskedTargets] """ assert item_ids.ndim == 2, "`item_ids` must have 2 dimensions." masking_info = self._compute_masked_targets(item_ids, training=training) self.mask_schema, self.masked_targets = masking_info.schema, masking_info.targets return masking_info def apply_mask_to_inputs(self, inputs: tf.Tensor, schema: tf.Tensor) -> tf.Tensor: """ Control the masked positions in the inputs by replacing the true interaction by a learnable masked embedding. 
Parameters ---------- inputs: tf.Tensor The 3-D tensor of interaction embeddings resulting from the ops: TabularFeatures + aggregation + projection(optional) schema: MaskingSchema The boolean mask indicating masked positions. """ inputs = tf.where( tf.cast(tf.expand_dims(schema, -1), tf.bool), inputs, tf.cast(self.masked_item_embedding, dtype=inputs.dtype), ) return inputs def predict_all(self, item_ids: tf.Tensor) -> MaskingInfo: """ Prepare labels for all next item predictions instead of last-item predictions in a user's sequence. Parameters ---------- item_ids: tf.Tensor The sequence of input item ids used for deriving labels of next item prediction task. Returns ------- Tuple[MaskingSchema, MaskedTargets] """ # TODO : Add option to predict N-last items # shift sequence of item-ids labels = item_ids[:, 1:] # As after shifting the sequence length will be subtracted by one, adding a masked item in # the sequence to return to the initial sequence. # This is important for ReformerModel(), for example labels = tf.concat( [ labels, tf.zeros((labels.shape[0], 1), dtype=labels.dtype), ], axis=-1, ) # apply mask on input where target is on padding index mask_labels = labels != self.padding_idx return MaskingInfo(mask_labels, labels) def call(self, inputs: tf.Tensor, item_ids: tf.Tensor, training=False) -> tf.Tensor: _ = self.compute_masked_targets(item_ids=item_ids, training=training) return self.apply_mask_to_inputs(inputs, self.mask_schema) def transformer_required_arguments(self) -> Dict[str, Any]: return {} def transformer_optional_arguments(self) -> Dict[str, Any]: return {} @property def transformer_arguments(self) -> Dict[str, Any]: """ Prepare additional arguments to pass to the Transformer forward methods. 
""" return {**self.transformer_required_arguments(), **self.transformer_optional_arguments()} def build(self, input_shape): self.hidden_size = input_shape[-1] # Create a trainable embedding to replace masked interactions initializer = tf.random_normal_initializer(mean=0.0, stddev=0.001) self.masked_item_embedding = tf.Variable( initializer(shape=[self.hidden_size], dtype=tf.float32) ) return super().build(input_shape) @masking_registry.register_with_multiple_names("clm", "causal") @docstring_parameter(mask_sequence_parameters=MASK_SEQUENCE_PARAMETERS_DOCSTRING) class CausalLanguageModeling(MaskSequence): """ In Causal Language Modeling (clm) you predict the next item based on past positions of the sequence. Future positions are masked. Parameters ---------- {mask_sequence_parameters} train_on_last_item_seq_only: predict only last item during training """ def __init__( self, padding_idx: int = 0, eval_on_last_item_seq_only: bool = True, train_on_last_item_seq_only: bool = False, **kwargs ): super(CausalLanguageModeling, self).__init__( padding_idx=padding_idx, eval_on_last_item_seq_only=eval_on_last_item_seq_only, **kwargs ) self.train_on_last_item_seq_only = train_on_last_item_seq_only def _compute_masked_targets(self, item_ids: tf.Tensor, training=False) -> MaskingInfo: masking_info: MaskingInfo = self.predict_all(item_ids) mask_labels, labels = masking_info.schema, masking_info.targets if (self.eval_on_last_item_seq_only and not training) or ( self.train_on_last_item_seq_only and training ): rows_ids = tf.range(labels.shape[0], dtype=item_ids.dtype) label_seq_trg_eval = tf.zeros(labels.shape, dtype=labels.dtype) last_item_sessions = tf.reduce_sum(tf.cast(mask_labels, labels.dtype), axis=1) - 1 indices = tf.concat( [tf.expand_dims(rows_ids, 1), tf.expand_dims(last_item_sessions, 1)], axis=1 ) label_seq_trg_eval = tf.tensor_scatter_nd_update( label_seq_trg_eval, indices=indices, updates=tf.gather_nd(labels, indices) ) # Updating labels and mask mask_labels = 
label_seq_trg_eval != self.padding_idx labels = label_seq_trg_eval return MaskingInfo(mask_labels, labels) def apply_mask_to_inputs(self, inputs: tf.Tensor, mask_schema: tf.Tensor) -> tf.Tensor: # shift sequence of interaction embeddings pos_emb_inp = inputs[:, :-1] # Adding a masked item in the sequence to return to the initial sequence. pos_emb_inp = tf.concat( [ pos_emb_inp, tf.zeros((pos_emb_inp.shape[0], 1, pos_emb_inp.shape[2]), dtype=pos_emb_inp.dtype), ], axis=1, ) # Replacing the inputs corresponding to masked label with a trainable embedding pos_emb_inp = tf.where( tf.cast(tf.expand_dims(mask_schema, -1), tf.bool), pos_emb_inp, tf.cast(self.masked_item_embedding, dtype=inputs.dtype), ) return pos_emb_inp @masking_registry.register_with_multiple_names("mlm", "masked") class MaskedLanguageModeling(MaskSequence): """ In Masked Language Modeling (mlm) you randomly select some positions of the sequence to be predicted, which are masked. During training, the Transformer layer is allowed to use positions on the right (future info). During inference, all past items are visible for the Transformer layer, which tries to predict the next item. Parameters ---------- {mask_sequence_parameters} mlm_probability: Optional[float], default = 0.15 Probability of an item to be selected (masked) as a label of the given sequence. p.s. We enforce that at least one item is masked for each sequence, so that the network can learn something with it. 
""" def __init__( self, padding_idx: int = 0, eval_on_last_item_seq_only: bool = True, mlm_probability: float = 0.15, **kwargs ): super(MaskedLanguageModeling, self).__init__( padding_idx=padding_idx, eval_on_last_item_seq_only=eval_on_last_item_seq_only, **kwargs ) self.mlm_probability = mlm_probability def _compute_masked_targets(self, item_ids: tf.Tensor, training: bool = False) -> MaskingInfo: """ Prepare sequence with mask schema for masked language modeling prediction the function is based on HuggingFace's transformers/data/data_collator.py Parameters ---------- item_ids: tf.Tensor Sequence of input itemid (target) column Returns ------- labels: tf.Tensor Sequence of masked item ids. mask_labels: tf.Tensor Masking schema for masked targets positions. """ labels = tf.cast(tf.fill(item_ids.shape, self.padding_idx), dtype=item_ids.dtype) non_padded_mask = tf.cast(item_ids != self.padding_idx, labels.dtype) rows_ids = tf.range(labels.shape[0], dtype=tf.int64) # During training, masks labels to be predicted according to a probability, ensuring that # each session has at least one label to predict if training: # Selects a percentage of items to be masked (selected as labels) probability_matrix = tf.cast( backend.random_bernoulli(array_ops.shape(labels), p=self.mlm_probability), labels.dtype, ) mask_labels = probability_matrix * non_padded_mask labels = tf.where( tf.cast(mask_labels, tf.bool), item_ids, tf.cast(tf.fill(item_ids.shape, self.padding_idx), dtype=item_ids.dtype), ) # Set at least one item in the sequence to mask, so that the network # can learn something with this session one_random_index_by_session = tf.random.categorical( tf.math.log(tf.cast(non_padded_mask, tf.float32)), num_samples=1 ) indices = tf.concat([tf.expand_dims(rows_ids, 1), one_random_index_by_session], axis=1) labels = tf.tensor_scatter_nd_update( labels, indices=indices, updates=tf.gather_nd(item_ids, indices) ) mask_labels = tf.cast(labels != self.padding_idx, labels.dtype) # If a 
sequence has only masked labels, unmask one of the labels sequences_with_only_labels = tf.reduce_sum(mask_labels, axis=1) == tf.reduce_sum( non_padded_mask, axis=1 ) sampled_labels_to_unmask = tf.random.categorical( tf.math.log(tf.cast(mask_labels, tf.float32)), num_samples=1 ) labels_to_unmask = tf.boolean_mask(sampled_labels_to_unmask, sequences_with_only_labels) rows_to_unmask = tf.boolean_mask(rows_ids, sequences_with_only_labels) indices = tf.concat([tf.expand_dims(rows_to_unmask, 1), labels_to_unmask], axis=1) num_updates, _ = indices.shape.as_list() labels = tf.tensor_scatter_nd_update( labels, indices, tf.cast(tf.fill(num_updates, self.padding_idx), labels.dtype) ) mask_labels = labels != self.padding_idx else: if self.eval_on_last_item_seq_only: last_item_sessions = tf.reduce_sum(non_padded_mask, axis=1) - 1 indices = tf.concat( [ tf.expand_dims(rows_ids, 1), tf.cast(tf.expand_dims(last_item_sessions, 1), tf.int64), ], axis=1, ) labels = tf.tensor_scatter_nd_update( labels, indices=indices, updates=tf.gather_nd(item_ids, indices) ) mask_labels = labels != self.padding_idx else: masking_info = self.predict_all(item_ids) mask_labels, labels = masking_info.schema, masking_info.targets return MaskingInfo(mask_labels, labels) # @masking_registry.register_with_multiple_names("plm", "permutation") # class PermutationLanguageModeling(MaskSequence): # pass # # # @masking_registry.register_with_multiple_names("rtd", "replacement") # class ReplacementLanguageModeling(MaskSequence): # pass
super(MaskSequence, self).__init__(**kwargs) self.padding_idx = padding_idx self.eval_on_last_item_seq_only = eval_on_last_item_seq_only self.mask_schema = None self.masked_targets = None
identifier_body
masking.py
# # Copyright (c) 2021, NVIDIA CORPORATION. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from dataclasses import dataclass from typing import Any, Dict import tensorflow as tf from tensorflow.keras import backend from tensorflow.python.ops import array_ops from merlin_standard_lib import Registry from merlin_standard_lib.utils.doc_utils import docstring_parameter masking_registry = Registry("tf.masking") MASK_SEQUENCE_PARAMETERS_DOCSTRING = """ hidden_size: int The hidden dimension of input tensors, needed to initialize trainable vector of masked positions. padding_idx: int, default = 0 Index of padding item used for getting batch of sequences with the same length eval_on_last_item_seq_only: bool, default = True Predict only last item during evaluation """ @dataclass class MaskingInfo: schema: tf.Tensor targets: tf.Tensor @docstring_parameter(mask_sequence_parameters=MASK_SEQUENCE_PARAMETERS_DOCSTRING) class MaskSequence(tf.keras.layers.Layer): """Base class to prepare masked items inputs/labels for language modeling tasks. Transformer architectures can be trained in different ways. Depending of the training method, there is a specific masking schema. The masking schema sets the items to be predicted (labels) and mask (hide) their positions in the sequence so that they are not used by the Transformer layers for prediction. 
We currently provide 4 different masking schemes out of the box: - Causal LM (clm) - Masked LM (mlm) - Permutation LM (plm) - Replacement Token Detection (rtd) This class can be extended to add different a masking scheme. Parameters ---------- {mask_sequence_parameters} """ # TODO: Link to masking-class in the doc-string. def __init__(self, padding_idx: int = 0, eval_on_last_item_seq_only: bool = True, **kwargs): super(MaskSequence, self).__init__(**kwargs) self.padding_idx = padding_idx self.eval_on_last_item_seq_only = eval_on_last_item_seq_only self.mask_schema = None self.masked_targets = None def _compute_masked_targets(self, item_ids: tf.Tensor, training=False) -> MaskingInfo:
""" Method to prepare masked labels based on the sequence of item ids. It returns The true labels of masked positions and the related boolean mask. Parameters ---------- item_ids: tf.Tensor The sequence of input item ids used for deriving labels of next item prediction task. training: bool Flag to indicate whether we are in `Training` mode or not. During training, the labels can be any items within the sequence based on the selected masking task. During evaluation, we are predicting all next items or last item only in the sequence based on the param `eval_on_last_item_seq_only`. """ raise NotImplementedError def compute_masked_targets(self, item_ids: tf.Tensor, training=False) -> MaskingInfo: """ Method to prepare masked labels based on the sequence of item ids. It returns The true labels of masked positions and the related boolean mask. And the attributes of the class `mask_schema` and `masked_targets` are updated to be re-used in other modules. Parameters ---------- item_ids: tf.Tensor The sequence of input item ids used for deriving labels of next item prediction task. training: bool Flag to indicate whether we are in `Training` mode or not. During training, the labels can be any items within the sequence based on the selected masking task. During evaluation, we are predicting the last item in the sequence. Returns ------- Tuple[MaskingSchema, MaskedTargets] """ assert item_ids.ndim == 2, "`item_ids` must have 2 dimensions." masking_info = self._compute_masked_targets(item_ids, training=training) self.mask_schema, self.masked_targets = masking_info.schema, masking_info.targets return masking_info def apply_mask_to_inputs(self, inputs: tf.Tensor, schema: tf.Tensor) -> tf.Tensor: """ Control the masked positions in the inputs by replacing the true interaction by a learnable masked embedding. 
Parameters ---------- inputs: tf.Tensor The 3-D tensor of interaction embeddings resulting from the ops: TabularFeatures + aggregation + projection(optional) schema: MaskingSchema The boolean mask indicating masked positions. """ inputs = tf.where( tf.cast(tf.expand_dims(schema, -1), tf.bool), inputs, tf.cast(self.masked_item_embedding, dtype=inputs.dtype), ) return inputs def predict_all(self, item_ids: tf.Tensor) -> MaskingInfo: """ Prepare labels for all next item predictions instead of last-item predictions in a user's sequence. Parameters ---------- item_ids: tf.Tensor The sequence of input item ids used for deriving labels of next item prediction task. Returns ------- Tuple[MaskingSchema, MaskedTargets] """ # TODO : Add option to predict N-last items # shift sequence of item-ids labels = item_ids[:, 1:] # As after shifting the sequence length will be subtracted by one, adding a masked item in # the sequence to return to the initial sequence. # This is important for ReformerModel(), for example labels = tf.concat( [ labels, tf.zeros((labels.shape[0], 1), dtype=labels.dtype), ], axis=-1, ) # apply mask on input where target is on padding index mask_labels = labels != self.padding_idx return MaskingInfo(mask_labels, labels) def call(self, inputs: tf.Tensor, item_ids: tf.Tensor, training=False) -> tf.Tensor: _ = self.compute_masked_targets(item_ids=item_ids, training=training) return self.apply_mask_to_inputs(inputs, self.mask_schema) def transformer_required_arguments(self) -> Dict[str, Any]: return {} def transformer_optional_arguments(self) -> Dict[str, Any]: return {} @property def transformer_arguments(self) -> Dict[str, Any]: """ Prepare additional arguments to pass to the Transformer forward methods. 
""" return {**self.transformer_required_arguments(), **self.transformer_optional_arguments()} def build(self, input_shape): self.hidden_size = input_shape[-1] # Create a trainable embedding to replace masked interactions initializer = tf.random_normal_initializer(mean=0.0, stddev=0.001) self.masked_item_embedding = tf.Variable( initializer(shape=[self.hidden_size], dtype=tf.float32) ) return super().build(input_shape) @masking_registry.register_with_multiple_names("clm", "causal") @docstring_parameter(mask_sequence_parameters=MASK_SEQUENCE_PARAMETERS_DOCSTRING) class CausalLanguageModeling(MaskSequence): """ In Causal Language Modeling (clm) you predict the next item based on past positions of the sequence. Future positions are masked. Parameters ---------- {mask_sequence_parameters} train_on_last_item_seq_only: predict only last item during training """ def __init__( self, padding_idx: int = 0, eval_on_last_item_seq_only: bool = True, train_on_last_item_seq_only: bool = False, **kwargs ): super(CausalLanguageModeling, self).__init__( padding_idx=padding_idx, eval_on_last_item_seq_only=eval_on_last_item_seq_only, **kwargs ) self.train_on_last_item_seq_only = train_on_last_item_seq_only def _compute_masked_targets(self, item_ids: tf.Tensor, training=False) -> MaskingInfo: masking_info: MaskingInfo = self.predict_all(item_ids) mask_labels, labels = masking_info.schema, masking_info.targets if (self.eval_on_last_item_seq_only and not training) or ( self.train_on_last_item_seq_only and training ): rows_ids = tf.range(labels.shape[0], dtype=item_ids.dtype) label_seq_trg_eval = tf.zeros(labels.shape, dtype=labels.dtype) last_item_sessions = tf.reduce_sum(tf.cast(mask_labels, labels.dtype), axis=1) - 1 indices = tf.concat( [tf.expand_dims(rows_ids, 1), tf.expand_dims(last_item_sessions, 1)], axis=1 ) label_seq_trg_eval = tf.tensor_scatter_nd_update( label_seq_trg_eval, indices=indices, updates=tf.gather_nd(labels, indices) ) # Updating labels and mask mask_labels = 
label_seq_trg_eval != self.padding_idx labels = label_seq_trg_eval return MaskingInfo(mask_labels, labels) def apply_mask_to_inputs(self, inputs: tf.Tensor, mask_schema: tf.Tensor) -> tf.Tensor: # shift sequence of interaction embeddings pos_emb_inp = inputs[:, :-1] # Adding a masked item in the sequence to return to the initial sequence. pos_emb_inp = tf.concat( [ pos_emb_inp, tf.zeros((pos_emb_inp.shape[0], 1, pos_emb_inp.shape[2]), dtype=pos_emb_inp.dtype), ], axis=1, ) # Replacing the inputs corresponding to masked label with a trainable embedding pos_emb_inp = tf.where( tf.cast(tf.expand_dims(mask_schema, -1), tf.bool), pos_emb_inp, tf.cast(self.masked_item_embedding, dtype=inputs.dtype), ) return pos_emb_inp @masking_registry.register_with_multiple_names("mlm", "masked") class MaskedLanguageModeling(MaskSequence): """ In Masked Language Modeling (mlm) you randomly select some positions of the sequence to be predicted, which are masked. During training, the Transformer layer is allowed to use positions on the right (future info). During inference, all past items are visible for the Transformer layer, which tries to predict the next item. Parameters ---------- {mask_sequence_parameters} mlm_probability: Optional[float], default = 0.15 Probability of an item to be selected (masked) as a label of the given sequence. p.s. We enforce that at least one item is masked for each sequence, so that the network can learn something with it. 
""" def __init__( self, padding_idx: int = 0, eval_on_last_item_seq_only: bool = True, mlm_probability: float = 0.15, **kwargs ): super(MaskedLanguageModeling, self).__init__( padding_idx=padding_idx, eval_on_last_item_seq_only=eval_on_last_item_seq_only, **kwargs ) self.mlm_probability = mlm_probability def _compute_masked_targets(self, item_ids: tf.Tensor, training: bool = False) -> MaskingInfo: """ Prepare sequence with mask schema for masked language modeling prediction the function is based on HuggingFace's transformers/data/data_collator.py Parameters ---------- item_ids: tf.Tensor Sequence of input itemid (target) column Returns ------- labels: tf.Tensor Sequence of masked item ids. mask_labels: tf.Tensor Masking schema for masked targets positions. """ labels = tf.cast(tf.fill(item_ids.shape, self.padding_idx), dtype=item_ids.dtype) non_padded_mask = tf.cast(item_ids != self.padding_idx, labels.dtype) rows_ids = tf.range(labels.shape[0], dtype=tf.int64) # During training, masks labels to be predicted according to a probability, ensuring that # each session has at least one label to predict if training: # Selects a percentage of items to be masked (selected as labels) probability_matrix = tf.cast( backend.random_bernoulli(array_ops.shape(labels), p=self.mlm_probability), labels.dtype, ) mask_labels = probability_matrix * non_padded_mask labels = tf.where( tf.cast(mask_labels, tf.bool), item_ids, tf.cast(tf.fill(item_ids.shape, self.padding_idx), dtype=item_ids.dtype), ) # Set at least one item in the sequence to mask, so that the network # can learn something with this session one_random_index_by_session = tf.random.categorical( tf.math.log(tf.cast(non_padded_mask, tf.float32)), num_samples=1 ) indices = tf.concat([tf.expand_dims(rows_ids, 1), one_random_index_by_session], axis=1) labels = tf.tensor_scatter_nd_update( labels, indices=indices, updates=tf.gather_nd(item_ids, indices) ) mask_labels = tf.cast(labels != self.padding_idx, labels.dtype) # If a 
sequence has only masked labels, unmask one of the labels sequences_with_only_labels = tf.reduce_sum(mask_labels, axis=1) == tf.reduce_sum( non_padded_mask, axis=1 ) sampled_labels_to_unmask = tf.random.categorical( tf.math.log(tf.cast(mask_labels, tf.float32)), num_samples=1 ) labels_to_unmask = tf.boolean_mask(sampled_labels_to_unmask, sequences_with_only_labels) rows_to_unmask = tf.boolean_mask(rows_ids, sequences_with_only_labels) indices = tf.concat([tf.expand_dims(rows_to_unmask, 1), labels_to_unmask], axis=1) num_updates, _ = indices.shape.as_list() labels = tf.tensor_scatter_nd_update( labels, indices, tf.cast(tf.fill(num_updates, self.padding_idx), labels.dtype) ) mask_labels = labels != self.padding_idx else: if self.eval_on_last_item_seq_only: last_item_sessions = tf.reduce_sum(non_padded_mask, axis=1) - 1 indices = tf.concat( [ tf.expand_dims(rows_ids, 1), tf.cast(tf.expand_dims(last_item_sessions, 1), tf.int64), ], axis=1, ) labels = tf.tensor_scatter_nd_update( labels, indices=indices, updates=tf.gather_nd(item_ids, indices) ) mask_labels = labels != self.padding_idx else: masking_info = self.predict_all(item_ids) mask_labels, labels = masking_info.schema, masking_info.targets return MaskingInfo(mask_labels, labels) # @masking_registry.register_with_multiple_names("plm", "permutation") # class PermutationLanguageModeling(MaskSequence): # pass # # # @masking_registry.register_with_multiple_names("rtd", "replacement") # class ReplacementLanguageModeling(MaskSequence): # pass
random_line_split
app.rs
//! Contains the main types a user needs to interact with to configure and run a skulpin app use crate::skia_safe; use crate::winit; use super::app_control::AppControl; use super::input_state::InputState; use super::time_state::TimeState; use super::util::PeriodicEvent; use skulpin_renderer::LogicalSize; use skulpin_renderer::Size; use skulpin_renderer::RendererBuilder; use skulpin_renderer::CoordinateSystem; use skulpin_renderer::CoordinateSystemHelper; use skulpin_renderer::ValidationMode; use skulpin_renderer::rafx::api::RafxError; use crate::rafx::api::RafxExtents2D; /// Represents an error from creating the renderer #[derive(Debug)] pub enum AppError { RafxError(skulpin_renderer::rafx::api::RafxError), WinitError(winit::error::OsError), } impl std::error::Error for AppError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match *self { AppError::RafxError(ref e) => Some(e), AppError::WinitError(ref e) => Some(e), } } } impl core::fmt::Display for AppError {
match *self { AppError::RafxError(ref e) => e.fmt(fmt), AppError::WinitError(ref e) => e.fmt(fmt), } } } impl From<RafxError> for AppError { fn from(result: RafxError) -> Self { AppError::RafxError(result) } } impl From<winit::error::OsError> for AppError { fn from(result: winit::error::OsError) -> Self { AppError::WinitError(result) } } pub struct AppUpdateArgs<'a, 'b, 'c> { pub app_control: &'a mut AppControl, pub input_state: &'b InputState, pub time_state: &'c TimeState, } pub struct AppDrawArgs<'a, 'b, 'c, 'd> { pub app_control: &'a AppControl, pub input_state: &'b InputState, pub time_state: &'c TimeState, pub canvas: &'d mut skia_safe::Canvas, pub coordinate_system_helper: CoordinateSystemHelper, } /// A skulpin app requires implementing the AppHandler. A separate update and draw call must be /// implemented. /// /// `update` is called when winit provides a `winit::event::Event::MainEventsCleared` message /// /// `draw` is called when winit provides a `winit::event::RedrawRequested` message /// /// I would recommend putting general logic you always want to run in the `update` and just /// rendering code in the `draw`. pub trait AppHandler { /// Called frequently, this is the intended place to put non-rendering logic fn update( &mut self, update_args: AppUpdateArgs, ); /// Called frequently, this is the intended place to put drawing code fn draw( &mut self, draw_args: AppDrawArgs, ); fn fatal_error( &mut self, error: &AppError, ); } /// Used to configure the app behavior and create the app pub struct AppBuilder { inner_size: Size, window_title: String, renderer_builder: RendererBuilder, } impl Default for AppBuilder { fn default() -> Self { AppBuilder::new() } } impl AppBuilder { /// Construct the app builder initialized with default options pub fn new() -> Self { AppBuilder { inner_size: LogicalSize::new(900, 600).into(), window_title: "Skulpin".to_string(), renderer_builder: RendererBuilder::new(), } } /// Specifies the inner size of the window. 
Both physical and logical coordinates are accepted. pub fn inner_size<S: Into<Size>>( mut self, inner_size: S, ) -> Self { self.inner_size = inner_size.into(); self } /// Specifies the title that the window will be created with pub fn window_title<T: Into<String>>( mut self, window_title: T, ) -> Self { self.window_title = window_title.into(); self } /// Determine the coordinate system to use for the canvas. This can be overridden by using the /// canvas sizer passed into the draw callback pub fn coordinate_system( mut self, coordinate_system: CoordinateSystem, ) -> Self { self.renderer_builder = self.renderer_builder.coordinate_system(coordinate_system); self } /// Set the validation mode in rafx. For skulpin, this essentially means turning the vulkan /// debug layers on/off. pub fn validation_mode( mut self, validation_mode: ValidationMode, ) -> Self { self.renderer_builder = self.renderer_builder.validation_mode(validation_mode); self } /// Start the app. `app_handler` must be an implementation of [skulpin::app::AppHandler]. /// This does not return because winit does not return. For consistency, we use the /// fatal_error() callback on the passed in AppHandler. pub fn run<T: 'static + AppHandler>( self, app_handler: T, ) -> ! { App::run( app_handler, self.inner_size, self.window_title.clone(), self.renderer_builder, ) } } /// Constructed by `AppBuilder` which immediately calls `run`. pub struct App {} impl App { /// Runs the app. This is called by `AppBuilder::run`. This does not return because winit does /// not return. For consistency, we use the fatal_error() callback on the passed in AppHandler. pub fn run<T: 'static + AppHandler>( mut app_handler: T, inner_size: Size, window_title: String, renderer_builder: RendererBuilder, ) -> ! 
{ // Create the event loop let event_loop = winit::event_loop::EventLoop::<()>::with_user_event(); let winit_size = match inner_size { Size::Physical(physical_size) => winit::dpi::Size::Physical( winit::dpi::PhysicalSize::new(physical_size.width, physical_size.height), ), Size::Logical(logical_size) => winit::dpi::Size::Logical(winit::dpi::LogicalSize::new( logical_size.width as f64, logical_size.height as f64, )), }; // Create a single window let window_result = winit::window::WindowBuilder::new() .with_title(window_title) .with_inner_size(winit_size) .build(&event_loop); let window = match window_result { Ok(window) => window, Err(e) => { warn!("Passing WindowBuilder::build() error to app {}", e); let app_error = e.into(); app_handler.fatal_error(&app_error); // Exiting in this way is consistent with how we will exit if we fail within the // input loop std::process::exit(0); } }; let mut app_control = AppControl::default(); let mut time_state = TimeState::new(); let mut input_state = InputState::new(&window); let window_size = window.inner_size(); let window_extents = RafxExtents2D { width: window_size.width, height: window_size.height, }; let renderer_result = renderer_builder.build(&window, window_extents); let mut renderer = match renderer_result { Ok(renderer) => renderer, Err(e) => { warn!("Passing RendererBuilder::build() error to app {}", e); let app_error = e.into(); app_handler.fatal_error(&app_error); // Exiting in this way is consistent with how we will exit if we fail within the // input loop std::process::exit(0); } }; // To print fps once per second let mut print_fps_event = PeriodicEvent::default(); // Pass control of this thread to winit until the app terminates. 
If this app wants to quit, // the update loop should send the appropriate event via the channel event_loop.run(move |event, window_target, control_flow| { input_state.handle_winit_event(&mut app_control, &event, window_target); match event { winit::event::Event::MainEventsCleared => { time_state.update(); if print_fps_event.try_take_event( time_state.current_instant(), std::time::Duration::from_secs(1), ) { debug!("fps: {}", time_state.updates_per_second()); } app_handler.update(AppUpdateArgs { app_control: &mut app_control, input_state: &input_state, time_state: &time_state, }); // Call this to mark the start of the next frame (i.e. "key just down" will return false) input_state.end_frame(); // Queue a RedrawRequested event. window.request_redraw(); } winit::event::Event::RedrawRequested(_window_id) => { let window_size = window.inner_size(); let window_extents = RafxExtents2D { width: window_size.width, height: window_size.height, }; if let Err(e) = renderer.draw( window_extents, window.scale_factor(), |canvas, coordinate_system_helper| { app_handler.draw(AppDrawArgs { app_control: &app_control, input_state: &input_state, time_state: &time_state, canvas, coordinate_system_helper, }); }, ) { warn!("Passing Renderer::draw() error to app {}", e); app_handler.fatal_error(&e.into()); app_control.enqueue_terminate_process(); } } _ => {} } if app_control.should_terminate_process() { *control_flow = winit::event_loop::ControlFlow::Exit } }); } }
fn fmt( &self, fmt: &mut core::fmt::Formatter, ) -> core::fmt::Result {
random_line_split
app.rs
//! Contains the main types a user needs to interact with to configure and run a skulpin app use crate::skia_safe; use crate::winit; use super::app_control::AppControl; use super::input_state::InputState; use super::time_state::TimeState; use super::util::PeriodicEvent; use skulpin_renderer::LogicalSize; use skulpin_renderer::Size; use skulpin_renderer::RendererBuilder; use skulpin_renderer::CoordinateSystem; use skulpin_renderer::CoordinateSystemHelper; use skulpin_renderer::ValidationMode; use skulpin_renderer::rafx::api::RafxError; use crate::rafx::api::RafxExtents2D; /// Represents an error from creating the renderer #[derive(Debug)] pub enum AppError { RafxError(skulpin_renderer::rafx::api::RafxError), WinitError(winit::error::OsError), } impl std::error::Error for AppError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match *self { AppError::RafxError(ref e) => Some(e), AppError::WinitError(ref e) => Some(e), } } } impl core::fmt::Display for AppError { fn fmt( &self, fmt: &mut core::fmt::Formatter, ) -> core::fmt::Result { match *self { AppError::RafxError(ref e) => e.fmt(fmt), AppError::WinitError(ref e) => e.fmt(fmt), } } } impl From<RafxError> for AppError { fn from(result: RafxError) -> Self { AppError::RafxError(result) } } impl From<winit::error::OsError> for AppError { fn from(result: winit::error::OsError) -> Self { AppError::WinitError(result) } } pub struct AppUpdateArgs<'a, 'b, 'c> { pub app_control: &'a mut AppControl, pub input_state: &'b InputState, pub time_state: &'c TimeState, } pub struct AppDrawArgs<'a, 'b, 'c, 'd> { pub app_control: &'a AppControl, pub input_state: &'b InputState, pub time_state: &'c TimeState, pub canvas: &'d mut skia_safe::Canvas, pub coordinate_system_helper: CoordinateSystemHelper, } /// A skulpin app requires implementing the AppHandler. A separate update and draw call must be /// implemented. 
/// /// `update` is called when winit provides a `winit::event::Event::MainEventsCleared` message /// /// `draw` is called when winit provides a `winit::event::RedrawRequested` message /// /// I would recommend putting general logic you always want to run in the `update` and just /// rendering code in the `draw`. pub trait AppHandler { /// Called frequently, this is the intended place to put non-rendering logic fn update( &mut self, update_args: AppUpdateArgs, ); /// Called frequently, this is the intended place to put drawing code fn draw( &mut self, draw_args: AppDrawArgs, ); fn fatal_error( &mut self, error: &AppError, ); } /// Used to configure the app behavior and create the app pub struct AppBuilder { inner_size: Size, window_title: String, renderer_builder: RendererBuilder, } impl Default for AppBuilder { fn default() -> Self { AppBuilder::new() } } impl AppBuilder { /// Construct the app builder initialized with default options pub fn new() -> Self { AppBuilder { inner_size: LogicalSize::new(900, 600).into(), window_title: "Skulpin".to_string(), renderer_builder: RendererBuilder::new(), } } /// Specifies the inner size of the window. Both physical and logical coordinates are accepted. pub fn inner_size<S: Into<Size>>( mut self, inner_size: S, ) -> Self { self.inner_size = inner_size.into(); self } /// Specifies the title that the window will be created with pub fn window_title<T: Into<String>>( mut self, window_title: T, ) -> Self { self.window_title = window_title.into(); self } /// Determine the coordinate system to use for the canvas. This can be overridden by using the /// canvas sizer passed into the draw callback pub fn coordinate_system( mut self, coordinate_system: CoordinateSystem, ) -> Self { self.renderer_builder = self.renderer_builder.coordinate_system(coordinate_system); self } /// Set the validation mode in rafx. For skulpin, this essentially means turning the vulkan /// debug layers on/off. 
pub fn validation_mode( mut self, validation_mode: ValidationMode, ) -> Self { self.renderer_builder = self.renderer_builder.validation_mode(validation_mode); self } /// Start the app. `app_handler` must be an implementation of [skulpin::app::AppHandler]. /// This does not return because winit does not return. For consistency, we use the /// fatal_error() callback on the passed in AppHandler. pub fn run<T: 'static + AppHandler>( self, app_handler: T, ) -> ! { App::run( app_handler, self.inner_size, self.window_title.clone(), self.renderer_builder, ) } } /// Constructed by `AppBuilder` which immediately calls `run`. pub struct App {} impl App { /// Runs the app. This is called by `AppBuilder::run`. This does not return because winit does /// not return. For consistency, we use the fatal_error() callback on the passed in AppHandler. pub fn run<T: 'static + AppHandler>( mut app_handler: T, inner_size: Size, window_title: String, renderer_builder: RendererBuilder, ) -> ! { // Create the event loop let event_loop = winit::event_loop::EventLoop::<()>::with_user_event(); let winit_size = match inner_size { Size::Physical(physical_size) => winit::dpi::Size::Physical( winit::dpi::PhysicalSize::new(physical_size.width, physical_size.height), ), Size::Logical(logical_size) => winit::dpi::Size::Logical(winit::dpi::LogicalSize::new( logical_size.width as f64, logical_size.height as f64, )), }; // Create a single window let window_result = winit::window::WindowBuilder::new() .with_title(window_title) .with_inner_size(winit_size) .build(&event_loop); let window = match window_result { Ok(window) => window, Err(e) => { warn!("Passing WindowBuilder::build() error to app {}", e); let app_error = e.into(); app_handler.fatal_error(&app_error); // Exiting in this way is consistent with how we will exit if we fail within the // input loop std::process::exit(0); } }; let mut app_control = AppControl::default(); let mut time_state = TimeState::new(); let mut input_state = 
InputState::new(&window); let window_size = window.inner_size(); let window_extents = RafxExtents2D { width: window_size.width, height: window_size.height, }; let renderer_result = renderer_builder.build(&window, window_extents); let mut renderer = match renderer_result { Ok(renderer) => renderer, Err(e) => { warn!("Passing RendererBuilder::build() error to app {}", e); let app_error = e.into(); app_handler.fatal_error(&app_error); // Exiting in this way is consistent with how we will exit if we fail within the // input loop std::process::exit(0); } }; // To print fps once per second let mut print_fps_event = PeriodicEvent::default(); // Pass control of this thread to winit until the app terminates. If this app wants to quit, // the update loop should send the appropriate event via the channel event_loop.run(move |event, window_target, control_flow| { input_state.handle_winit_event(&mut app_control, &event, window_target); match event { winit::event::Event::MainEventsCleared => { time_state.update(); if print_fps_event.try_take_event( time_state.current_instant(), std::time::Duration::from_secs(1), ) { debug!("fps: {}", time_state.updates_per_second()); } app_handler.update(AppUpdateArgs { app_control: &mut app_control, input_state: &input_state, time_state: &time_state, }); // Call this to mark the start of the next frame (i.e. "key just down" will return false) input_state.end_frame(); // Queue a RedrawRequested event. 
window.request_redraw(); } winit::event::Event::RedrawRequested(_window_id) => { let window_size = window.inner_size(); let window_extents = RafxExtents2D { width: window_size.width, height: window_size.height, }; if let Err(e) = renderer.draw( window_extents, window.scale_factor(), |canvas, coordinate_system_helper| { app_handler.draw(AppDrawArgs { app_control: &app_control, input_state: &input_state, time_state: &time_state, canvas, coordinate_system_helper, }); }, ) { warn!("Passing Renderer::draw() error to app {}", e); app_handler.fatal_error(&e.into()); app_control.enqueue_terminate_process(); } } _ =>
} if app_control.should_terminate_process() { *control_flow = winit::event_loop::ControlFlow::Exit } }); } }
{}
conditional_block
app.rs
//! Contains the main types a user needs to interact with to configure and run a skulpin app use crate::skia_safe; use crate::winit; use super::app_control::AppControl; use super::input_state::InputState; use super::time_state::TimeState; use super::util::PeriodicEvent; use skulpin_renderer::LogicalSize; use skulpin_renderer::Size; use skulpin_renderer::RendererBuilder; use skulpin_renderer::CoordinateSystem; use skulpin_renderer::CoordinateSystemHelper; use skulpin_renderer::ValidationMode; use skulpin_renderer::rafx::api::RafxError; use crate::rafx::api::RafxExtents2D; /// Represents an error from creating the renderer #[derive(Debug)] pub enum AppError { RafxError(skulpin_renderer::rafx::api::RafxError), WinitError(winit::error::OsError), } impl std::error::Error for AppError { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { match *self { AppError::RafxError(ref e) => Some(e), AppError::WinitError(ref e) => Some(e), } } } impl core::fmt::Display for AppError { fn fmt( &self, fmt: &mut core::fmt::Formatter, ) -> core::fmt::Result { match *self { AppError::RafxError(ref e) => e.fmt(fmt), AppError::WinitError(ref e) => e.fmt(fmt), } } } impl From<RafxError> for AppError { fn from(result: RafxError) -> Self { AppError::RafxError(result) } } impl From<winit::error::OsError> for AppError { fn
(result: winit::error::OsError) -> Self { AppError::WinitError(result) } } pub struct AppUpdateArgs<'a, 'b, 'c> { pub app_control: &'a mut AppControl, pub input_state: &'b InputState, pub time_state: &'c TimeState, } pub struct AppDrawArgs<'a, 'b, 'c, 'd> { pub app_control: &'a AppControl, pub input_state: &'b InputState, pub time_state: &'c TimeState, pub canvas: &'d mut skia_safe::Canvas, pub coordinate_system_helper: CoordinateSystemHelper, } /// A skulpin app requires implementing the AppHandler. A separate update and draw call must be /// implemented. /// /// `update` is called when winit provides a `winit::event::Event::MainEventsCleared` message /// /// `draw` is called when winit provides a `winit::event::RedrawRequested` message /// /// I would recommend putting general logic you always want to run in the `update` and just /// rendering code in the `draw`. pub trait AppHandler { /// Called frequently, this is the intended place to put non-rendering logic fn update( &mut self, update_args: AppUpdateArgs, ); /// Called frequently, this is the intended place to put drawing code fn draw( &mut self, draw_args: AppDrawArgs, ); fn fatal_error( &mut self, error: &AppError, ); } /// Used to configure the app behavior and create the app pub struct AppBuilder { inner_size: Size, window_title: String, renderer_builder: RendererBuilder, } impl Default for AppBuilder { fn default() -> Self { AppBuilder::new() } } impl AppBuilder { /// Construct the app builder initialized with default options pub fn new() -> Self { AppBuilder { inner_size: LogicalSize::new(900, 600).into(), window_title: "Skulpin".to_string(), renderer_builder: RendererBuilder::new(), } } /// Specifies the inner size of the window. Both physical and logical coordinates are accepted. 
pub fn inner_size<S: Into<Size>>( mut self, inner_size: S, ) -> Self { self.inner_size = inner_size.into(); self } /// Specifies the title that the window will be created with pub fn window_title<T: Into<String>>( mut self, window_title: T, ) -> Self { self.window_title = window_title.into(); self } /// Determine the coordinate system to use for the canvas. This can be overridden by using the /// canvas sizer passed into the draw callback pub fn coordinate_system( mut self, coordinate_system: CoordinateSystem, ) -> Self { self.renderer_builder = self.renderer_builder.coordinate_system(coordinate_system); self } /// Set the validation mode in rafx. For skulpin, this essentially means turning the vulkan /// debug layers on/off. pub fn validation_mode( mut self, validation_mode: ValidationMode, ) -> Self { self.renderer_builder = self.renderer_builder.validation_mode(validation_mode); self } /// Start the app. `app_handler` must be an implementation of [skulpin::app::AppHandler]. /// This does not return because winit does not return. For consistency, we use the /// fatal_error() callback on the passed in AppHandler. pub fn run<T: 'static + AppHandler>( self, app_handler: T, ) -> ! { App::run( app_handler, self.inner_size, self.window_title.clone(), self.renderer_builder, ) } } /// Constructed by `AppBuilder` which immediately calls `run`. pub struct App {} impl App { /// Runs the app. This is called by `AppBuilder::run`. This does not return because winit does /// not return. For consistency, we use the fatal_error() callback on the passed in AppHandler. pub fn run<T: 'static + AppHandler>( mut app_handler: T, inner_size: Size, window_title: String, renderer_builder: RendererBuilder, ) -> ! 
{ // Create the event loop let event_loop = winit::event_loop::EventLoop::<()>::with_user_event(); let winit_size = match inner_size { Size::Physical(physical_size) => winit::dpi::Size::Physical( winit::dpi::PhysicalSize::new(physical_size.width, physical_size.height), ), Size::Logical(logical_size) => winit::dpi::Size::Logical(winit::dpi::LogicalSize::new( logical_size.width as f64, logical_size.height as f64, )), }; // Create a single window let window_result = winit::window::WindowBuilder::new() .with_title(window_title) .with_inner_size(winit_size) .build(&event_loop); let window = match window_result { Ok(window) => window, Err(e) => { warn!("Passing WindowBuilder::build() error to app {}", e); let app_error = e.into(); app_handler.fatal_error(&app_error); // Exiting in this way is consistent with how we will exit if we fail within the // input loop std::process::exit(0); } }; let mut app_control = AppControl::default(); let mut time_state = TimeState::new(); let mut input_state = InputState::new(&window); let window_size = window.inner_size(); let window_extents = RafxExtents2D { width: window_size.width, height: window_size.height, }; let renderer_result = renderer_builder.build(&window, window_extents); let mut renderer = match renderer_result { Ok(renderer) => renderer, Err(e) => { warn!("Passing RendererBuilder::build() error to app {}", e); let app_error = e.into(); app_handler.fatal_error(&app_error); // Exiting in this way is consistent with how we will exit if we fail within the // input loop std::process::exit(0); } }; // To print fps once per second let mut print_fps_event = PeriodicEvent::default(); // Pass control of this thread to winit until the app terminates. 
If this app wants to quit, // the update loop should send the appropriate event via the channel event_loop.run(move |event, window_target, control_flow| { input_state.handle_winit_event(&mut app_control, &event, window_target); match event { winit::event::Event::MainEventsCleared => { time_state.update(); if print_fps_event.try_take_event( time_state.current_instant(), std::time::Duration::from_secs(1), ) { debug!("fps: {}", time_state.updates_per_second()); } app_handler.update(AppUpdateArgs { app_control: &mut app_control, input_state: &input_state, time_state: &time_state, }); // Call this to mark the start of the next frame (i.e. "key just down" will return false) input_state.end_frame(); // Queue a RedrawRequested event. window.request_redraw(); } winit::event::Event::RedrawRequested(_window_id) => { let window_size = window.inner_size(); let window_extents = RafxExtents2D { width: window_size.width, height: window_size.height, }; if let Err(e) = renderer.draw( window_extents, window.scale_factor(), |canvas, coordinate_system_helper| { app_handler.draw(AppDrawArgs { app_control: &app_control, input_state: &input_state, time_state: &time_state, canvas, coordinate_system_helper, }); }, ) { warn!("Passing Renderer::draw() error to app {}", e); app_handler.fatal_error(&e.into()); app_control.enqueue_terminate_process(); } } _ => {} } if app_control.should_terminate_process() { *control_flow = winit::event_loop::ControlFlow::Exit } }); } }
from
identifier_name
lds.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: envoy/api/v2/lds.proto package envoy_api_v2 import ( context "context" fmt "fmt" _ "github.com/cncf/udpa/go/udpa/annotations" _ "github.com/datawire/ambassador/pkg/api/envoy/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" proto "github.com/gogo/protobuf/proto" _ "github.com/gogo/protobuf/types" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" io "io" _ "istio.io/gogo-genproto/googleapis/google/api" math "math" math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
type LdsDummy struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LdsDummy) Reset() { *m = LdsDummy{} } func (m *LdsDummy) String() string { return proto.CompactTextString(m) } func (*LdsDummy) ProtoMessage() {} func (*LdsDummy) Descriptor() ([]byte, []int) { return fileDescriptor_34e2cd84a105bcd1, []int{0} } func (m *LdsDummy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *LdsDummy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_LdsDummy.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *LdsDummy) XXX_Merge(src proto.Message) { xxx_messageInfo_LdsDummy.Merge(m, src) } func (m *LdsDummy) XXX_Size() int { return m.Size() } func (m *LdsDummy) XXX_DiscardUnknown() { xxx_messageInfo_LdsDummy.DiscardUnknown(m) } var xxx_messageInfo_LdsDummy proto.InternalMessageInfo func init() { proto.RegisterType((*LdsDummy)(nil), "envoy.api.v2.LdsDummy") } func init() { proto.RegisterFile("envoy/api/v2/lds.proto", fileDescriptor_34e2cd84a105bcd1) } var fileDescriptor_34e2cd84a105bcd1 = []byte{ // 410 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4b, 0xcd, 0x2b, 0xcb, 0xaf, 0xd4, 0x4f, 0x2c, 0xc8, 0xd4, 0x2f, 0x33, 0xd2, 0xcf, 0x49, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x01, 0x8b, 0xeb, 0x25, 0x16, 0x64, 0xea, 0x95, 0x19, 0x49, 0xc9, 0xa0, 0xa8, 0x4a, 0xc9, 0x2c, 0x4e, 0xce, 0x2f, 0x4b, 0x2d, 0xaa, 0x84, 0xa8, 0x95, 0x92, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x05, 0x4b, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7, 0x41, 0x4d, 0x92, 0x92, 0x83, 0xca, 0x82, 0x79, 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa5, 0x45, 0x60, 0x05, 0xb8, 0xe4, 0xcb, 0x8b, 0x12, 0x0b, 0x0a, 0x52, 0x8b, 0x60, 0xfa, 0x15, 0xa0, 0x76, 0x23, 0x0c, 0xd6, 0x2f, 0x4a, 0x2d, 0xce, 
0x2f, 0x2d, 0x4a, 0x4e, 0x85, 0x99, 0x50, 0x9a, 0x52, 0x90, 0x88, 0xa2, 0x20, 0x37, 0x33, 0xbd, 0x28, 0xb1, 0x04, 0x26, 0x2f, 0x8b, 0x21, 0x5f, 0x5c, 0x92, 0x58, 0x52, 0x0a, 0xb3, 0x40, 0xbc, 0x2c, 0x31, 0x27, 0x33, 0x25, 0xb1, 0x24, 0x55, 0x1f, 0xc6, 0x80, 0x4a, 0x48, 0xa3, 0x86, 0x4d, 0x66, 0x71, 0x49, 0x6a, 0x5e, 0x6a, 0x11, 0x44, 0x52, 0x89, 0x8b, 0x8b, 0xc3, 0x27, 0xa5, 0xd8, 0xa5, 0x34, 0x37, 0xb7, 0xd2, 0xe8, 0x33, 0x13, 0x97, 0x84, 0x0f, 0x54, 0xda, 0x05, 0x16, 0x38, 0xc1, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x42, 0x89, 0x5c, 0x7c, 0x2e, 0xa9, 0x39, 0x25, 0x89, 0x30, 0x05, 0xc5, 0x42, 0xca, 0x7a, 0xc8, 0x81, 0xab, 0x07, 0x96, 0x85, 0x6b, 0x0b, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x91, 0x52, 0xc1, 0xaf, 0xa8, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0x89, 0x41, 0x83, 0xd1, 0x80, 0x51, 0x28, 0x82, 0x8b, 0x3f, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x17, 0x61, 0x87, 0x1c, 0x9a, 0x76, 0x74, 0xe3, 0xe5, 0x71, 0xca, 0xa3, 0x98, 0x5c, 0xcd, 0xc5, 0xe7, 0x96, 0x5a, 0x92, 0x9c, 0x41, 0x45, 0x83, 0x35, 0x9a, 0x2e, 0x3f, 0x99, 0xcc, 0x24, 0xa9, 0x24, 0x8e, 0x92, 0x94, 0xac, 0x60, 0xc1, 0x5b, 0x0c, 0x96, 0x66, 0xb6, 0x62, 0xd4, 0x92, 0x92, 0xed, 0x5a, 0x32, 0xed, 0x33, 0xbb, 0x38, 0x97, 0x28, 0x8a, 0x89, 0x30, 0xa7, 0x38, 0x25, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x9f, 0x66, 0xfc, 0xeb, 0x67, 0x95, 0x16, 0x92, 0x84, 0x28, 0x2d, 0x86, 0x84, 0xbd, 0x1e, 0x3c, 0xce, 0xca, 0x8c, 0x77, 0x35, 0x9c, 0xb8, 0xc8, 0xc6, 0x24, 0xc0, 0xc0, 0x25, 0x95, 0x99, 0x0f, 0x71, 0x62, 0x41, 0x51, 0x7e, 0x45, 0x25, 0x8a, 0x6b, 0x9d, 0x40, 0x51, 0x1a, 0x00, 0x8a, 0xde, 0x00, 0xc6, 0x0e, 0x46, 0xc6, 0x00, 0x8e, 0x24, 0x36, 0x70, 0x64, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x85, 0x49, 0xac, 0x27, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // ListenerDiscoveryServiceClient is the client API for ListenerDiscoveryService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type ListenerDiscoveryServiceClient interface { DeltaListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_DeltaListenersClient, error) StreamListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_StreamListenersClient, error) FetchListeners(ctx context.Context, in *DiscoveryRequest, opts ...grpc.CallOption) (*DiscoveryResponse, error) } type listenerDiscoveryServiceClient struct { cc *grpc.ClientConn } func NewListenerDiscoveryServiceClient(cc *grpc.ClientConn) ListenerDiscoveryServiceClient { return &listenerDiscoveryServiceClient{cc} } func (c *listenerDiscoveryServiceClient) DeltaListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_DeltaListenersClient, error) { stream, err := c.cc.NewStream(ctx, &_ListenerDiscoveryService_serviceDesc.Streams[0], "/envoy.api.v2.ListenerDiscoveryService/DeltaListeners", opts...) 
if err != nil { return nil, err } x := &listenerDiscoveryServiceDeltaListenersClient{stream} return x, nil } type ListenerDiscoveryService_DeltaListenersClient interface { Send(*DeltaDiscoveryRequest) error Recv() (*DeltaDiscoveryResponse, error) grpc.ClientStream } type listenerDiscoveryServiceDeltaListenersClient struct { grpc.ClientStream } func (x *listenerDiscoveryServiceDeltaListenersClient) Send(m *DeltaDiscoveryRequest) error { return x.ClientStream.SendMsg(m) } func (x *listenerDiscoveryServiceDeltaListenersClient) Recv() (*DeltaDiscoveryResponse, error) { m := new(DeltaDiscoveryResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *listenerDiscoveryServiceClient) StreamListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_StreamListenersClient, error) { stream, err := c.cc.NewStream(ctx, &_ListenerDiscoveryService_serviceDesc.Streams[1], "/envoy.api.v2.ListenerDiscoveryService/StreamListeners", opts...) if err != nil { return nil, err } x := &listenerDiscoveryServiceStreamListenersClient{stream} return x, nil } type ListenerDiscoveryService_StreamListenersClient interface { Send(*DiscoveryRequest) error Recv() (*DiscoveryResponse, error) grpc.ClientStream } type listenerDiscoveryServiceStreamListenersClient struct { grpc.ClientStream } func (x *listenerDiscoveryServiceStreamListenersClient) Send(m *DiscoveryRequest) error { return x.ClientStream.SendMsg(m) } func (x *listenerDiscoveryServiceStreamListenersClient) Recv() (*DiscoveryResponse, error) { m := new(DiscoveryResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *listenerDiscoveryServiceClient) FetchListeners(ctx context.Context, in *DiscoveryRequest, opts ...grpc.CallOption) (*DiscoveryResponse, error) { out := new(DiscoveryResponse) err := c.cc.Invoke(ctx, "/envoy.api.v2.ListenerDiscoveryService/FetchListeners", in, out, opts...) 
if err != nil { return nil, err } return out, nil } // ListenerDiscoveryServiceServer is the server API for ListenerDiscoveryService service. type ListenerDiscoveryServiceServer interface { DeltaListeners(ListenerDiscoveryService_DeltaListenersServer) error StreamListeners(ListenerDiscoveryService_StreamListenersServer) error FetchListeners(context.Context, *DiscoveryRequest) (*DiscoveryResponse, error) } // UnimplementedListenerDiscoveryServiceServer can be embedded to have forward compatible implementations. type UnimplementedListenerDiscoveryServiceServer struct { } func (*UnimplementedListenerDiscoveryServiceServer) DeltaListeners(srv ListenerDiscoveryService_DeltaListenersServer) error { return status.Errorf(codes.Unimplemented, "method DeltaListeners not implemented") } func (*UnimplementedListenerDiscoveryServiceServer) StreamListeners(srv ListenerDiscoveryService_StreamListenersServer) error { return status.Errorf(codes.Unimplemented, "method StreamListeners not implemented") } func (*UnimplementedListenerDiscoveryServiceServer) FetchListeners(ctx context.Context, req *DiscoveryRequest) (*DiscoveryResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FetchListeners not implemented") } func RegisterListenerDiscoveryServiceServer(s *grpc.Server, srv ListenerDiscoveryServiceServer)
func _ListenerDiscoveryService_DeltaListeners_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(ListenerDiscoveryServiceServer).DeltaListeners(&listenerDiscoveryServiceDeltaListenersServer{stream}) } type ListenerDiscoveryService_DeltaListenersServer interface { Send(*DeltaDiscoveryResponse) error Recv() (*DeltaDiscoveryRequest, error) grpc.ServerStream } type listenerDiscoveryServiceDeltaListenersServer struct { grpc.ServerStream } func (x *listenerDiscoveryServiceDeltaListenersServer) Send(m *DeltaDiscoveryResponse) error { return x.ServerStream.SendMsg(m) } func (x *listenerDiscoveryServiceDeltaListenersServer) Recv() (*DeltaDiscoveryRequest, error) { m := new(DeltaDiscoveryRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _ListenerDiscoveryService_StreamListeners_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(ListenerDiscoveryServiceServer).StreamListeners(&listenerDiscoveryServiceStreamListenersServer{stream}) } type ListenerDiscoveryService_StreamListenersServer interface { Send(*DiscoveryResponse) error Recv() (*DiscoveryRequest, error) grpc.ServerStream } type listenerDiscoveryServiceStreamListenersServer struct { grpc.ServerStream } func (x *listenerDiscoveryServiceStreamListenersServer) Send(m *DiscoveryResponse) error { return x.ServerStream.SendMsg(m) } func (x *listenerDiscoveryServiceStreamListenersServer) Recv() (*DiscoveryRequest, error) { m := new(DiscoveryRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _ListenerDiscoveryService_FetchListeners_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DiscoveryRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ListenerDiscoveryServiceServer).FetchListeners(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: 
"/envoy.api.v2.ListenerDiscoveryService/FetchListeners", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ListenerDiscoveryServiceServer).FetchListeners(ctx, req.(*DiscoveryRequest)) } return interceptor(ctx, in, info, handler) } var _ListenerDiscoveryService_serviceDesc = grpc.ServiceDesc{ ServiceName: "envoy.api.v2.ListenerDiscoveryService", HandlerType: (*ListenerDiscoveryServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "FetchListeners", Handler: _ListenerDiscoveryService_FetchListeners_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "DeltaListeners", Handler: _ListenerDiscoveryService_DeltaListeners_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "StreamListeners", Handler: _ListenerDiscoveryService_StreamListeners_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "envoy/api/v2/lds.proto", } func (m *LdsDummy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *LdsDummy) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *LdsDummy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.XXX_unrecognized != nil { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } return len(dAtA) - i, nil } func encodeVarintLds(dAtA []byte, offset int, v uint64) int { offset -= sovLds(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func (m *LdsDummy) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func sovLds(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozLds(x uint64) (n int) { return sovLds(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *LdsDummy) 
Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLds } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: LdsDummy: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: LdsDummy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipLds(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthLds } if (iNdEx + skippy) < 0 { return ErrInvalidLengthLds } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipLds(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLds } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLds } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } case 1: iNdEx += 8 case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLds } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthLds } iNdEx += length case 3: depth++ case 4: if depth == 0 { return 0, ErrUnexpectedEndOfGroupLds } depth-- case 5: iNdEx += 4 default: return 0, 
fmt.Errorf("proto: illegal wireType %d", wireType) } if iNdEx < 0 { return 0, ErrInvalidLengthLds } if depth == 0 { return iNdEx, nil } } return 0, io.ErrUnexpectedEOF } var ( ErrInvalidLengthLds = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowLds = fmt.Errorf("proto: integer overflow") ErrUnexpectedEndOfGroupLds = fmt.Errorf("proto: unexpected end of group") )
{ s.RegisterService(&_ListenerDiscoveryService_serviceDesc, srv) }
identifier_body
lds.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: envoy/api/v2/lds.proto package envoy_api_v2 import ( context "context" fmt "fmt" _ "github.com/cncf/udpa/go/udpa/annotations" _ "github.com/datawire/ambassador/pkg/api/envoy/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" proto "github.com/gogo/protobuf/proto" _ "github.com/gogo/protobuf/types" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" io "io" _ "istio.io/gogo-genproto/googleapis/google/api" math "math" math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
type LdsDummy struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LdsDummy) Reset() { *m = LdsDummy{} } func (m *LdsDummy) String() string { return proto.CompactTextString(m) } func (*LdsDummy) ProtoMessage() {} func (*LdsDummy) Descriptor() ([]byte, []int) { return fileDescriptor_34e2cd84a105bcd1, []int{0} } func (m *LdsDummy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *LdsDummy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_LdsDummy.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *LdsDummy) XXX_Merge(src proto.Message) { xxx_messageInfo_LdsDummy.Merge(m, src) } func (m *LdsDummy) XXX_Size() int { return m.Size() } func (m *LdsDummy) XXX_DiscardUnknown() { xxx_messageInfo_LdsDummy.DiscardUnknown(m) } var xxx_messageInfo_LdsDummy proto.InternalMessageInfo func init() { proto.RegisterType((*LdsDummy)(nil), "envoy.api.v2.LdsDummy") } func init() { proto.RegisterFile("envoy/api/v2/lds.proto", fileDescriptor_34e2cd84a105bcd1) } var fileDescriptor_34e2cd84a105bcd1 = []byte{ // 410 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4b, 0xcd, 0x2b, 0xcb, 0xaf, 0xd4, 0x4f, 0x2c, 0xc8, 0xd4, 0x2f, 0x33, 0xd2, 0xcf, 0x49, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x01, 0x8b, 0xeb, 0x25, 0x16, 0x64, 0xea, 0x95, 0x19, 0x49, 0xc9, 0xa0, 0xa8, 0x4a, 0xc9, 0x2c, 0x4e, 0xce, 0x2f, 0x4b, 0x2d, 0xaa, 0x84, 0xa8, 0x95, 0x92, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x05, 0x4b, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7, 0x41, 0x4d, 0x92, 0x92, 0x83, 0xca, 0x82, 0x79, 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa5, 0x45, 0x60, 0x05, 0xb8, 0xe4, 0xcb, 0x8b, 0x12, 0x0b, 0x0a, 0x52, 0x8b, 0x60, 0xfa, 0x15, 0xa0, 0x76, 0x23, 0x0c, 0xd6, 0x2f, 0x4a, 0x2d, 0xce, 
0x2f, 0x2d, 0x4a, 0x4e, 0x85, 0x99, 0x50, 0x9a, 0x52, 0x90, 0x88, 0xa2, 0x20, 0x37, 0x33, 0xbd, 0x28, 0xb1, 0x04, 0x26, 0x2f, 0x8b, 0x21, 0x5f, 0x5c, 0x92, 0x58, 0x52, 0x0a, 0xb3, 0x40, 0xbc, 0x2c, 0x31, 0x27, 0x33, 0x25, 0xb1, 0x24, 0x55, 0x1f, 0xc6, 0x80, 0x4a, 0x48, 0xa3, 0x86, 0x4d, 0x66, 0x71, 0x49, 0x6a, 0x5e, 0x6a, 0x11, 0x44, 0x52, 0x89, 0x8b, 0x8b, 0xc3, 0x27, 0xa5, 0xd8, 0xa5, 0x34, 0x37, 0xb7, 0xd2, 0xe8, 0x33, 0x13, 0x97, 0x84, 0x0f, 0x54, 0xda, 0x05, 0x16, 0x38, 0xc1, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x42, 0x89, 0x5c, 0x7c, 0x2e, 0xa9, 0x39, 0x25, 0x89, 0x30, 0x05, 0xc5, 0x42, 0xca, 0x7a, 0xc8, 0x81, 0xab, 0x07, 0x96, 0x85, 0x6b, 0x0b, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x91, 0x52, 0xc1, 0xaf, 0xa8, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0x89, 0x41, 0x83, 0xd1, 0x80, 0x51, 0x28, 0x82, 0x8b, 0x3f, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x17, 0x61, 0x87, 0x1c, 0x9a, 0x76, 0x74, 0xe3, 0xe5, 0x71, 0xca, 0xa3, 0x98, 0x5c, 0xcd, 0xc5, 0xe7, 0x96, 0x5a, 0x92, 0x9c, 0x41, 0x45, 0x83, 0x35, 0x9a, 0x2e, 0x3f, 0x99, 0xcc, 0x24, 0xa9, 0x24, 0x8e, 0x92, 0x94, 0xac, 0x60, 0xc1, 0x5b, 0x0c, 0x96, 0x66, 0xb6, 0x62, 0xd4, 0x92, 0x92, 0xed, 0x5a, 0x32, 0xed, 0x33, 0xbb, 0x38, 0x97, 0x28, 0x8a, 0x89, 0x30, 0xa7, 0x38, 0x25, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x9f, 0x66, 0xfc, 0xeb, 0x67, 0x95, 0x16, 0x92, 0x84, 0x28, 0x2d, 0x86, 0x84, 0xbd, 0x1e, 0x3c, 0xce, 0xca, 0x8c, 0x77, 0x35, 0x9c, 0xb8, 0xc8, 0xc6, 0x24, 0xc0, 0xc0, 0x25, 0x95, 0x99, 0x0f, 0x71, 0x62, 0x41, 0x51, 0x7e, 0x45, 0x25, 0x8a, 0x6b, 0x9d, 0x40, 0x51, 0x1a, 0x00, 0x8a, 0xde, 0x00, 0xc6, 0x0e, 0x46, 0xc6, 0x00, 0x8e, 0x24, 0x36, 0x70, 0x64, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x85, 0x49, 0xac, 0x27, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // ListenerDiscoveryServiceClient is the client API for ListenerDiscoveryService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type ListenerDiscoveryServiceClient interface { DeltaListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_DeltaListenersClient, error) StreamListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_StreamListenersClient, error) FetchListeners(ctx context.Context, in *DiscoveryRequest, opts ...grpc.CallOption) (*DiscoveryResponse, error) } type listenerDiscoveryServiceClient struct { cc *grpc.ClientConn } func NewListenerDiscoveryServiceClient(cc *grpc.ClientConn) ListenerDiscoveryServiceClient { return &listenerDiscoveryServiceClient{cc} } func (c *listenerDiscoveryServiceClient) DeltaListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_DeltaListenersClient, error) { stream, err := c.cc.NewStream(ctx, &_ListenerDiscoveryService_serviceDesc.Streams[0], "/envoy.api.v2.ListenerDiscoveryService/DeltaListeners", opts...) 
if err != nil { return nil, err } x := &listenerDiscoveryServiceDeltaListenersClient{stream} return x, nil } type ListenerDiscoveryService_DeltaListenersClient interface { Send(*DeltaDiscoveryRequest) error Recv() (*DeltaDiscoveryResponse, error) grpc.ClientStream } type listenerDiscoveryServiceDeltaListenersClient struct { grpc.ClientStream } func (x *listenerDiscoveryServiceDeltaListenersClient) Send(m *DeltaDiscoveryRequest) error { return x.ClientStream.SendMsg(m) } func (x *listenerDiscoveryServiceDeltaListenersClient) Recv() (*DeltaDiscoveryResponse, error) { m := new(DeltaDiscoveryResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *listenerDiscoveryServiceClient) StreamListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_StreamListenersClient, error) { stream, err := c.cc.NewStream(ctx, &_ListenerDiscoveryService_serviceDesc.Streams[1], "/envoy.api.v2.ListenerDiscoveryService/StreamListeners", opts...) if err != nil { return nil, err } x := &listenerDiscoveryServiceStreamListenersClient{stream} return x, nil } type ListenerDiscoveryService_StreamListenersClient interface { Send(*DiscoveryRequest) error Recv() (*DiscoveryResponse, error) grpc.ClientStream } type listenerDiscoveryServiceStreamListenersClient struct { grpc.ClientStream } func (x *listenerDiscoveryServiceStreamListenersClient) Send(m *DiscoveryRequest) error { return x.ClientStream.SendMsg(m) } func (x *listenerDiscoveryServiceStreamListenersClient) Recv() (*DiscoveryResponse, error) { m := new(DiscoveryResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *listenerDiscoveryServiceClient) FetchListeners(ctx context.Context, in *DiscoveryRequest, opts ...grpc.CallOption) (*DiscoveryResponse, error) { out := new(DiscoveryResponse) err := c.cc.Invoke(ctx, "/envoy.api.v2.ListenerDiscoveryService/FetchListeners", in, out, opts...) 
if err != nil { return nil, err } return out, nil } // ListenerDiscoveryServiceServer is the server API for ListenerDiscoveryService service. type ListenerDiscoveryServiceServer interface { DeltaListeners(ListenerDiscoveryService_DeltaListenersServer) error StreamListeners(ListenerDiscoveryService_StreamListenersServer) error FetchListeners(context.Context, *DiscoveryRequest) (*DiscoveryResponse, error) } // UnimplementedListenerDiscoveryServiceServer can be embedded to have forward compatible implementations. type UnimplementedListenerDiscoveryServiceServer struct { } func (*UnimplementedListenerDiscoveryServiceServer) DeltaListeners(srv ListenerDiscoveryService_DeltaListenersServer) error { return status.Errorf(codes.Unimplemented, "method DeltaListeners not implemented") } func (*UnimplementedListenerDiscoveryServiceServer) StreamListeners(srv ListenerDiscoveryService_StreamListenersServer) error { return status.Errorf(codes.Unimplemented, "method StreamListeners not implemented") } func (*UnimplementedListenerDiscoveryServiceServer) FetchListeners(ctx context.Context, req *DiscoveryRequest) (*DiscoveryResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FetchListeners not implemented") } func RegisterListenerDiscoveryServiceServer(s *grpc.Server, srv ListenerDiscoveryServiceServer) { s.RegisterService(&_ListenerDiscoveryService_serviceDesc, srv) } func _ListenerDiscoveryService_DeltaListeners_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(ListenerDiscoveryServiceServer).DeltaListeners(&listenerDiscoveryServiceDeltaListenersServer{stream}) } type ListenerDiscoveryService_DeltaListenersServer interface { Send(*DeltaDiscoveryResponse) error Recv() (*DeltaDiscoveryRequest, error) grpc.ServerStream } type listenerDiscoveryServiceDeltaListenersServer struct { grpc.ServerStream } func (x *listenerDiscoveryServiceDeltaListenersServer) Send(m *DeltaDiscoveryResponse) error { return x.ServerStream.SendMsg(m) } func (x 
*listenerDiscoveryServiceDeltaListenersServer) Recv() (*DeltaDiscoveryRequest, error) { m := new(DeltaDiscoveryRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _ListenerDiscoveryService_StreamListeners_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(ListenerDiscoveryServiceServer).StreamListeners(&listenerDiscoveryServiceStreamListenersServer{stream}) } type ListenerDiscoveryService_StreamListenersServer interface { Send(*DiscoveryResponse) error Recv() (*DiscoveryRequest, error) grpc.ServerStream } type listenerDiscoveryServiceStreamListenersServer struct { grpc.ServerStream } func (x *listenerDiscoveryServiceStreamListenersServer) Send(m *DiscoveryResponse) error { return x.ServerStream.SendMsg(m) } func (x *listenerDiscoveryServiceStreamListenersServer) Recv() (*DiscoveryRequest, error) { m := new(DiscoveryRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _ListenerDiscoveryService_FetchListeners_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DiscoveryRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ListenerDiscoveryServiceServer).FetchListeners(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/envoy.api.v2.ListenerDiscoveryService/FetchListeners", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ListenerDiscoveryServiceServer).FetchListeners(ctx, req.(*DiscoveryRequest)) } return interceptor(ctx, in, info, handler) } var _ListenerDiscoveryService_serviceDesc = grpc.ServiceDesc{ ServiceName: "envoy.api.v2.ListenerDiscoveryService", HandlerType: (*ListenerDiscoveryServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "FetchListeners", Handler: _ListenerDiscoveryService_FetchListeners_Handler, }, }, Streams: []grpc.StreamDesc{ { 
StreamName: "DeltaListeners", Handler: _ListenerDiscoveryService_DeltaListeners_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "StreamListeners", Handler: _ListenerDiscoveryService_StreamListeners_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "envoy/api/v2/lds.proto", } func (m *LdsDummy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *LdsDummy) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *LdsDummy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.XXX_unrecognized != nil { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } return len(dAtA) - i, nil } func encodeVarintLds(dAtA []byte, offset int, v uint64) int { offset -= sovLds(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func (m *LdsDummy) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func sovLds(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozLds(x uint64) (n int) { return sovLds(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *LdsDummy) Unmarshal(dAtA []byte) error {
l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLds } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: LdsDummy: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: LdsDummy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipLds(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthLds } if (iNdEx + skippy) < 0 { return ErrInvalidLengthLds } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipLds(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLds } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLds } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } case 1: iNdEx += 8 case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLds } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthLds } iNdEx += length case 3: depth++ case 4: if depth == 0 { return 0, ErrUnexpectedEndOfGroupLds } depth-- case 5: iNdEx += 4 default: return 0, fmt.Errorf("proto: illegal 
wireType %d", wireType) } if iNdEx < 0 { return 0, ErrInvalidLengthLds } if depth == 0 { return iNdEx, nil } } return 0, io.ErrUnexpectedEOF } var ( ErrInvalidLengthLds = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowLds = fmt.Errorf("proto: integer overflow") ErrUnexpectedEndOfGroupLds = fmt.Errorf("proto: unexpected end of group") )
random_line_split
lds.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: envoy/api/v2/lds.proto package envoy_api_v2 import ( context "context" fmt "fmt" _ "github.com/cncf/udpa/go/udpa/annotations" _ "github.com/datawire/ambassador/pkg/api/envoy/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" proto "github.com/gogo/protobuf/proto" _ "github.com/gogo/protobuf/types" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" io "io" _ "istio.io/gogo-genproto/googleapis/google/api" math "math" math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. 
type LdsDummy struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LdsDummy) Reset() { *m = LdsDummy{} } func (m *LdsDummy) String() string { return proto.CompactTextString(m) } func (*LdsDummy) ProtoMessage() {} func (*LdsDummy) Descriptor() ([]byte, []int) { return fileDescriptor_34e2cd84a105bcd1, []int{0} } func (m *LdsDummy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) } func (m *LdsDummy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_LdsDummy.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *LdsDummy) XXX_Merge(src proto.Message) { xxx_messageInfo_LdsDummy.Merge(m, src) } func (m *LdsDummy) XXX_Size() int { return m.Size() } func (m *LdsDummy) XXX_DiscardUnknown() { xxx_messageInfo_LdsDummy.DiscardUnknown(m) } var xxx_messageInfo_LdsDummy proto.InternalMessageInfo func init() { proto.RegisterType((*LdsDummy)(nil), "envoy.api.v2.LdsDummy") } func init() { proto.RegisterFile("envoy/api/v2/lds.proto", fileDescriptor_34e2cd84a105bcd1) } var fileDescriptor_34e2cd84a105bcd1 = []byte{ // 410 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4b, 0xcd, 0x2b, 0xcb, 0xaf, 0xd4, 0x4f, 0x2c, 0xc8, 0xd4, 0x2f, 0x33, 0xd2, 0xcf, 0x49, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x01, 0x8b, 0xeb, 0x25, 0x16, 0x64, 0xea, 0x95, 0x19, 0x49, 0xc9, 0xa0, 0xa8, 0x4a, 0xc9, 0x2c, 0x4e, 0xce, 0x2f, 0x4b, 0x2d, 0xaa, 0x84, 0xa8, 0x95, 0x92, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x05, 0x4b, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7, 0x41, 0x4d, 0x92, 0x92, 0x83, 0xca, 0x82, 0x79, 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa5, 0x45, 0x60, 0x05, 0xb8, 0xe4, 0xcb, 0x8b, 0x12, 0x0b, 0x0a, 0x52, 0x8b, 0x60, 0xfa, 0x15, 0xa0, 0x76, 0x23, 0x0c, 0xd6, 0x2f, 0x4a, 0x2d, 0xce, 
0x2f, 0x2d, 0x4a, 0x4e, 0x85, 0x99, 0x50, 0x9a, 0x52, 0x90, 0x88, 0xa2, 0x20, 0x37, 0x33, 0xbd, 0x28, 0xb1, 0x04, 0x26, 0x2f, 0x8b, 0x21, 0x5f, 0x5c, 0x92, 0x58, 0x52, 0x0a, 0xb3, 0x40, 0xbc, 0x2c, 0x31, 0x27, 0x33, 0x25, 0xb1, 0x24, 0x55, 0x1f, 0xc6, 0x80, 0x4a, 0x48, 0xa3, 0x86, 0x4d, 0x66, 0x71, 0x49, 0x6a, 0x5e, 0x6a, 0x11, 0x44, 0x52, 0x89, 0x8b, 0x8b, 0xc3, 0x27, 0xa5, 0xd8, 0xa5, 0x34, 0x37, 0xb7, 0xd2, 0xe8, 0x33, 0x13, 0x97, 0x84, 0x0f, 0x54, 0xda, 0x05, 0x16, 0x38, 0xc1, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x42, 0x89, 0x5c, 0x7c, 0x2e, 0xa9, 0x39, 0x25, 0x89, 0x30, 0x05, 0xc5, 0x42, 0xca, 0x7a, 0xc8, 0x81, 0xab, 0x07, 0x96, 0x85, 0x6b, 0x0b, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x91, 0x52, 0xc1, 0xaf, 0xa8, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0x89, 0x41, 0x83, 0xd1, 0x80, 0x51, 0x28, 0x82, 0x8b, 0x3f, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x17, 0x61, 0x87, 0x1c, 0x9a, 0x76, 0x74, 0xe3, 0xe5, 0x71, 0xca, 0xa3, 0x98, 0x5c, 0xcd, 0xc5, 0xe7, 0x96, 0x5a, 0x92, 0x9c, 0x41, 0x45, 0x83, 0x35, 0x9a, 0x2e, 0x3f, 0x99, 0xcc, 0x24, 0xa9, 0x24, 0x8e, 0x92, 0x94, 0xac, 0x60, 0xc1, 0x5b, 0x0c, 0x96, 0x66, 0xb6, 0x62, 0xd4, 0x92, 0x92, 0xed, 0x5a, 0x32, 0xed, 0x33, 0xbb, 0x38, 0x97, 0x28, 0x8a, 0x89, 0x30, 0xa7, 0x38, 0x25, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x9f, 0x66, 0xfc, 0xeb, 0x67, 0x95, 0x16, 0x92, 0x84, 0x28, 0x2d, 0x86, 0x84, 0xbd, 0x1e, 0x3c, 0xce, 0xca, 0x8c, 0x77, 0x35, 0x9c, 0xb8, 0xc8, 0xc6, 0x24, 0xc0, 0xc0, 0x25, 0x95, 0x99, 0x0f, 0x71, 0x62, 0x41, 0x51, 0x7e, 0x45, 0x25, 0x8a, 0x6b, 0x9d, 0x40, 0x51, 0x1a, 0x00, 0x8a, 0xde, 0x00, 0xc6, 0x0e, 0x46, 0xc6, 0x00, 0x8e, 0x24, 0x36, 0x70, 0x64, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x85, 0x49, 0xac, 0x27, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // ListenerDiscoveryServiceClient is the client API for ListenerDiscoveryService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type ListenerDiscoveryServiceClient interface { DeltaListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_DeltaListenersClient, error) StreamListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_StreamListenersClient, error) FetchListeners(ctx context.Context, in *DiscoveryRequest, opts ...grpc.CallOption) (*DiscoveryResponse, error) } type listenerDiscoveryServiceClient struct { cc *grpc.ClientConn } func NewListenerDiscoveryServiceClient(cc *grpc.ClientConn) ListenerDiscoveryServiceClient { return &listenerDiscoveryServiceClient{cc} } func (c *listenerDiscoveryServiceClient) DeltaListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_DeltaListenersClient, error) { stream, err := c.cc.NewStream(ctx, &_ListenerDiscoveryService_serviceDesc.Streams[0], "/envoy.api.v2.ListenerDiscoveryService/DeltaListeners", opts...) 
if err != nil { return nil, err } x := &listenerDiscoveryServiceDeltaListenersClient{stream} return x, nil } type ListenerDiscoveryService_DeltaListenersClient interface { Send(*DeltaDiscoveryRequest) error Recv() (*DeltaDiscoveryResponse, error) grpc.ClientStream } type listenerDiscoveryServiceDeltaListenersClient struct { grpc.ClientStream } func (x *listenerDiscoveryServiceDeltaListenersClient) Send(m *DeltaDiscoveryRequest) error { return x.ClientStream.SendMsg(m) } func (x *listenerDiscoveryServiceDeltaListenersClient) Recv() (*DeltaDiscoveryResponse, error) { m := new(DeltaDiscoveryResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *listenerDiscoveryServiceClient) StreamListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_StreamListenersClient, error) { stream, err := c.cc.NewStream(ctx, &_ListenerDiscoveryService_serviceDesc.Streams[1], "/envoy.api.v2.ListenerDiscoveryService/StreamListeners", opts...) if err != nil { return nil, err } x := &listenerDiscoveryServiceStreamListenersClient{stream} return x, nil } type ListenerDiscoveryService_StreamListenersClient interface { Send(*DiscoveryRequest) error Recv() (*DiscoveryResponse, error) grpc.ClientStream } type listenerDiscoveryServiceStreamListenersClient struct { grpc.ClientStream } func (x *listenerDiscoveryServiceStreamListenersClient) Send(m *DiscoveryRequest) error { return x.ClientStream.SendMsg(m) } func (x *listenerDiscoveryServiceStreamListenersClient) Recv() (*DiscoveryResponse, error) { m := new(DiscoveryResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *listenerDiscoveryServiceClient) FetchListeners(ctx context.Context, in *DiscoveryRequest, opts ...grpc.CallOption) (*DiscoveryResponse, error) { out := new(DiscoveryResponse) err := c.cc.Invoke(ctx, "/envoy.api.v2.ListenerDiscoveryService/FetchListeners", in, out, opts...) 
if err != nil { return nil, err } return out, nil } // ListenerDiscoveryServiceServer is the server API for ListenerDiscoveryService service. type ListenerDiscoveryServiceServer interface { DeltaListeners(ListenerDiscoveryService_DeltaListenersServer) error StreamListeners(ListenerDiscoveryService_StreamListenersServer) error FetchListeners(context.Context, *DiscoveryRequest) (*DiscoveryResponse, error) } // UnimplementedListenerDiscoveryServiceServer can be embedded to have forward compatible implementations. type UnimplementedListenerDiscoveryServiceServer struct { } func (*UnimplementedListenerDiscoveryServiceServer) DeltaListeners(srv ListenerDiscoveryService_DeltaListenersServer) error { return status.Errorf(codes.Unimplemented, "method DeltaListeners not implemented") } func (*UnimplementedListenerDiscoveryServiceServer) StreamListeners(srv ListenerDiscoveryService_StreamListenersServer) error { return status.Errorf(codes.Unimplemented, "method StreamListeners not implemented") } func (*UnimplementedListenerDiscoveryServiceServer) FetchListeners(ctx context.Context, req *DiscoveryRequest) (*DiscoveryResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FetchListeners not implemented") } func RegisterListenerDiscoveryServiceServer(s *grpc.Server, srv ListenerDiscoveryServiceServer) { s.RegisterService(&_ListenerDiscoveryService_serviceDesc, srv) } func _ListenerDiscoveryService_DeltaListeners_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(ListenerDiscoveryServiceServer).DeltaListeners(&listenerDiscoveryServiceDeltaListenersServer{stream}) } type ListenerDiscoveryService_DeltaListenersServer interface { Send(*DeltaDiscoveryResponse) error Recv() (*DeltaDiscoveryRequest, error) grpc.ServerStream } type listenerDiscoveryServiceDeltaListenersServer struct { grpc.ServerStream } func (x *listenerDiscoveryServiceDeltaListenersServer) Send(m *DeltaDiscoveryResponse) error { return x.ServerStream.SendMsg(m) } func (x 
*listenerDiscoveryServiceDeltaListenersServer) Recv() (*DeltaDiscoveryRequest, error) { m := new(DeltaDiscoveryRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _ListenerDiscoveryService_StreamListeners_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(ListenerDiscoveryServiceServer).StreamListeners(&listenerDiscoveryServiceStreamListenersServer{stream}) } type ListenerDiscoveryService_StreamListenersServer interface { Send(*DiscoveryResponse) error Recv() (*DiscoveryRequest, error) grpc.ServerStream } type listenerDiscoveryServiceStreamListenersServer struct { grpc.ServerStream } func (x *listenerDiscoveryServiceStreamListenersServer) Send(m *DiscoveryResponse) error { return x.ServerStream.SendMsg(m) } func (x *listenerDiscoveryServiceStreamListenersServer) Recv() (*DiscoveryRequest, error) { m := new(DiscoveryRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _ListenerDiscoveryService_FetchListeners_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DiscoveryRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ListenerDiscoveryServiceServer).FetchListeners(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/envoy.api.v2.ListenerDiscoveryService/FetchListeners", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ListenerDiscoveryServiceServer).FetchListeners(ctx, req.(*DiscoveryRequest)) } return interceptor(ctx, in, info, handler) } var _ListenerDiscoveryService_serviceDesc = grpc.ServiceDesc{ ServiceName: "envoy.api.v2.ListenerDiscoveryService", HandlerType: (*ListenerDiscoveryServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "FetchListeners", Handler: _ListenerDiscoveryService_FetchListeners_Handler, }, }, Streams: []grpc.StreamDesc{ { 
StreamName: "DeltaListeners", Handler: _ListenerDiscoveryService_DeltaListeners_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "StreamListeners", Handler: _ListenerDiscoveryService_StreamListeners_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "envoy/api/v2/lds.proto", } func (m *LdsDummy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m *LdsDummy) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *LdsDummy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.XXX_unrecognized != nil { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } return len(dAtA) - i, nil } func encodeVarintLds(dAtA []byte, offset int, v uint64) int { offset -= sovLds(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func (m *LdsDummy) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func sovLds(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozLds(x uint64) (n int) { return sovLds(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *LdsDummy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLds } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: LdsDummy: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: LdsDummy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: 
iNdEx = preIndex skippy, err := skipLds(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthLds } if (iNdEx + skippy) < 0 { return ErrInvalidLengthLds } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipLds(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLds } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64
if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } case 1: iNdEx += 8 case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLds } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthLds } iNdEx += length case 3: depth++ case 4: if depth == 0 { return 0, ErrUnexpectedEndOfGroupLds } depth-- case 5: iNdEx += 4 default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } if iNdEx < 0 { return 0, ErrInvalidLengthLds } if depth == 0 { return iNdEx, nil } } return 0, io.ErrUnexpectedEOF } var ( ErrInvalidLengthLds = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowLds = fmt.Errorf("proto: integer overflow") ErrUnexpectedEndOfGroupLds = fmt.Errorf("proto: unexpected end of group") )
{ return 0, ErrIntOverflowLds }
conditional_block
lds.pb.go
// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: envoy/api/v2/lds.proto package envoy_api_v2 import ( context "context" fmt "fmt" _ "github.com/cncf/udpa/go/udpa/annotations" _ "github.com/datawire/ambassador/pkg/api/envoy/annotations" _ "github.com/envoyproxy/protoc-gen-validate/validate" proto "github.com/gogo/protobuf/proto" _ "github.com/gogo/protobuf/types" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" io "io" _ "istio.io/gogo-genproto/googleapis/google/api" math "math" math_bits "math/bits" ) // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. // A compilation error at this line likely means your copy of the // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // [#not-implemented-hide:] Not configuration. Workaround c++ protobuf issue with importing // services: https://github.com/google/protobuf/issues/4221 and protoxform to upgrade the file. type LdsDummy struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` } func (m *LdsDummy) Reset() { *m = LdsDummy{} } func (m *LdsDummy) String() string { return proto.CompactTextString(m) } func (*LdsDummy) ProtoMessage() {} func (*LdsDummy) Descriptor() ([]byte, []int) { return fileDescriptor_34e2cd84a105bcd1, []int{0} } func (m *LdsDummy)
(b []byte) error { return m.Unmarshal(b) } func (m *LdsDummy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { if deterministic { return xxx_messageInfo_LdsDummy.Marshal(b, m, deterministic) } else { b = b[:cap(b)] n, err := m.MarshalToSizedBuffer(b) if err != nil { return nil, err } return b[:n], nil } } func (m *LdsDummy) XXX_Merge(src proto.Message) { xxx_messageInfo_LdsDummy.Merge(m, src) } func (m *LdsDummy) XXX_Size() int { return m.Size() } func (m *LdsDummy) XXX_DiscardUnknown() { xxx_messageInfo_LdsDummy.DiscardUnknown(m) } var xxx_messageInfo_LdsDummy proto.InternalMessageInfo func init() { proto.RegisterType((*LdsDummy)(nil), "envoy.api.v2.LdsDummy") } func init() { proto.RegisterFile("envoy/api/v2/lds.proto", fileDescriptor_34e2cd84a105bcd1) } var fileDescriptor_34e2cd84a105bcd1 = []byte{ // 410 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4b, 0xcd, 0x2b, 0xcb, 0xaf, 0xd4, 0x4f, 0x2c, 0xc8, 0xd4, 0x2f, 0x33, 0xd2, 0xcf, 0x49, 0x29, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x01, 0x8b, 0xeb, 0x25, 0x16, 0x64, 0xea, 0x95, 0x19, 0x49, 0xc9, 0xa0, 0xa8, 0x4a, 0xc9, 0x2c, 0x4e, 0xce, 0x2f, 0x4b, 0x2d, 0xaa, 0x84, 0xa8, 0x95, 0x92, 0x49, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0x05, 0x4b, 0x27, 0xe6, 0xe5, 0xe5, 0x97, 0x24, 0x96, 0x64, 0xe6, 0xe7, 0x41, 0x4d, 0x92, 0x92, 0x83, 0xca, 0x82, 0x79, 0x49, 0xa5, 0x69, 0xfa, 0x29, 0xa5, 0x45, 0x60, 0x05, 0xb8, 0xe4, 0xcb, 0x8b, 0x12, 0x0b, 0x0a, 0x52, 0x8b, 0x60, 0xfa, 0x15, 0xa0, 0x76, 0x23, 0x0c, 0xd6, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x85, 0x99, 0x50, 0x9a, 0x52, 0x90, 0x88, 0xa2, 0x20, 0x37, 0x33, 0xbd, 0x28, 0xb1, 0x04, 0x26, 0x2f, 0x8b, 0x21, 0x5f, 0x5c, 0x92, 0x58, 0x52, 0x0a, 0xb3, 0x40, 0xbc, 0x2c, 0x31, 0x27, 0x33, 0x25, 0xb1, 0x24, 0x55, 0x1f, 0xc6, 0x80, 0x4a, 0x48, 0xa3, 0x86, 0x4d, 0x66, 0x71, 0x49, 0x6a, 0x5e, 0x6a, 0x11, 0x44, 0x52, 0x89, 0x8b, 0x8b, 0xc3, 0x27, 0xa5, 0xd8, 0xa5, 0x34, 0x37, 0xb7, 0xd2, 0xe8, 
0x33, 0x13, 0x97, 0x84, 0x0f, 0x54, 0xda, 0x05, 0x16, 0x38, 0xc1, 0xa9, 0x45, 0x65, 0x99, 0xc9, 0xa9, 0x42, 0x89, 0x5c, 0x7c, 0x2e, 0xa9, 0x39, 0x25, 0x89, 0x30, 0x05, 0xc5, 0x42, 0xca, 0x7a, 0xc8, 0x81, 0xab, 0x07, 0x96, 0x85, 0x6b, 0x0b, 0x4a, 0x2d, 0x2c, 0x4d, 0x2d, 0x2e, 0x91, 0x52, 0xc1, 0xaf, 0xa8, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0x89, 0x41, 0x83, 0xd1, 0x80, 0x51, 0x28, 0x82, 0x8b, 0x3f, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x17, 0x61, 0x87, 0x1c, 0x9a, 0x76, 0x74, 0xe3, 0xe5, 0x71, 0xca, 0xa3, 0x98, 0x5c, 0xcd, 0xc5, 0xe7, 0x96, 0x5a, 0x92, 0x9c, 0x41, 0x45, 0x83, 0x35, 0x9a, 0x2e, 0x3f, 0x99, 0xcc, 0x24, 0xa9, 0x24, 0x8e, 0x92, 0x94, 0xac, 0x60, 0xc1, 0x5b, 0x0c, 0x96, 0x66, 0xb6, 0x62, 0xd4, 0x92, 0x92, 0xed, 0x5a, 0x32, 0xed, 0x33, 0xbb, 0x38, 0x97, 0x28, 0x8a, 0x89, 0x30, 0xa7, 0x38, 0x25, 0x9c, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x9f, 0x66, 0xfc, 0xeb, 0x67, 0x95, 0x16, 0x92, 0x84, 0x28, 0x2d, 0x86, 0x84, 0xbd, 0x1e, 0x3c, 0xce, 0xca, 0x8c, 0x77, 0x35, 0x9c, 0xb8, 0xc8, 0xc6, 0x24, 0xc0, 0xc0, 0x25, 0x95, 0x99, 0x0f, 0x71, 0x62, 0x41, 0x51, 0x7e, 0x45, 0x25, 0x8a, 0x6b, 0x9d, 0x40, 0x51, 0x1a, 0x00, 0x8a, 0xde, 0x00, 0xc6, 0x0e, 0x46, 0xc6, 0x00, 0x8e, 0x24, 0x36, 0x70, 0x64, 0x1b, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd4, 0x85, 0x49, 0xac, 0x27, 0x03, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConn // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion4 // ListenerDiscoveryServiceClient is the client API for ListenerDiscoveryService service. // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
type ListenerDiscoveryServiceClient interface { DeltaListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_DeltaListenersClient, error) StreamListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_StreamListenersClient, error) FetchListeners(ctx context.Context, in *DiscoveryRequest, opts ...grpc.CallOption) (*DiscoveryResponse, error) } type listenerDiscoveryServiceClient struct { cc *grpc.ClientConn } func NewListenerDiscoveryServiceClient(cc *grpc.ClientConn) ListenerDiscoveryServiceClient { return &listenerDiscoveryServiceClient{cc} } func (c *listenerDiscoveryServiceClient) DeltaListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_DeltaListenersClient, error) { stream, err := c.cc.NewStream(ctx, &_ListenerDiscoveryService_serviceDesc.Streams[0], "/envoy.api.v2.ListenerDiscoveryService/DeltaListeners", opts...) if err != nil { return nil, err } x := &listenerDiscoveryServiceDeltaListenersClient{stream} return x, nil } type ListenerDiscoveryService_DeltaListenersClient interface { Send(*DeltaDiscoveryRequest) error Recv() (*DeltaDiscoveryResponse, error) grpc.ClientStream } type listenerDiscoveryServiceDeltaListenersClient struct { grpc.ClientStream } func (x *listenerDiscoveryServiceDeltaListenersClient) Send(m *DeltaDiscoveryRequest) error { return x.ClientStream.SendMsg(m) } func (x *listenerDiscoveryServiceDeltaListenersClient) Recv() (*DeltaDiscoveryResponse, error) { m := new(DeltaDiscoveryResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *listenerDiscoveryServiceClient) StreamListeners(ctx context.Context, opts ...grpc.CallOption) (ListenerDiscoveryService_StreamListenersClient, error) { stream, err := c.cc.NewStream(ctx, &_ListenerDiscoveryService_serviceDesc.Streams[1], "/envoy.api.v2.ListenerDiscoveryService/StreamListeners", opts...) 
if err != nil { return nil, err } x := &listenerDiscoveryServiceStreamListenersClient{stream} return x, nil } type ListenerDiscoveryService_StreamListenersClient interface { Send(*DiscoveryRequest) error Recv() (*DiscoveryResponse, error) grpc.ClientStream } type listenerDiscoveryServiceStreamListenersClient struct { grpc.ClientStream } func (x *listenerDiscoveryServiceStreamListenersClient) Send(m *DiscoveryRequest) error { return x.ClientStream.SendMsg(m) } func (x *listenerDiscoveryServiceStreamListenersClient) Recv() (*DiscoveryResponse, error) { m := new(DiscoveryResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *listenerDiscoveryServiceClient) FetchListeners(ctx context.Context, in *DiscoveryRequest, opts ...grpc.CallOption) (*DiscoveryResponse, error) { out := new(DiscoveryResponse) err := c.cc.Invoke(ctx, "/envoy.api.v2.ListenerDiscoveryService/FetchListeners", in, out, opts...) if err != nil { return nil, err } return out, nil } // ListenerDiscoveryServiceServer is the server API for ListenerDiscoveryService service. type ListenerDiscoveryServiceServer interface { DeltaListeners(ListenerDiscoveryService_DeltaListenersServer) error StreamListeners(ListenerDiscoveryService_StreamListenersServer) error FetchListeners(context.Context, *DiscoveryRequest) (*DiscoveryResponse, error) } // UnimplementedListenerDiscoveryServiceServer can be embedded to have forward compatible implementations. 
type UnimplementedListenerDiscoveryServiceServer struct { } func (*UnimplementedListenerDiscoveryServiceServer) DeltaListeners(srv ListenerDiscoveryService_DeltaListenersServer) error { return status.Errorf(codes.Unimplemented, "method DeltaListeners not implemented") } func (*UnimplementedListenerDiscoveryServiceServer) StreamListeners(srv ListenerDiscoveryService_StreamListenersServer) error { return status.Errorf(codes.Unimplemented, "method StreamListeners not implemented") } func (*UnimplementedListenerDiscoveryServiceServer) FetchListeners(ctx context.Context, req *DiscoveryRequest) (*DiscoveryResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method FetchListeners not implemented") } func RegisterListenerDiscoveryServiceServer(s *grpc.Server, srv ListenerDiscoveryServiceServer) { s.RegisterService(&_ListenerDiscoveryService_serviceDesc, srv) } func _ListenerDiscoveryService_DeltaListeners_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(ListenerDiscoveryServiceServer).DeltaListeners(&listenerDiscoveryServiceDeltaListenersServer{stream}) } type ListenerDiscoveryService_DeltaListenersServer interface { Send(*DeltaDiscoveryResponse) error Recv() (*DeltaDiscoveryRequest, error) grpc.ServerStream } type listenerDiscoveryServiceDeltaListenersServer struct { grpc.ServerStream } func (x *listenerDiscoveryServiceDeltaListenersServer) Send(m *DeltaDiscoveryResponse) error { return x.ServerStream.SendMsg(m) } func (x *listenerDiscoveryServiceDeltaListenersServer) Recv() (*DeltaDiscoveryRequest, error) { m := new(DeltaDiscoveryRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _ListenerDiscoveryService_StreamListeners_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(ListenerDiscoveryServiceServer).StreamListeners(&listenerDiscoveryServiceStreamListenersServer{stream}) } type ListenerDiscoveryService_StreamListenersServer interface { Send(*DiscoveryResponse) error 
Recv() (*DiscoveryRequest, error) grpc.ServerStream } type listenerDiscoveryServiceStreamListenersServer struct { grpc.ServerStream } func (x *listenerDiscoveryServiceStreamListenersServer) Send(m *DiscoveryResponse) error { return x.ServerStream.SendMsg(m) } func (x *listenerDiscoveryServiceStreamListenersServer) Recv() (*DiscoveryRequest, error) { m := new(DiscoveryRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _ListenerDiscoveryService_FetchListeners_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(DiscoveryRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(ListenerDiscoveryServiceServer).FetchListeners(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/envoy.api.v2.ListenerDiscoveryService/FetchListeners", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(ListenerDiscoveryServiceServer).FetchListeners(ctx, req.(*DiscoveryRequest)) } return interceptor(ctx, in, info, handler) } var _ListenerDiscoveryService_serviceDesc = grpc.ServiceDesc{ ServiceName: "envoy.api.v2.ListenerDiscoveryService", HandlerType: (*ListenerDiscoveryServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "FetchListeners", Handler: _ListenerDiscoveryService_FetchListeners_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "DeltaListeners", Handler: _ListenerDiscoveryService_DeltaListeners_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "StreamListeners", Handler: _ListenerDiscoveryService_StreamListeners_Handler, ServerStreams: true, ClientStreams: true, }, }, Metadata: "envoy/api/v2/lds.proto", } func (m *LdsDummy) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) n, err := m.MarshalToSizedBuffer(dAtA[:size]) if err != nil { return nil, err } return dAtA[:n], nil } func (m 
*LdsDummy) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) } func (m *LdsDummy) MarshalToSizedBuffer(dAtA []byte) (int, error) { i := len(dAtA) _ = i var l int _ = l if m.XXX_unrecognized != nil { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } return len(dAtA) - i, nil } func encodeVarintLds(dAtA []byte, offset int, v uint64) int { offset -= sovLds(v) base := offset for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } dAtA[offset] = uint8(v) return base } func (m *LdsDummy) Size() (n int) { if m == nil { return 0 } var l int _ = l if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } return n } func sovLds(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } func sozLds(x uint64) (n int) { return sovLds(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *LdsDummy) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowLds } if iNdEx >= l { return io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= uint64(b&0x7F) << shift if b < 0x80 { break } } fieldNum := int32(wire >> 3) wireType := int(wire & 0x7) if wireType == 4 { return fmt.Errorf("proto: LdsDummy: wiretype end group for non-group") } if fieldNum <= 0 { return fmt.Errorf("proto: LdsDummy: illegal tag %d (wire type %d)", fieldNum, wire) } switch fieldNum { default: iNdEx = preIndex skippy, err := skipLds(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { return ErrInvalidLengthLds } if (iNdEx + skippy) < 0 { return ErrInvalidLengthLds } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } if iNdEx > l { return io.ErrUnexpectedEOF } return nil } func skipLds(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 depth := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLds } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { break } } wireType := int(wire & 0x7) switch wireType { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLds } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } iNdEx++ if dAtA[iNdEx-1] < 0x80 { break } } case 1: iNdEx += 8 case 2: var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { return 0, ErrIntOverflowLds } if iNdEx >= l { return 0, io.ErrUnexpectedEOF } b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { break } } if length < 0 { return 0, ErrInvalidLengthLds } iNdEx += length case 3: depth++ case 4: if depth == 0 { return 0, ErrUnexpectedEndOfGroupLds } depth-- case 5: iNdEx += 4 default: return 0, fmt.Errorf("proto: illegal wireType %d", wireType) } if iNdEx < 0 { return 0, ErrInvalidLengthLds } if depth == 0 { return iNdEx, nil } } return 0, io.ErrUnexpectedEOF } var ( ErrInvalidLengthLds = fmt.Errorf("proto: negative length found during unmarshaling") ErrIntOverflowLds = fmt.Errorf("proto: integer overflow") ErrUnexpectedEndOfGroupLds = fmt.Errorf("proto: unexpected end of group") )
XXX_Unmarshal
identifier_name
chat.js
// pages/contact/contact.js var util = require('../../utils/util.js') const app = getApp(); var api = require('../../config/api.js'); var inputVal = ''; var msgList = []; var windowWidth = wx.getSystemInfoSync().windowWidth; var windowHeight = wx.getSystemInfoSync().windowHeight; var keyHeight = 0; var SocketTask; var socketOpen = false; Page({ /** * 页面的初始数据 */ data: { scrollHeight: '100vh', inputBottom: 0, active: -1, show: false, showTop: true }, /** * 生命周期函数--监听页面加载 */ onLoad: function(options) { console.log(options); var that = this; that.initProduct(options); that.initLocalHostHis(); that.setData({ cusHeadIcon: app.globalData.userInfo.avatarUrl, }); }, /** * 生命周期函数--监听页面显示 */ onShow: function() { var that = this; if (!socketOpen) { that.webSocket() } }, /** * 页面相关事件处理函数--监听用户下拉动作 */ onPullDownRefresh: function() { }, /** * 页面上拉触底事件的处理函数 */ onReachBottom: function() { }, /** * 连接websocket * */ webSocket: function() { var that = this; var token = wx.getStorageSync(api.HeadToken); if (!token) { wx.showToast({ title: "请授权登录后发布", image: '../../images/hint1.png', mask: true, }) setTimeout(function() { wx.navigateTo({ url: '../mine/mine', }) }, 1000) } else { wx.request({ url: api.CheckUser, header: { 'content-type': 'application/json', 'Authorization': token // 缓存中token信息 }, success: function(res) { if (res.data.status == 200) { // 进行判断 if (res.data.data == that.data.itemInfo.openid){ wx.showToast({ title: "不可与自己聊天", image: '../../images/hint1.png', mask: true, }) setTimeout(function () { //要延时执行的代码 wx.navigateBack({ delta: 1 }); }, 1000) } that.setData({ myOpenId: res.data.data }) // 连接websocket SocketTask = wx.connectSocket({ url: api.WsIM + res.data.data + '/' + that.data.itemInfo.openid, header: { 'content-type': 'application/json' }, method: 'GET', success: function(res) {}, fail: function(err) { wx.showToast({ title: '网络异常!', }) console.log(err) }, }) that.initWebsocket(); } else if (res.data.status == 402) { wx.showToast({ title: res.data.msg, image: 
'../../images/hint2.png', mask: true, }) } else { wx.showToast({ title: "请授权登录后发布", image: '../../images/hint1.png', mask: true, }) setTimeout(function() { //要延时执行的代码 wx.navigateTo({ url: '../mine/mine', }) }, 1000) } } }) } }, /**初始化websocket */ initWebsocket: function() { var that = this; SocketTask.onOpen(res => { socketOpen = true; console.log('监听 WebSocket 连接打开事件。', res) }) SocketTask.onClose(onClose => { console.log('监听 WebSocket 连接关闭事件。', onClose) socketOpen = false; // 更新聊天列表 const value = wx.getStorageSync(api.IM); let chatList = []; if (value) { chatList = JSON.parse(value); } for (let i = 0; i < chatList.length; i++) { if (chatList[i].openId == that.data.itemInfo.openid) { if (msgList.length > 0) { chatList[i].lastMsg = msgList[msgList.length - 1].content; chatList[i].lastDate = msgList[msgList.length - 1].createTime; break; } if (i == chatList.length - 1) { chatList.push(msgList[msgList.length - 1]); } } } wx.setStorageSync(api.IM, JSON.stringify(chatList)); wx.setStorageSync(api.IM_Prefix + that.data.itemInfo.openid, JSON.stringify(msgList)) }) SocketTask.onError(onError => { console.log('监听 WebSocket 错误。错误信息', onError) socketOpen = false }) SocketTask.onMessage(onMessage => { var msg = JSON.parse(onMessage.data); if (msg.status != null) { // 历史记录 msgList = util.fomartMsgList(msgList.concat(msg.data)); that.setData({ msgList, his_index: msgList.length - 1, today: util.toWeiXinString(new Date()) }); } else { // 非历史记录 msg.createTime = new Date(); msgList.push(formatNewMsg(msg)); that.setData({ msgList }) } that.setData({ toView: 'msg-' + (msgList.length - 1) }) }) }, // 加载本地聊天记录 initLocalHostHis: function() { var that = this; const his = wx.getStorageSync(api.IM_Prefix + that.data.itemInfo.openid); if (!his || his == null || his == '') { return; } msgList = JSON.parse(his); inputVal = ''; that.setData({ msgList, inputVal }) }, // 获取商家信息 initProduct: function(options) { var that = this; that.setData({ itemInfo: options })
/** * 获取聚焦 */ focus: function(e) { keyHeight = e.detail.height; this.setData({ scrollHeight: (windowHeight - keyHeight) + 'px' }); this.setData({ toView: 'msg-' + (msgList.length - 1), inputBottom: keyHeight + 'px' }) }, //失去聚焦(软键盘消失) blur: function(e) { this.setData({ scrollHeight: '100vh', inputBottom: 0 }) this.setData({ toView: 'msg-' + (msgList.length - 1) }) }, /** * 发送点击监听 */ sendClick: function(e) { let that = this; var data = e.detail.value; if (data.trim() == '') { return; } if (!socketOpen) { that.webSocket() } var msg = { msgType: 1, content: data }; sendSocketMessage(JSON.stringify(msg)); msgList.push(formatNewMsg({ msgType: 1, sendTo: that.data.itemInfo.openid, createBy: that.data.myOpenId, createTime: new Date(), delFlag: 1, isOffLine: 1, content: data })); inputVal = ''; this.setData({ msgList, inputVal, toView: 'msg-' + (msgList.length - 1), }) }, // 发送商品信息 sendItem: function() { let that = this; if (!socketOpen) { that.webSocket() } var msg = { msgType: 3, content: that.data.itemInfo.id + '_#_' + that.data.itemInfo.img + '_#_' + that.data.itemInfo.title }; sendSocketMessage(JSON.stringify(msg)); msgList.push(formatNewMsg({ msgType: 3, sendTo: that.data.itemInfo.openid, createBy: that.data.myOpenId, createTime: new Date(), delFlag: 1, isOffLine: 1, content: that.data.itemInfo.id + '_#_' + that.data.itemInfo.img + '_#_' + that.data.itemInfo.title })); this.setData({ msgList, toView: 'msg-' + (msgList.length - 1), }) }, /** * 退回上一页 */ toBackClick: function() { wx.navigateBack({}) }, /** * 生命周期函数--监听页面隐藏 */ onHide: function() { SocketTask.close(function(close) {}) }, /** * 生命周期函数--监听页面卸载 */ onUnload: function() { SocketTask.close(function(close) {}) }, sendMsgImg() { let that = this; if (!socketOpen) { that.webSocket() } wx.chooseImage({ count: 1, sizeType: ['original', 'compressed'], sourceType: ['album', 'camera'], success: function (res) { var tempFilePaths = res.tempFilePaths; var token = wx.getStorageSync('Authorization'); wx.uploadFile({ url: 
api.ChatUploadFile, filePath: tempFilePaths[0], name: 'uploadFile', header: { "Content-Type": "multipart/form-data", 'Authorization': token // 缓存中token信息 }, success: function (res) { var data = JSON.parse(res.data); if(data.status==200){ var msg = { msgType: 2, content: data.data }; sendSocketMessage(JSON.stringify(msg)); msgList.push(formatNewMsg({ msgType: 2, sendTo: that.data.itemInfo.openid, createBy: that.data.myOpenId, createTime: new Date(), delFlag: 1, isOffLine: 1, content: data.data })); that.setData({ msgList, toView: 'msg-' + (msgList.length - 1), }) } else { wx.showToast({ title: "发布失败", image: '../../images/hint1.png', mask: true, }) } } }); } }) }, // 预览图片 previewImage: function (e) { var current = e.target.dataset.src; console.log(current); wx.previewImage({ current: current, // 当前显示图片的http链接 urls: msgList.filter(item => item.msgType == 2).map(function (item) { return item.content; }) }) }, onChange(event) { if(event.detail===3) { this.setData({ showTop: false }); return; } if (event.detail === 0) { this.setData({ contact: {value: this.data.itemInfo.phone, type: '电话'} }); } if (event.detail === 1) { this.setData({ contact: { value: this.data.itemInfo.wechat, type: '微信' } }); } if (event.detail === 2) { this.setData({ contact: { value: this.data.itemInfo.qq, type: 'QQ' } }); } this.setData({ show: true }); }, onClose() { this.setData({ show: false}); } }) function sendSocketMessage(msg) { SocketTask.send({ data: msg }) } function formatNewMsg(msg) { if (msgList != null && msgList.length > 0) { let createTimeFirst = new Date(msg.createTime).getTime(); let createTimeSecond = new Date(msgList[msgList.length - 1].createTime).getTime(); let minute = (createTimeFirst - createTimeSecond) / 1000 / 60; if (minute > 5) { msg.format = true; msg.time = util.toWeiXinString(new Date(msg.createTime)); } } else { msg.format = true; msg.time = util.toWeiXinString(new Date(msg.createTime)); } return msg; }
},
random_line_split
chat.js
// pages/contact/contact.js var util = require('../../utils/util.js') const app = getApp(); var api = require('../../config/api.js'); var inputVal = ''; var msgList = []; var windowWidth = wx.getSystemInfoSync().windowWidth; var windowHeight = wx.getSystemInfoSync().windowHeight; var keyHeight = 0; var SocketTask; var socketOpen = false; Page({ /** * 页面的初始数据 */ data: { scrollHeight: '100vh', inputBottom: 0, active: -1, show: false, showTop: true }, /** * 生命周期函数--监听页面加载 */ onLoad: function(options) { console.log(options); var that = this; that.initProduct(options); that.initLocalHostHis(); that.setData({ cusHeadIcon: app.globalData.userInfo.avatarUrl, }); }, /** * 生命周期函数--监听页面显示 */ onShow: function() { var that = this; if (!socketOpen) { that.webSocket() } }, /** * 页面相关事件处理函数--监听用户下拉动作 */ onPullDownRefresh: function() { }, /** * 页面上拉触底事件的处理函数 */ onReachBottom: function() { }, /** * 连接websocket * */ webSocket: function() { var that = this; var token = wx.getStorageSync(api.HeadToken); if (!token) { wx.showToast({ title: "请授权登录后发布", image: '../../images/hint1.png', mask: true, }) setTimeout(function() { wx.navigateTo({ url: '../mine/mine', }) }, 1000) } else { wx.request({ url: api.CheckUser, header: { 'content-type': 'application/json', 'Authorization': token // 缓存中token信息 }, success: function(res) { if (res.data.status == 200) { // 进行判断 if (res.data.data == that.data.itemInfo.openid){ wx.showToast({ title: "不可与自己聊天",
wx.showToast({ title: "请授权登录后发布", image: '../../images/hint1.png', mask: true, }) setTimeout(function() { //要延时执行的代码 wx.navigateTo({ url: '../mine/mine', }) }, 1000) } } }) } }, /**初始化websocket */ initWebsocket: function() { var that = this; SocketTask.onOpen(res => { socketOpen = true; console.log('监听 WebSocket 连接打开事件。', res) }) SocketTask.onClose(onClose => { console.log('监听 WebSocket 连接关闭事件。', onClose) socketOpen = false; // 更新聊天列表 const value = wx.getStorageSync(api.IM); let chatList = []; if (value) { chatList = JSON.parse(value); } for (let i = 0; i < chatList.length; i++) { if (chatList[i].openId == that.data.itemInfo.openid) { if (msgList.length > 0) { chatList[i].lastMsg = msgList[msgList.length - 1].content; chatList[i].lastDate = msgList[msgList.length - 1].createTime; break; } if (i == chatList.length - 1) { chatList.push(msgList[msgList.length - 1]); } } } wx.setStorageSync(api.IM, JSON.stringify(chatList)); wx.setStorageSync(api.IM_Prefix + that.data.itemInfo.openid, JSON.stringify(msgList)) }) SocketTask.onError(onError => { console.log('监听 WebSocket 错误。错误信息', onError) socketOpen = false }) SocketTask.onMessage(onMessage => { var msg = JSON.parse(onMessage.data); if (msg.status != null) { // 历史记录 msgList = util.fomartMsgList(msgList.concat(msg.data)); that.setData({ msgList, his_index: msgList.length - 1, today: util.toWeiXinString(new Date()) }); } else { // 非历史记录 msg.createTime = new Date(); msgList.push(formatNewMsg(msg)); that.setData({ msgList }) } that.setData({ toView: 'msg-' + (msgList.length - 1) }) }) }, // 加载本地聊天记录 initLocalHostHis: function() { var that = this; const his = wx.getStorageSync(api.IM_Prefix + that.data.itemInfo.openid); if (!his || his == null || his == '') { return; } msgList = JSON.parse(his); inputVal = ''; that.setData({ msgList, inputVal }) }, // 获取商家信息 initProduct: function(options) { var that = this; that.setData({ itemInfo: options }) }, /** * 获取聚焦 */ focus: function(e) { keyHeight = e.detail.height; this.setData({ 
scrollHeight: (windowHeight - keyHeight) + 'px' }); this.setData({ toView: 'msg-' + (msgList.length - 1), inputBottom: keyHeight + 'px' }) }, //失去聚焦(软键盘消失) blur: function(e) { this.setData({ scrollHeight: '100vh', inputBottom: 0 }) this.setData({ toView: 'msg-' + (msgList.length - 1) }) }, /** * 发送点击监听 */ sendClick: function(e) { let that = this; var data = e.detail.value; if (data.trim() == '') { return; } if (!socketOpen) { that.webSocket() } var msg = { msgType: 1, content: data }; sendSocketMessage(JSON.stringify(msg)); msgList.push(formatNewMsg({ msgType: 1, sendTo: that.data.itemInfo.openid, createBy: that.data.myOpenId, createTime: new Date(), delFlag: 1, isOffLine: 1, content: data })); inputVal = ''; this.setData({ msgList, inputVal, toView: 'msg-' + (msgList.length - 1), }) }, // 发送商品信息 sendItem: function() { let that = this; if (!socketOpen) { that.webSocket() } var msg = { msgType: 3, content: that.data.itemInfo.id + '_#_' + that.data.itemInfo.img + '_#_' + that.data.itemInfo.title }; sendSocketMessage(JSON.stringify(msg)); msgList.push(formatNewMsg({ msgType: 3, sendTo: that.data.itemInfo.openid, createBy: that.data.myOpenId, createTime: new Date(), delFlag: 1, isOffLine: 1, content: that.data.itemInfo.id + '_#_' + that.data.itemInfo.img + '_#_' + that.data.itemInfo.title })); this.setData({ msgList, toView: 'msg-' + (msgList.length - 1), }) }, /** * 退回上一页 */ toBackClick: function() { wx.navigateBack({}) }, /** * 生命周期函数--监听页面隐藏 */ onHide: function() { SocketTask.close(function(close) {}) }, /** * 生命周期函数--监听页面卸载 */ onUnload: function() { SocketTask.close(function(close) {}) }, sendMsgImg() { let that = this; if (!socketOpen) { that.webSocket() } wx.chooseImage({ count: 1, sizeType: ['original', 'compressed'], sourceType: ['album', 'camera'], success: function (res) { var tempFilePaths = res.tempFilePaths; var token = wx.getStorageSync('Authorization'); wx.uploadFile({ url: api.ChatUploadFile, filePath: tempFilePaths[0], name: 'uploadFile', header: { 
"Content-Type": "multipart/form-data", 'Authorization': token // 缓存中token信息 }, success: function (res) { var data = JSON.parse(res.data); if(data.status==200){ var msg = { msgType: 2, content: data.data }; sendSocketMessage(JSON.stringify(msg)); msgList.push(formatNewMsg({ msgType: 2, sendTo: that.data.itemInfo.openid, createBy: that.data.myOpenId, createTime: new Date(), delFlag: 1, isOffLine: 1, content: data.data })); that.setData({ msgList, toView: 'msg-' + (msgList.length - 1), }) } else { wx.showToast({ title: "发布失败", image: '../../images/hint1.png', mask: true, }) } } }); } }) }, // 预览图片 previewImage: function (e) { var current = e.target.dataset.src; console.log(current); wx.previewImage({ current: current, // 当前显示图片的http链接 urls: msgList.filter(item => item.msgType == 2).map(function (item) { return item.content; }) }) }, onChange(event) { if(event.detail===3) { this.setData({ showTop: false }); return; } if (event.detail === 0) { this.setData({ contact: {value: this.data.itemInfo.phone, type: '电话'} }); } if (event.detail === 1) { this.setData({ contact: { value: this.data.itemInfo.wechat, type: '微信' } }); } if (event.detail === 2) { this.setData({ contact: { value: this.data.itemInfo.qq, type: 'QQ' } }); } this.setData({ show: true }); }, onClose() { this.setData({ show: false}); } }) function sendSocketMessage(msg) { SocketTask.send({ data: msg }) } function formatNewMsg(msg) { if (msgList != null && msgList.length > 0) { let createTimeFirst = new Date(msg.createTime).getTime(); let createTimeSecond = new Date(msgList[msgList.length - 1].createTime).getTime(); let minute = (createTimeFirst - createTimeSecond) / 1000 / 60; if (minute > 5) { msg.format = true; msg.time = util.toWeiXinString(new Date(msg.createTime)); } } else { msg.format = true; msg.time = util.toWeiXinString(new Date(msg.createTime)); } return msg; }
image: '../../images/hint1.png', mask: true, }) setTimeout(function () { //要延时执行的代码 wx.navigateBack({ delta: 1 }); }, 1000) } that.setData({ myOpenId: res.data.data }) // 连接websocket SocketTask = wx.connectSocket({ url: api.WsIM + res.data.data + '/' + that.data.itemInfo.openid, header: { 'content-type': 'application/json' }, method: 'GET', success: function(res) {}, fail: function(err) { wx.showToast({ title: '网络异常!', }) console.log(err) }, }) that.initWebsocket(); } else if (res.data.status == 402) { wx.showToast({ title: res.data.msg, image: '../../images/hint2.png', mask: true, }) } else {
conditional_block
chat.js
// pages/contact/contact.js var util = require('../../utils/util.js') const app = getApp(); var api = require('../../config/api.js'); var inputVal = ''; var msgList = []; var windowWidth = wx.getSystemInfoSync().windowWidth; var windowHeight = wx.getSystemInfoSync().windowHeight; var keyHeight = 0; var SocketTask; var socketOpen = false; Page({ /** * 页面的初始数据 */ data: { scrollHeight: '100vh', inputBottom: 0, active: -1, show: false, showTop: true }, /** * 生命周期函数--监听页面加载 */ onLoad: function(options) { console.log(options); var that = this; that.initProduct(options); that.initLocalHostHis(); that.setData({ cusHeadIcon: app.globalData.userInfo.avatarUrl, }); }, /** * 生命周期函数--监听页面显示 */ onShow: function() { var that = this; if (!socketOpen) { that.webSocket() } }, /** * 页面相关事件处理函数--监听用户下拉动作 */ onPullDownRefresh: function() { }, /** * 页面上拉触底事件的处理函数 */ onReachBottom: function() { }, /** * 连接websocket * */ webSocket: function() { var that = this; var token = wx.getStorageSync(api.HeadToken); if (!token) { wx.showToast({ title: "请授权登录后发布", image: '../../images/hint1.png', mask: true, }) setTimeout(function() { wx.navigateTo({ url: '../mine/mine', }) }, 1000) } else { wx.request({ url: api.CheckUser, header: { 'content-type': 'application/json', 'Authorization': token // 缓存中token信息 }, success: function(res) { if (res.data.status == 200) { // 进行判断 if (res.data.data == that.data.itemInfo.openid){ wx.showToast({ title: "不可与自己聊天", image: '../../images/hint1.png', mask: true, }) setTimeout(function () { //要延时执行的代码 wx.navigateBack({ delta: 1 }); }, 1000) } that.setData({ myOpenId: res.data.data }) // 连接websocket SocketTask = wx.connectSocket({ url: api.WsIM + res.data.data + '/' + that.data.itemInfo.openid, header: { 'content-type': 'application/json' }, method: 'GET', success: function(res) {}, fail: function(err) { wx.showToast({ title: '网络异常!', }) console.log(err) }, }) that.initWebsocket(); } else if (res.data.status == 402) { wx.showToast({ title: res.data.msg, image: 
'../../images/hint2.png', mask: true, }) } else { wx.showToast({ title: "请授权登录后发布", image: '../../images/hint1.png', mask: true, }) setTimeout(function() { //要延时执行的代码 wx.navigateTo({ url: '../mine/mine', }) }, 1000) } } }) } }, /**初始化websocket */ initWebsocket: function() { var that = this; SocketTask.onOpen(res => { socketOpen = true; console.log('监听 WebSocket 连接打开事件。', res) }) SocketTask.onClose(onClose => { console.log('监听 WebSocket 连接关闭事件。', onClose) socketOpen = false; // 更新聊天列表 const value = wx.getStorageSync(api.IM); let chatList = []; if (value) { chatList = JSON.parse(value); } for (let i = 0; i < chatList.length; i++) { if (chatList[i].openId == that.data.itemInfo.openid) { if (msgList.length > 0) { chatList[i].lastMsg = msgList[msgList.length - 1].content; chatList[i].lastDate = msgList[msgList.length - 1].createTime; break; } if (i == chatList.length - 1) { chatList.push(msgList[msgList.length - 1]); } } } wx.setStorageSync(api.IM, JSON.stringify(chatList)); wx.setStorageSync(api.IM_Prefix + that.data.itemInfo.openid, JSON.stringify(msgList)) }) SocketTask.onError(onError => { console.log('监听 WebSocket 错误。错误信息', onError) socketOpen = false }) SocketTask.onMessage(onMessage => { var msg = JSON.parse(onMessage.data); if (msg.status != null) { // 历史记录 msgList = util.fomartMsgList(msgList.concat(msg.data)); that.setData({ msgList, his_index: msgList.length - 1, today: util.toWeiXinString(new Date()) }); } else { // 非历史记录 msg.createTime = new Date(); msgList.push(formatNewMsg(msg)); that.setData({ msgList }) } that.setData({ toView: 'msg-' + (msgList.length - 1) }) }) }, // 加载本地聊天记录 initLocalHostHis: function() { var that = this; const his = wx.getStorageSync(api.IM_Prefix + that.data.itemInfo.openid); if (!his || his == null || his == '') { return; } msgList = JSON.parse(his); inputVal = ''; that.setData({ msgList, inputVal }) }, // 获取商家信息 initProduct: function(options) { var that = this; that.setData({ itemInfo: options }) }, /** * 获取聚焦 */ focus: 
function(e) { keyHeight = e.detail.height; this.setData({ scrollHeight: (windowHeight - keyHeight) + 'px' }); this.setData({ toView: 'msg-' + (msgList.length - 1), inputBottom: keyHeight + 'px' }) }, //失去聚焦(软键盘消失) blur: function(e) { this.setData({ scrollHeight: '100vh', inputBottom: 0 }) this.setData({ toView: 'msg-' + (msgList.length - 1) }) }, /** * 发送点击监听 */ sendClick: function(e) { let that = this; var data = e.detail.value; if (data.trim() == '') { return; } if (!socketOpen) { that.webSocket() } var msg = { msgType: 1, content: data }; sendSocketMessage(JSON.stringify(msg)); msgList.push(formatNewMsg({ msgType: 1, sendTo: that.data.itemInfo.openid, createBy: that.data.myOpenId, createTime: new Date(), delFlag: 1, isOffLine: 1, content: data })); inputVal = ''; this.setData({ msgList, inputVal, toView: 'msg-' + (msgList.length - 1), }) }, // 发送商品信息 sendItem: function() { let that = this; if (!socketOpen) { that.webSocket() } var msg = { msgType: 3, content: that.data.itemInfo.id + '_#_' + that.data.itemInfo.img + '_#_' + that.data.itemInfo.title }; sendSocketMessage(JSON.stringify(msg)); msgList.push(formatNewMsg({ msgType: 3, sendTo: that.data.itemInfo.openid, createBy: that.data.myOpenId, createTime: new Date(), delFlag: 1, isOffLine: 1, content: that.data.itemInfo.id + '_#_' + that.data.itemInfo.img + '_#_' + that.data.itemInfo.title })); this.setData({ msgList, toView: 'msg-' + (msgList.length - 1), }) }, /** * 退回上一页 */ toBackClick: function() { wx.navigateBack({}) }, /** * 生命周期函数--监听页面隐藏 */ onHide: function() { SocketTask.close(function(close) {}) }, /** * 生命周期函数--监听页面卸载 */ onUnload: function() { SocketTask.close(function(close) {}) }, sendMsgImg() { let that = this; if (!socketOpen) { that.webSocket() } wx.chooseImage({ count: 1, sizeType: ['original', 'compressed'], sourceType: ['album', 'camera'], success: function (res) { var tempFilePaths = res.tempFilePaths; var token = wx.getStorageSync('Authorization'); wx.uploadFile({ url: api.ChatUploadFile, 
filePath: tempFilePaths[0], name: 'uploadFile', header: { "Content-Type": "multipart/form-data", 'Authorization': token // 缓存中token信息 }, success: function (res) { var data = JSON.parse(res.data); if(data.status==200){ var msg = { msgType: 2, content: data.data }; sendSocketMessage(JSON.stringify(msg)); msgList.push(formatNewMsg({ msgType: 2, sendTo: that.data.itemInfo.openid, createBy: that.data.myOpenId, createTime: new Date(), delFlag: 1, isOffLine: 1, content: data.data })); that.setData({ msgList, toView: 'msg-' + (msgList.length - 1), }) } else { wx.showToast({ title: "发布失败", image: '../../images/hint1.png', mask: true, }) } } }); } }) }, // 预览图片 previewImage: function (e) { var current = e.target.dataset.src; console.log(current); wx.previewImage({ current: current, // 当前显示图片的http链接 urls: msgList.filter(item => item.msgType == 2).map(function (item) { return item.content; }) }) }, onChange(event) { if(event.detail===3) { this.setData({ showTop: false }); return; } if (event.detail === 0) { this.setData({ contact: {value: this.data.itemInfo.phone, type: '电话'} }); } if (event.detail === 1) { this.setData({ contact: { value: this.data.itemInfo.wechat, type: '微信' } }); } if (event.detail === 2) { this.setData({ contact: { value: this.data.itemInfo.qq, type: 'QQ' } }); } this.setData({ show: true }); }, onClose() { this.setData({ show: false}); } }) function sendSocketMessage(msg) { SocketTask.send({ data: msg }) } function formatNewMsg(msg) { if (msgList != null && msgList.length > 0) { let createTimeFirst = new Date(msg.createTime).getTime(); let createTimeSecond = new Date(msgList[msgList.length - 1].createTime).getTime(); let minute = (createTimeFirst - createTimeSecond) / 1000 / 60; if (minute > 5) { msg.format = true; msg.time = util.toWeiXinString(new Date(msg.createT
= true; msg.time = util.toWeiXinString(new Date(msg.createTime)); } return msg; }
ime)); } } else { msg.format
identifier_body
chat.js
// pages/contact/contact.js var util = require('../../utils/util.js') const app = getApp(); var api = require('../../config/api.js'); var inputVal = ''; var msgList = []; var windowWidth = wx.getSystemInfoSync().windowWidth; var windowHeight = wx.getSystemInfoSync().windowHeight; var keyHeight = 0; var SocketTask; var socketOpen = false; Page({ /** * 页面的初始数据 */ data: { scrollHeight: '100vh', inputBottom: 0, active: -1, show: false, showTop: true }, /** * 生命周期函数--监听页面加载 */ onLoad: function(options) { console.log(options); var that = this; that.initProduct(options); that.initLocalHostHis(); that.setData({ cusHeadIcon: app.globalData.userInfo.avatarUrl, }); }, /** * 生命周期函数--监听页面显示 */ onShow: function() { var that = this; if (!socketOpen) { that.webSocket() } }, /** * 页面相关事件处理函数--监听用户下拉动作 */ onPullDownRefresh: function() { }, /** * 页面上拉触底事件的处理函数 */ onReachBottom: function() { }, /** * 连接websocket * */ webSocket: function() { var that = this; var token = wx.getStorageSync(api.HeadToken); if (!token) { wx.showToast({ title: "请授权登录后发布", image: '../../images/hint1.png', mask: true, }) setTimeout(function() { wx.navigateTo({ url: '../mine/mine', }) }, 1000) } else { wx.request({ url: api.CheckUser, header: { 'content-type': 'application/json', 'Authorization': token // 缓存中token信息 }, success: function(res) { if (res.data.status == 200) { // 进行判断 if (res.data.data == that.data.itemInfo.openid){ wx.showToast({ title: "不可与自己聊天", image: '../../images/hint1.png', mask: true, }) setTimeout(function () { //要延时执行的代码 wx.navigateBack({ delta: 1 }); }, 1000) } that.setData({ myOpenId: res.data.data }) // 连接websocket SocketTask = wx.connectSocket({ url: api.WsIM + res.data.data + '/' + that.data.itemInfo.openid, header: { 'content-type': 'application/json' }, method: 'GET', success: function(res) {}, fail: function(err) { wx.showToast({ title: '网络异常!', }) console.log(err) }, }) that.initWebsocket(); } else if (res.data.status == 402) { wx.showToast({ title: res.data.msg, image: 
'../../images/hint2.png', mask: true, }) } else { wx.showToast({ title: "请授权登录后发布", image: '../../images/hint1.png', mask: true, }) setTimeout(function() { //要延时执行的代码 wx.navigateTo({ url: '../mine/mine', }) }, 1000) } } }) } }, /**初始化websocket */ initWebsocket: function() { var that = this; SocketTask.onOpen(res => { socketOpen = true; console.log('监听 WebSocket 连接打开事件。', res) }) SocketTask.onClose(onClose => { console.log('监听 WebSocket 连接关闭事件。', onClose) socketOpen = false; // 更新聊天列表 const value = wx.getStorageSync(api.IM); let chatList = []; if (value) { chatList = JSON.parse(value); } for (let i = 0; i < chatList.length; i++) { if (chatList[i].openId == that.data.itemInfo.openid) { if (msgList.length > 0) { chatList[i].lastMsg = msgList[msgList.length - 1].content; chatList[i].lastDate = msgList[msgList.length - 1].createTime; break; } if (i == chatList.length - 1) { chatList.push(msgList[msgList.length - 1]); } } } wx.setStorageSync(api.IM, JSON.stringify(chatList)); wx.setStorageSync(api.IM_Prefix + that.data.itemInfo.openid, JSON.stringify(msgList)) }) SocketTask.onError(onError => { console.log('监听 WebSocket 错误。错误信息', onError) socketOpen = false }) SocketTask.onMessage(onMessage => { var msg = JSON.parse(onMessage.data); if (msg.status != null) { // 历史记录 msgList = util.fomartMsgList(msgList.concat(msg.data)); that.setData({ msgList, his_index: msgList.length - 1, today: util.toWeiXinString(new Date()) }); } else { // 非历史记录 msg.createTime = new Date(); msgList.push(formatNewMsg(msg)); that.setData({ msgList }) } that.setData({ toView: 'msg-' + (msgList.length - 1) }) }) }, // 加载本地聊天记录 initLocalHostHis: function() { var that = this; const his = wx.getStorageSync(api.IM_Prefix + that.data.itemInfo.openid); if (!his || his == null || his == '') { return; } msgList = JSON.parse(his); inputVal = ''; that.setData({ msgList, inputVal }) }, // 获取商家信息 initProduct: function(options) { var that = this; that.setData({ itemInfo: options }) }, /** * 获取聚焦 */ focus: 
function(e) { keyHeight = e.detail.height; this.setData({ scrollHeight: (windowHeight - keyHeight) + 'px' }); this.setData({ toView: 'msg-' + (msgList.length - 1), inputBottom: keyHeight + 'px' }) }, //失去聚焦(软键盘消失) blur: function(e) { this.setData({ scrollHeight: '100vh', inputBottom: 0 }) this.setData({ toView: 'msg-' + (msgList.length - 1) }) }, /** * 发送点击监听 */ sendClick: function(e) { let that = this; var data = e.detail.value; if (data.trim() == '') { return; } if (!socketOpen) { that.webSocket() } var msg = { msgType: 1, content: data }; sendSocketMessage(JSON.stringify(msg)); msgList.push(formatNewMsg({ msgType: 1, sendTo: that.data.itemInfo.openid, createBy: that.data.myOpenId, createTime: new Date(), delFlag: 1, isOffLine: 1, content: data })); inputVal = ''; this.setData({ msgList, inputVal, toView: 'msg-' + (msgList.length - 1), }) }, // 发送商品信息 sendItem: function() { let that = this; if (!socketOpen) { that.webSocket() } var msg = { msgType: 3, content: that.data.itemInfo.id + '_#_' + that.data.itemInfo.img + '_#_' + that.data.itemInfo.title }; sendSocketMessage(JSON.stringify(msg)); msgList.push(formatNewMsg({ msgType: 3, sendTo: that.data.itemInfo.openid, createBy: that.data.myOpenId, createTime: new Date(), delFlag: 1, isOffLine: 1, content: that.data.itemInfo.id + '_#_' + that.data.itemInfo.img + '_#_' + that.data.itemInfo.title })); this.setData({ msgList, toView: 'msg-' + (msgList.length - 1), }) }, /** * 退回上一页 */ toBackClick: function() { wx.navigateBack({}) }, /** * 生命周期函数--监听页面隐藏 */ onHide: function() { SocketTask.close(function(close) {}) }, /** * 生命周期函数--监听页面卸载 */ onUnload: function() { SocketTask.close(function(close) {}) }, sendMsgImg() { let that = this; if (!socketOpen) { that.webSocket() } wx.chooseImage({ count: 1, sizeType: ['original', 'compressed'], sourceType: ['album', 'camera'], success: function (res) { var tempFilePaths = res.tempFilePaths; var token = wx.getStorageSync('Authorization'); wx.uploadFile({ url: api.ChatUploadFile, 
filePath: tempFilePaths[0], name: 'uploadF
header: { "Content-Type": "multipart/form-data", 'Authorization': token // 缓存中token信息 }, success: function (res) { var data = JSON.parse(res.data); if(data.status==200){ var msg = { msgType: 2, content: data.data }; sendSocketMessage(JSON.stringify(msg)); msgList.push(formatNewMsg({ msgType: 2, sendTo: that.data.itemInfo.openid, createBy: that.data.myOpenId, createTime: new Date(), delFlag: 1, isOffLine: 1, content: data.data })); that.setData({ msgList, toView: 'msg-' + (msgList.length - 1), }) } else { wx.showToast({ title: "发布失败", image: '../../images/hint1.png', mask: true, }) } } }); } }) }, // 预览图片 previewImage: function (e) { var current = e.target.dataset.src; console.log(current); wx.previewImage({ current: current, // 当前显示图片的http链接 urls: msgList.filter(item => item.msgType == 2).map(function (item) { return item.content; }) }) }, onChange(event) { if(event.detail===3) { this.setData({ showTop: false }); return; } if (event.detail === 0) { this.setData({ contact: {value: this.data.itemInfo.phone, type: '电话'} }); } if (event.detail === 1) { this.setData({ contact: { value: this.data.itemInfo.wechat, type: '微信' } }); } if (event.detail === 2) { this.setData({ contact: { value: this.data.itemInfo.qq, type: 'QQ' } }); } this.setData({ show: true }); }, onClose() { this.setData({ show: false}); } }) function sendSocketMessage(msg) { SocketTask.send({ data: msg }) } function formatNewMsg(msg) { if (msgList != null && msgList.length > 0) { let createTimeFirst = new Date(msg.createTime).getTime(); let createTimeSecond = new Date(msgList[msgList.length - 1].createTime).getTime(); let minute = (createTimeFirst - createTimeSecond) / 1000 / 60; if (minute > 5) { msg.format = true; msg.time = util.toWeiXinString(new Date(msg.createTime)); } } else { msg.format = true; msg.time = util.toWeiXinString(new Date(msg.createTime)); } return msg; }
ile',
identifier_name
neexe.rs
use bitflags::bitflags; use byteorder::{ByteOrder, LE}; use custom_error::custom_error; use crate::util::read_pascal_string; use enum_primitive::*; use nom::{apply, count, do_parse, le_u8, le_u16, le_u32, named, named_args, tag, take}; macro_rules! try_parse ( ($result: expr, $error: expr) => (match $result { Ok((_, result)) => result, Err(_) => { return Err($error); } }) ); custom_error!{pub ParseError NotMZ = "invalid MZ header", NotNE = "invalid NE header", SegmentHeader{ segment_number: u16 } = "invalid segment {segment_number} header", SelfLoadHeader = "invalid self-load header" } named!(parse_ne_offset<u16>, do_parse!( tag!("MZ") >> take!(58) >> ne_offset: le_u16 >> (ne_offset) ) ); bitflags!(pub struct NEFlags: u16 { const SINGLE_DATA = 0x0001; const MULTIPLE_DATA = 0x0002; const GLOBAL_INIT = 0x0004; const PROTECTED_MODE = 0x0008; // There seems to be some disagreement as to what these high nibble bits // mean, but they are sometimes set so they should probably not be ignored const WIN32S = 0x0010; const INST_286 = 0x0020; const INST_386 = 0x0040; const INST_X87 = 0x0080; const FULLSCREEN = 0x0100; const CONSOLE = 0x0200; const GUI = 0x0300; const SELF_LOAD = 0x0800; const LINKER_ERROR = 0x2000; const CALL_WEP = 0x4000; const LIB_MODULE = 0x8000; }); #[derive(Clone, Debug)] pub struct
{ pub linker_major_version: u8, pub linker_minor_version: u8, pub entry_table_offset: u16, pub entry_table_size: u16, pub crc: u32, pub flags: NEFlags, pub auto_data_segment_index: u16, pub heap_size: u16, pub stack_size: u16, pub entry_point: u32, pub init_stack_pointer: u32, pub num_segments: u16, pub num_imports: u16, pub non_resident_table_size: u16, pub segment_table_offset: u16, // bytes, from start of NEHeader pub resource_table_offset: u16, pub names_table_offset: u16, pub module_table_offset: u16, pub import_names_table_offset: u16, pub non_resident_table_offset: u32, pub num_movable_entry_point: u16, pub alignment_shift_count: u16, // 1 << alignment_shift_count = logical sector pub num_resources: u16, pub target_os: u8, pub os2_flags: u8, pub thunk_offset: u16, pub segment_thunk_offset: u16, pub min_code_swap_size: u16, pub win_version_minor: u8, pub win_version_major: u8, } bitflags!(pub struct NESegmentFlags: u16 { const CODE = 0x0000; const DATA = 0x0001; const MOVABLE = 0x0010; const PRELOAD = 0x0040; const HAS_RELOC = 0x0100; const PRIORITY = 0xF000; }); named!(read_ne_header<NEHeader>, do_parse!( tag!("NE") >> linker_major_version: le_u8 >> linker_minor_version: le_u8 >> entry_table_offset: le_u16 >> // relative to beginning of header entry_table_size: le_u16 >> // bytes crc: le_u32 >> flags: le_u16 >> auto_data_segment_index: le_u16 >> heap_size: le_u16 >> stack_size: le_u16 >> entry_point: le_u32 >> // cs:ip init_stack_pointer: le_u32 >> // ss:sp num_segments: le_u16 >> num_imports: le_u16 >> non_resident_table_size: le_u16 >> segment_table_offset: le_u16 >> resource_table_offset: le_u16 >> names_table_offset: le_u16 >> module_table_offset: le_u16 >> import_names_table_offset: le_u16 >> non_resident_table_offset: le_u32 >> num_movable_entry_point: le_u16 >> alignment_shift_count: le_u16 >> num_resources: le_u16 >> target_os: le_u8 >> os2_flags: le_u8 >> thunk_offset: le_u16 >> segment_thunk_offset: le_u16 >> min_code_swap_size: le_u16 >> 
win_version_minor: le_u8 >> win_version_major: le_u8 >> (NEHeader { linker_major_version, linker_minor_version, entry_table_offset, entry_table_size, crc, flags: NEFlags::from_bits_truncate(flags), auto_data_segment_index, heap_size, stack_size, entry_point, init_stack_pointer, num_segments, num_imports, non_resident_table_size, segment_table_offset, resource_table_offset, names_table_offset, module_table_offset, import_names_table_offset, non_resident_table_offset, num_movable_entry_point, alignment_shift_count, num_resources, target_os, os2_flags, thunk_offset, segment_thunk_offset, min_code_swap_size, win_version_minor, win_version_major }) ) ); #[derive(Clone, Debug)] pub struct NESegmentEntry { pub offset: u32, // bytes pub data_size: u32, // bytes pub flags: NESegmentFlags, pub alloc_size: u32, // bytes } named_args!(parse_segment_header(offset_shift: u16)<NESegmentEntry>, do_parse!( offset: le_u16 >> data_size: le_u16 >> flags: le_u16 >> alloc_size: le_u16 >> (NESegmentEntry { offset: u32::from(offset) << offset_shift, data_size: if data_size == 0 { 0x10000 } else { data_size.into() }, flags: NESegmentFlags::from_bits_truncate(flags), alloc_size: if alloc_size == 0 { 0x10000 } else { alloc_size.into() } }) ) ); named_args!(parse_segments(offset_shift: u16, num_segments: u16)<Vec<NESegmentEntry> >, count!(apply!(parse_segment_header, offset_shift), num_segments as usize) ); bitflags!(pub struct NEResourceFlags: u16 { const MOVABLE = 0x10; const PURE = 0x20; const PRELOAD = 0x40; }); enum_from_primitive! 
{ #[derive(Clone, Debug, PartialEq, Eq)] pub enum NEPredefinedResourceKind { Cursor = 1, Bitmap = 2, Icon = 3, Menu = 4, Dialog = 5, StringTable = 6, FontDirectory = 7, FontResource = 8, AcceleratorTable = 9, RawData = 10, MessageTable = 11, GroupCursor = 12, GroupIcon = 14, // NameTable: https://hackernoon.com/win3mu-part-5-windows-3-executable-files-c2affeec0e5 NameTable = 15, Version = 16, DlgInclude = 17, PlugPlay = 19, VXD = 20, AnimatedCursor = 21, AnimatedIcon = 22, HTML = 23, Manifest = 24, } } #[derive(Clone, Debug)] pub enum NEResourceId { Integer(u16), String(String), } #[derive(Clone, Debug, PartialEq, Eq)] pub enum NEResourceKind { Predefined(NEPredefinedResourceKind), Integer(u16), String(String), } #[derive(Clone, Debug)] pub struct NEResourceEntry { pub kind: NEResourceKind, pub id: NEResourceId, pub offset: u32, // bytes pub length: u32, // bytes pub flags: NEResourceFlags, } named_args!(read_resource<'a>(resource_table: &'a [u8], kind: NEResourceKind, offset_shift: u16)<NEResourceEntry>, do_parse!( offset: le_u16 >> // in sectors length: le_u16 >> // in sectors flags: le_u16 >> id: le_u16 >> /* reserved */ le_u32 >> (NEResourceEntry { offset: u32::from(offset) << offset_shift, length: u32::from(length) << offset_shift, flags: NEResourceFlags::from_bits_truncate(flags), kind, id: if id & 0x8000 != 0 { NEResourceId::Integer(id & 0x7fff) } else { NEResourceId::String(read_pascal_string(&resource_table[id as usize..]).unwrap().1) } }) ) ); enum_from_primitive! 
{ #[derive(Clone, Debug)] pub enum NESegmentRelocationSourceKind { LoByte = 0, Segment = 2, FarAddr = 3, Offset = 5, } } #[derive(Clone, Debug)] pub struct NESelfLoadHeader { pub boot_app_offset: u32, pub load_app_seg_offset: u32, } named!(read_selfload_header<NESelfLoadHeader>, do_parse!( tag!("A0") >> take!(2) >> // reserved boot_app_offset: le_u32 >> // segment:offset load_app_seg_offset: le_u32 >> // segment:offset take!(4) >> // reserved take!(4) >> // mem alloc take!(4) >> // ordinal resolve take!(4) >> // exit take!(2 * 4) >> // reserved take!(4) >> // set owner (NESelfLoadHeader { boot_app_offset, load_app_seg_offset }) ) ); const SEGMENT_HEADER_SIZE: u16 = 8; const FIXUP_SIZE: u16 = 8; pub struct NEExecutable<'a> { input: &'a [u8], header: NEHeader, header_offset: u16, // A raw header slice is stored to make it easier to resolve offsets which // are relative to the start of the NE header raw_header: &'a [u8], } pub struct NEResourcesIterator<'a> { table: &'a [u8], index: usize, table_kind: NEResourceKind, offset_shift: u16, block_index: u16, block_len: u16, finished: bool, } impl<'a> NEResourcesIterator<'a> { pub fn new(table: &'a [u8]) -> NEResourcesIterator<'a> { let offset_shift = LE::read_u16(table); let mut iterator = NEResourcesIterator { table, index: 2, table_kind: NEResourceKind::Integer(0xffff), offset_shift, block_index: 0, block_len: 0, finished: false, }; iterator.load_next_block(); iterator } fn load_next_block(&mut self) { let id = LE::read_u16(&self.table[self.index..]); self.finished = id == 0; if !self.finished { self.table_kind = if id & 0x8000 != 0 { let id = id & 0x7fff; if let Some(kind) = NEPredefinedResourceKind::from_u16(id) { NEResourceKind::Predefined(kind) } else { NEResourceKind::Integer(id) } } else { NEResourceKind::String(read_pascal_string(&self.table[self.index + id as usize..]).unwrap().1) }; self.block_index = 0; self.block_len = LE::read_u16(&self.table[self.index + 2..]); self.index += 8; } } } impl<'a> Iterator for 
NEResourcesIterator<'a> { type Item = NEResourceEntry; fn next(&mut self) -> Option<Self::Item> { if self.block_index == self.block_len { self.load_next_block(); } if self.finished { None } else { let (_, header) = read_resource(&self.table[self.index..], self.table, self.table_kind.clone(), self.offset_shift).unwrap(); self.index += 12; self.block_index += 1; Some(header) } } } impl<'a> NEExecutable<'a> { pub fn new(input: &'a [u8]) -> Result<Self, ParseError> { let header_offset = try_parse!(parse_ne_offset(input), ParseError::NotMZ); let raw_header = &input[header_offset as usize..]; let header = try_parse!(read_ne_header(raw_header), ParseError::NotNE); Ok(NEExecutable { input, header, header_offset, // TODO: Get rid of this raw_header }) } pub fn raw_data(&self) -> &'a [u8] { self.input } pub fn header_offset(&self) -> usize { self.header_offset as usize } pub fn name(&self) -> Option<String> { if self.header.non_resident_table_size == 0 { None } else { let ne_non_resident_table = &self.input[self.header.non_resident_table_offset as usize..]; match read_pascal_string(&ne_non_resident_table) { Ok((_, name)) => Some(name), Err(_) => None } } } pub fn header(&self) -> &NEHeader { &self.header } pub fn selfload_header(&self) -> Result<Option<(NESelfLoadHeader, &[u8])>, ParseError> { if self.header.flags.contains(NEFlags::SELF_LOAD) { Ok(Some(self.selfload_header_impl()?)) } else { Ok(None) } } /// # Arguments /// * segment_number - 1-indexed segment number pub fn segment_header(&self, segment_number: u16) -> Result<NESegmentEntry, ParseError> { assert!(segment_number != 0 || segment_number <= self.header.num_segments, format!("segment number {} is out of range", segment_number)); let offset = self.header.segment_table_offset + ((segment_number - 1) * SEGMENT_HEADER_SIZE); match parse_segment_header(&self.raw_header[offset as usize..], self.header.alignment_shift_count) { Ok((_, header)) => Ok(header), Err(_) => Err(ParseError::SegmentHeader{ segment_number }) } } 
/// # Arguments /// * segment_number - 1-indexed segment number pub fn segment_data(&self, segment_number: u16) -> Result<&[u8], ParseError> { let header = self.segment_header(segment_number)?; let data = &self.input[header.offset as usize..]; let mut size = header.data_size as usize; if header.flags.contains(NESegmentFlags::HAS_RELOC) { let fixup_table_size = LE::read_u16(&data[size..]) as usize * FIXUP_SIZE as usize; size += fixup_table_size; } Ok(&data[..size]) } pub fn resource_table_alignment_shift(&self) -> Option<u16> { if let Some(table) = self.resource_table_data() { Some(LE::read_u16(table)) } else { None } } pub fn resource_table_data(&self) -> Option<&[u8]> { if self.has_resource_table() { Some(&self.raw_header[self.header.resource_table_offset as usize..]) } else { None } } pub fn iter_resources(&self) -> NEResourcesIterator { if self.has_resource_table() { NEResourcesIterator::new(&self.raw_header[self.header.resource_table_offset as usize..]) } else { NEResourcesIterator { table: self.raw_header, index: 0, table_kind: NEResourceKind::Integer(0xffff), offset_shift: 0, block_index: 1, block_len: 0, finished: true } } } pub fn has_resource_table(&self) -> bool { // In DIRAPI.DLL from Director for Windows, the resource table offset // is non-zero but there is no resource table; the resource table offset // and names table offset are identical. self.header.resource_table_offset != 0 && self.header.resource_table_offset != self.header.names_table_offset } fn selfload_header_impl(&self) -> Result<(NESelfLoadHeader, &[u8]), ParseError> { let segment_data = self.segment_data(1)?; match read_selfload_header(segment_data) { Ok(header) => Ok((header.1, header.0)), Err(_) => Err(ParseError::SelfLoadHeader) } } }
NEHeader
identifier_name
neexe.rs
use bitflags::bitflags; use byteorder::{ByteOrder, LE}; use custom_error::custom_error; use crate::util::read_pascal_string; use enum_primitive::*; use nom::{apply, count, do_parse, le_u8, le_u16, le_u32, named, named_args, tag, take}; macro_rules! try_parse ( ($result: expr, $error: expr) => (match $result { Ok((_, result)) => result, Err(_) => { return Err($error); } }) ); custom_error!{pub ParseError NotMZ = "invalid MZ header", NotNE = "invalid NE header", SegmentHeader{ segment_number: u16 } = "invalid segment {segment_number} header", SelfLoadHeader = "invalid self-load header" } named!(parse_ne_offset<u16>, do_parse!( tag!("MZ") >> take!(58) >> ne_offset: le_u16 >> (ne_offset) ) ); bitflags!(pub struct NEFlags: u16 { const SINGLE_DATA = 0x0001; const MULTIPLE_DATA = 0x0002; const GLOBAL_INIT = 0x0004; const PROTECTED_MODE = 0x0008; // There seems to be some disagreement as to what these high nibble bits // mean, but they are sometimes set so they should probably not be ignored const WIN32S = 0x0010; const INST_286 = 0x0020; const INST_386 = 0x0040; const INST_X87 = 0x0080; const FULLSCREEN = 0x0100; const CONSOLE = 0x0200; const GUI = 0x0300; const SELF_LOAD = 0x0800; const LINKER_ERROR = 0x2000; const CALL_WEP = 0x4000; const LIB_MODULE = 0x8000; }); #[derive(Clone, Debug)] pub struct NEHeader { pub linker_major_version: u8, pub linker_minor_version: u8, pub entry_table_offset: u16, pub entry_table_size: u16, pub crc: u32, pub flags: NEFlags, pub auto_data_segment_index: u16, pub heap_size: u16, pub stack_size: u16, pub entry_point: u32, pub init_stack_pointer: u32, pub num_segments: u16, pub num_imports: u16, pub non_resident_table_size: u16, pub segment_table_offset: u16, // bytes, from start of NEHeader pub resource_table_offset: u16, pub names_table_offset: u16, pub module_table_offset: u16, pub import_names_table_offset: u16, pub non_resident_table_offset: u32, pub num_movable_entry_point: u16, pub alignment_shift_count: u16, // 1 << 
alignment_shift_count = logical sector pub num_resources: u16, pub target_os: u8, pub os2_flags: u8, pub thunk_offset: u16, pub segment_thunk_offset: u16, pub min_code_swap_size: u16, pub win_version_minor: u8, pub win_version_major: u8, } bitflags!(pub struct NESegmentFlags: u16 { const CODE = 0x0000; const DATA = 0x0001; const MOVABLE = 0x0010; const PRELOAD = 0x0040; const HAS_RELOC = 0x0100; const PRIORITY = 0xF000; }); named!(read_ne_header<NEHeader>, do_parse!( tag!("NE") >> linker_major_version: le_u8 >> linker_minor_version: le_u8 >> entry_table_offset: le_u16 >> // relative to beginning of header entry_table_size: le_u16 >> // bytes crc: le_u32 >> flags: le_u16 >> auto_data_segment_index: le_u16 >> heap_size: le_u16 >> stack_size: le_u16 >> entry_point: le_u32 >> // cs:ip init_stack_pointer: le_u32 >> // ss:sp num_segments: le_u16 >> num_imports: le_u16 >> non_resident_table_size: le_u16 >> segment_table_offset: le_u16 >> resource_table_offset: le_u16 >> names_table_offset: le_u16 >> module_table_offset: le_u16 >> import_names_table_offset: le_u16 >> non_resident_table_offset: le_u32 >> num_movable_entry_point: le_u16 >> alignment_shift_count: le_u16 >> num_resources: le_u16 >> target_os: le_u8 >> os2_flags: le_u8 >> thunk_offset: le_u16 >>
min_code_swap_size: le_u16 >> win_version_minor: le_u8 >> win_version_major: le_u8 >> (NEHeader { linker_major_version, linker_minor_version, entry_table_offset, entry_table_size, crc, flags: NEFlags::from_bits_truncate(flags), auto_data_segment_index, heap_size, stack_size, entry_point, init_stack_pointer, num_segments, num_imports, non_resident_table_size, segment_table_offset, resource_table_offset, names_table_offset, module_table_offset, import_names_table_offset, non_resident_table_offset, num_movable_entry_point, alignment_shift_count, num_resources, target_os, os2_flags, thunk_offset, segment_thunk_offset, min_code_swap_size, win_version_minor, win_version_major }) ) ); #[derive(Clone, Debug)] pub struct NESegmentEntry { pub offset: u32, // bytes pub data_size: u32, // bytes pub flags: NESegmentFlags, pub alloc_size: u32, // bytes } named_args!(parse_segment_header(offset_shift: u16)<NESegmentEntry>, do_parse!( offset: le_u16 >> data_size: le_u16 >> flags: le_u16 >> alloc_size: le_u16 >> (NESegmentEntry { offset: u32::from(offset) << offset_shift, data_size: if data_size == 0 { 0x10000 } else { data_size.into() }, flags: NESegmentFlags::from_bits_truncate(flags), alloc_size: if alloc_size == 0 { 0x10000 } else { alloc_size.into() } }) ) ); named_args!(parse_segments(offset_shift: u16, num_segments: u16)<Vec<NESegmentEntry> >, count!(apply!(parse_segment_header, offset_shift), num_segments as usize) ); bitflags!(pub struct NEResourceFlags: u16 { const MOVABLE = 0x10; const PURE = 0x20; const PRELOAD = 0x40; }); enum_from_primitive! 
{ #[derive(Clone, Debug, PartialEq, Eq)] pub enum NEPredefinedResourceKind { Cursor = 1, Bitmap = 2, Icon = 3, Menu = 4, Dialog = 5, StringTable = 6, FontDirectory = 7, FontResource = 8, AcceleratorTable = 9, RawData = 10, MessageTable = 11, GroupCursor = 12, GroupIcon = 14, // NameTable: https://hackernoon.com/win3mu-part-5-windows-3-executable-files-c2affeec0e5 NameTable = 15, Version = 16, DlgInclude = 17, PlugPlay = 19, VXD = 20, AnimatedCursor = 21, AnimatedIcon = 22, HTML = 23, Manifest = 24, } } #[derive(Clone, Debug)] pub enum NEResourceId { Integer(u16), String(String), } #[derive(Clone, Debug, PartialEq, Eq)] pub enum NEResourceKind { Predefined(NEPredefinedResourceKind), Integer(u16), String(String), } #[derive(Clone, Debug)] pub struct NEResourceEntry { pub kind: NEResourceKind, pub id: NEResourceId, pub offset: u32, // bytes pub length: u32, // bytes pub flags: NEResourceFlags, } named_args!(read_resource<'a>(resource_table: &'a [u8], kind: NEResourceKind, offset_shift: u16)<NEResourceEntry>, do_parse!( offset: le_u16 >> // in sectors length: le_u16 >> // in sectors flags: le_u16 >> id: le_u16 >> /* reserved */ le_u32 >> (NEResourceEntry { offset: u32::from(offset) << offset_shift, length: u32::from(length) << offset_shift, flags: NEResourceFlags::from_bits_truncate(flags), kind, id: if id & 0x8000 != 0 { NEResourceId::Integer(id & 0x7fff) } else { NEResourceId::String(read_pascal_string(&resource_table[id as usize..]).unwrap().1) } }) ) ); enum_from_primitive! 
{ #[derive(Clone, Debug)] pub enum NESegmentRelocationSourceKind { LoByte = 0, Segment = 2, FarAddr = 3, Offset = 5, } } #[derive(Clone, Debug)] pub struct NESelfLoadHeader { pub boot_app_offset: u32, pub load_app_seg_offset: u32, } named!(read_selfload_header<NESelfLoadHeader>, do_parse!( tag!("A0") >> take!(2) >> // reserved boot_app_offset: le_u32 >> // segment:offset load_app_seg_offset: le_u32 >> // segment:offset take!(4) >> // reserved take!(4) >> // mem alloc take!(4) >> // ordinal resolve take!(4) >> // exit take!(2 * 4) >> // reserved take!(4) >> // set owner (NESelfLoadHeader { boot_app_offset, load_app_seg_offset }) ) ); const SEGMENT_HEADER_SIZE: u16 = 8; const FIXUP_SIZE: u16 = 8; pub struct NEExecutable<'a> { input: &'a [u8], header: NEHeader, header_offset: u16, // A raw header slice is stored to make it easier to resolve offsets which // are relative to the start of the NE header raw_header: &'a [u8], } pub struct NEResourcesIterator<'a> { table: &'a [u8], index: usize, table_kind: NEResourceKind, offset_shift: u16, block_index: u16, block_len: u16, finished: bool, } impl<'a> NEResourcesIterator<'a> { pub fn new(table: &'a [u8]) -> NEResourcesIterator<'a> { let offset_shift = LE::read_u16(table); let mut iterator = NEResourcesIterator { table, index: 2, table_kind: NEResourceKind::Integer(0xffff), offset_shift, block_index: 0, block_len: 0, finished: false, }; iterator.load_next_block(); iterator } fn load_next_block(&mut self) { let id = LE::read_u16(&self.table[self.index..]); self.finished = id == 0; if !self.finished { self.table_kind = if id & 0x8000 != 0 { let id = id & 0x7fff; if let Some(kind) = NEPredefinedResourceKind::from_u16(id) { NEResourceKind::Predefined(kind) } else { NEResourceKind::Integer(id) } } else { NEResourceKind::String(read_pascal_string(&self.table[self.index + id as usize..]).unwrap().1) }; self.block_index = 0; self.block_len = LE::read_u16(&self.table[self.index + 2..]); self.index += 8; } } } impl<'a> Iterator for 
NEResourcesIterator<'a> { type Item = NEResourceEntry; fn next(&mut self) -> Option<Self::Item> { if self.block_index == self.block_len { self.load_next_block(); } if self.finished { None } else { let (_, header) = read_resource(&self.table[self.index..], self.table, self.table_kind.clone(), self.offset_shift).unwrap(); self.index += 12; self.block_index += 1; Some(header) } } } impl<'a> NEExecutable<'a> { pub fn new(input: &'a [u8]) -> Result<Self, ParseError> { let header_offset = try_parse!(parse_ne_offset(input), ParseError::NotMZ); let raw_header = &input[header_offset as usize..]; let header = try_parse!(read_ne_header(raw_header), ParseError::NotNE); Ok(NEExecutable { input, header, header_offset, // TODO: Get rid of this raw_header }) } pub fn raw_data(&self) -> &'a [u8] { self.input } pub fn header_offset(&self) -> usize { self.header_offset as usize } pub fn name(&self) -> Option<String> { if self.header.non_resident_table_size == 0 { None } else { let ne_non_resident_table = &self.input[self.header.non_resident_table_offset as usize..]; match read_pascal_string(&ne_non_resident_table) { Ok((_, name)) => Some(name), Err(_) => None } } } pub fn header(&self) -> &NEHeader { &self.header } pub fn selfload_header(&self) -> Result<Option<(NESelfLoadHeader, &[u8])>, ParseError> { if self.header.flags.contains(NEFlags::SELF_LOAD) { Ok(Some(self.selfload_header_impl()?)) } else { Ok(None) } } /// # Arguments /// * segment_number - 1-indexed segment number pub fn segment_header(&self, segment_number: u16) -> Result<NESegmentEntry, ParseError> { assert!(segment_number != 0 || segment_number <= self.header.num_segments, format!("segment number {} is out of range", segment_number)); let offset = self.header.segment_table_offset + ((segment_number - 1) * SEGMENT_HEADER_SIZE); match parse_segment_header(&self.raw_header[offset as usize..], self.header.alignment_shift_count) { Ok((_, header)) => Ok(header), Err(_) => Err(ParseError::SegmentHeader{ segment_number }) } } 
/// # Arguments /// * segment_number - 1-indexed segment number pub fn segment_data(&self, segment_number: u16) -> Result<&[u8], ParseError> { let header = self.segment_header(segment_number)?; let data = &self.input[header.offset as usize..]; let mut size = header.data_size as usize; if header.flags.contains(NESegmentFlags::HAS_RELOC) { let fixup_table_size = LE::read_u16(&data[size..]) as usize * FIXUP_SIZE as usize; size += fixup_table_size; } Ok(&data[..size]) } pub fn resource_table_alignment_shift(&self) -> Option<u16> { if let Some(table) = self.resource_table_data() { Some(LE::read_u16(table)) } else { None } } pub fn resource_table_data(&self) -> Option<&[u8]> { if self.has_resource_table() { Some(&self.raw_header[self.header.resource_table_offset as usize..]) } else { None } } pub fn iter_resources(&self) -> NEResourcesIterator { if self.has_resource_table() { NEResourcesIterator::new(&self.raw_header[self.header.resource_table_offset as usize..]) } else { NEResourcesIterator { table: self.raw_header, index: 0, table_kind: NEResourceKind::Integer(0xffff), offset_shift: 0, block_index: 1, block_len: 0, finished: true } } } pub fn has_resource_table(&self) -> bool { // In DIRAPI.DLL from Director for Windows, the resource table offset // is non-zero but there is no resource table; the resource table offset // and names table offset are identical. self.header.resource_table_offset != 0 && self.header.resource_table_offset != self.header.names_table_offset } fn selfload_header_impl(&self) -> Result<(NESelfLoadHeader, &[u8]), ParseError> { let segment_data = self.segment_data(1)?; match read_selfload_header(segment_data) { Ok(header) => Ok((header.1, header.0)), Err(_) => Err(ParseError::SelfLoadHeader) } } }
segment_thunk_offset: le_u16 >>
random_line_split
neexe.rs
use bitflags::bitflags; use byteorder::{ByteOrder, LE}; use custom_error::custom_error; use crate::util::read_pascal_string; use enum_primitive::*; use nom::{apply, count, do_parse, le_u8, le_u16, le_u32, named, named_args, tag, take}; macro_rules! try_parse ( ($result: expr, $error: expr) => (match $result { Ok((_, result)) => result, Err(_) => { return Err($error); } }) ); custom_error!{pub ParseError NotMZ = "invalid MZ header", NotNE = "invalid NE header", SegmentHeader{ segment_number: u16 } = "invalid segment {segment_number} header", SelfLoadHeader = "invalid self-load header" } named!(parse_ne_offset<u16>, do_parse!( tag!("MZ") >> take!(58) >> ne_offset: le_u16 >> (ne_offset) ) ); bitflags!(pub struct NEFlags: u16 { const SINGLE_DATA = 0x0001; const MULTIPLE_DATA = 0x0002; const GLOBAL_INIT = 0x0004; const PROTECTED_MODE = 0x0008; // There seems to be some disagreement as to what these high nibble bits // mean, but they are sometimes set so they should probably not be ignored const WIN32S = 0x0010; const INST_286 = 0x0020; const INST_386 = 0x0040; const INST_X87 = 0x0080; const FULLSCREEN = 0x0100; const CONSOLE = 0x0200; const GUI = 0x0300; const SELF_LOAD = 0x0800; const LINKER_ERROR = 0x2000; const CALL_WEP = 0x4000; const LIB_MODULE = 0x8000; }); #[derive(Clone, Debug)] pub struct NEHeader { pub linker_major_version: u8, pub linker_minor_version: u8, pub entry_table_offset: u16, pub entry_table_size: u16, pub crc: u32, pub flags: NEFlags, pub auto_data_segment_index: u16, pub heap_size: u16, pub stack_size: u16, pub entry_point: u32, pub init_stack_pointer: u32, pub num_segments: u16, pub num_imports: u16, pub non_resident_table_size: u16, pub segment_table_offset: u16, // bytes, from start of NEHeader pub resource_table_offset: u16, pub names_table_offset: u16, pub module_table_offset: u16, pub import_names_table_offset: u16, pub non_resident_table_offset: u32, pub num_movable_entry_point: u16, pub alignment_shift_count: u16, // 1 << 
alignment_shift_count = logical sector pub num_resources: u16, pub target_os: u8, pub os2_flags: u8, pub thunk_offset: u16, pub segment_thunk_offset: u16, pub min_code_swap_size: u16, pub win_version_minor: u8, pub win_version_major: u8, } bitflags!(pub struct NESegmentFlags: u16 { const CODE = 0x0000; const DATA = 0x0001; const MOVABLE = 0x0010; const PRELOAD = 0x0040; const HAS_RELOC = 0x0100; const PRIORITY = 0xF000; }); named!(read_ne_header<NEHeader>, do_parse!( tag!("NE") >> linker_major_version: le_u8 >> linker_minor_version: le_u8 >> entry_table_offset: le_u16 >> // relative to beginning of header entry_table_size: le_u16 >> // bytes crc: le_u32 >> flags: le_u16 >> auto_data_segment_index: le_u16 >> heap_size: le_u16 >> stack_size: le_u16 >> entry_point: le_u32 >> // cs:ip init_stack_pointer: le_u32 >> // ss:sp num_segments: le_u16 >> num_imports: le_u16 >> non_resident_table_size: le_u16 >> segment_table_offset: le_u16 >> resource_table_offset: le_u16 >> names_table_offset: le_u16 >> module_table_offset: le_u16 >> import_names_table_offset: le_u16 >> non_resident_table_offset: le_u32 >> num_movable_entry_point: le_u16 >> alignment_shift_count: le_u16 >> num_resources: le_u16 >> target_os: le_u8 >> os2_flags: le_u8 >> thunk_offset: le_u16 >> segment_thunk_offset: le_u16 >> min_code_swap_size: le_u16 >> win_version_minor: le_u8 >> win_version_major: le_u8 >> (NEHeader { linker_major_version, linker_minor_version, entry_table_offset, entry_table_size, crc, flags: NEFlags::from_bits_truncate(flags), auto_data_segment_index, heap_size, stack_size, entry_point, init_stack_pointer, num_segments, num_imports, non_resident_table_size, segment_table_offset, resource_table_offset, names_table_offset, module_table_offset, import_names_table_offset, non_resident_table_offset, num_movable_entry_point, alignment_shift_count, num_resources, target_os, os2_flags, thunk_offset, segment_thunk_offset, min_code_swap_size, win_version_minor, win_version_major }) ) ); 
#[derive(Clone, Debug)] pub struct NESegmentEntry { pub offset: u32, // bytes pub data_size: u32, // bytes pub flags: NESegmentFlags, pub alloc_size: u32, // bytes } named_args!(parse_segment_header(offset_shift: u16)<NESegmentEntry>, do_parse!( offset: le_u16 >> data_size: le_u16 >> flags: le_u16 >> alloc_size: le_u16 >> (NESegmentEntry { offset: u32::from(offset) << offset_shift, data_size: if data_size == 0 { 0x10000 } else { data_size.into() }, flags: NESegmentFlags::from_bits_truncate(flags), alloc_size: if alloc_size == 0 { 0x10000 } else { alloc_size.into() } }) ) ); named_args!(parse_segments(offset_shift: u16, num_segments: u16)<Vec<NESegmentEntry> >, count!(apply!(parse_segment_header, offset_shift), num_segments as usize) ); bitflags!(pub struct NEResourceFlags: u16 { const MOVABLE = 0x10; const PURE = 0x20; const PRELOAD = 0x40; }); enum_from_primitive! { #[derive(Clone, Debug, PartialEq, Eq)] pub enum NEPredefinedResourceKind { Cursor = 1, Bitmap = 2, Icon = 3, Menu = 4, Dialog = 5, StringTable = 6, FontDirectory = 7, FontResource = 8, AcceleratorTable = 9, RawData = 10, MessageTable = 11, GroupCursor = 12, GroupIcon = 14, // NameTable: https://hackernoon.com/win3mu-part-5-windows-3-executable-files-c2affeec0e5 NameTable = 15, Version = 16, DlgInclude = 17, PlugPlay = 19, VXD = 20, AnimatedCursor = 21, AnimatedIcon = 22, HTML = 23, Manifest = 24, } } #[derive(Clone, Debug)] pub enum NEResourceId { Integer(u16), String(String), } #[derive(Clone, Debug, PartialEq, Eq)] pub enum NEResourceKind { Predefined(NEPredefinedResourceKind), Integer(u16), String(String), } #[derive(Clone, Debug)] pub struct NEResourceEntry { pub kind: NEResourceKind, pub id: NEResourceId, pub offset: u32, // bytes pub length: u32, // bytes pub flags: NEResourceFlags, } named_args!(read_resource<'a>(resource_table: &'a [u8], kind: NEResourceKind, offset_shift: u16)<NEResourceEntry>, do_parse!( offset: le_u16 >> // in sectors length: le_u16 >> // in sectors flags: le_u16 >> id: 
le_u16 >> /* reserved */ le_u32 >> (NEResourceEntry { offset: u32::from(offset) << offset_shift, length: u32::from(length) << offset_shift, flags: NEResourceFlags::from_bits_truncate(flags), kind, id: if id & 0x8000 != 0 { NEResourceId::Integer(id & 0x7fff) } else { NEResourceId::String(read_pascal_string(&resource_table[id as usize..]).unwrap().1) } }) ) ); enum_from_primitive! { #[derive(Clone, Debug)] pub enum NESegmentRelocationSourceKind { LoByte = 0, Segment = 2, FarAddr = 3, Offset = 5, } } #[derive(Clone, Debug)] pub struct NESelfLoadHeader { pub boot_app_offset: u32, pub load_app_seg_offset: u32, } named!(read_selfload_header<NESelfLoadHeader>, do_parse!( tag!("A0") >> take!(2) >> // reserved boot_app_offset: le_u32 >> // segment:offset load_app_seg_offset: le_u32 >> // segment:offset take!(4) >> // reserved take!(4) >> // mem alloc take!(4) >> // ordinal resolve take!(4) >> // exit take!(2 * 4) >> // reserved take!(4) >> // set owner (NESelfLoadHeader { boot_app_offset, load_app_seg_offset }) ) ); const SEGMENT_HEADER_SIZE: u16 = 8; const FIXUP_SIZE: u16 = 8; pub struct NEExecutable<'a> { input: &'a [u8], header: NEHeader, header_offset: u16, // A raw header slice is stored to make it easier to resolve offsets which // are relative to the start of the NE header raw_header: &'a [u8], } pub struct NEResourcesIterator<'a> { table: &'a [u8], index: usize, table_kind: NEResourceKind, offset_shift: u16, block_index: u16, block_len: u16, finished: bool, } impl<'a> NEResourcesIterator<'a> { pub fn new(table: &'a [u8]) -> NEResourcesIterator<'a> { let offset_shift = LE::read_u16(table); let mut iterator = NEResourcesIterator { table, index: 2, table_kind: NEResourceKind::Integer(0xffff), offset_shift, block_index: 0, block_len: 0, finished: false, }; iterator.load_next_block(); iterator } fn load_next_block(&mut self) { let id = LE::read_u16(&self.table[self.index..]); self.finished = id == 0; if !self.finished { self.table_kind = if id & 0x8000 != 0 { let id = id 
& 0x7fff; if let Some(kind) = NEPredefinedResourceKind::from_u16(id) { NEResourceKind::Predefined(kind) } else { NEResourceKind::Integer(id) } } else { NEResourceKind::String(read_pascal_string(&self.table[self.index + id as usize..]).unwrap().1) }; self.block_index = 0; self.block_len = LE::read_u16(&self.table[self.index + 2..]); self.index += 8; } } } impl<'a> Iterator for NEResourcesIterator<'a> { type Item = NEResourceEntry; fn next(&mut self) -> Option<Self::Item> { if self.block_index == self.block_len { self.load_next_block(); } if self.finished { None } else { let (_, header) = read_resource(&self.table[self.index..], self.table, self.table_kind.clone(), self.offset_shift).unwrap(); self.index += 12; self.block_index += 1; Some(header) } } } impl<'a> NEExecutable<'a> { pub fn new(input: &'a [u8]) -> Result<Self, ParseError> { let header_offset = try_parse!(parse_ne_offset(input), ParseError::NotMZ); let raw_header = &input[header_offset as usize..]; let header = try_parse!(read_ne_header(raw_header), ParseError::NotNE); Ok(NEExecutable { input, header, header_offset, // TODO: Get rid of this raw_header }) } pub fn raw_data(&self) -> &'a [u8] { self.input } pub fn header_offset(&self) -> usize { self.header_offset as usize } pub fn name(&self) -> Option<String> { if self.header.non_resident_table_size == 0 { None } else { let ne_non_resident_table = &self.input[self.header.non_resident_table_offset as usize..]; match read_pascal_string(&ne_non_resident_table) { Ok((_, name)) => Some(name), Err(_) => None } } } pub fn header(&self) -> &NEHeader { &self.header } pub fn selfload_header(&self) -> Result<Option<(NESelfLoadHeader, &[u8])>, ParseError> { if self.header.flags.contains(NEFlags::SELF_LOAD) { Ok(Some(self.selfload_header_impl()?)) } else { Ok(None) } } /// # Arguments /// * segment_number - 1-indexed segment number pub fn segment_header(&self, segment_number: u16) -> Result<NESegmentEntry, ParseError> { assert!(segment_number != 0 || segment_number <= 
self.header.num_segments, format!("segment number {} is out of range", segment_number)); let offset = self.header.segment_table_offset + ((segment_number - 1) * SEGMENT_HEADER_SIZE); match parse_segment_header(&self.raw_header[offset as usize..], self.header.alignment_shift_count) { Ok((_, header)) => Ok(header), Err(_) => Err(ParseError::SegmentHeader{ segment_number }) } } /// # Arguments /// * segment_number - 1-indexed segment number pub fn segment_data(&self, segment_number: u16) -> Result<&[u8], ParseError> { let header = self.segment_header(segment_number)?; let data = &self.input[header.offset as usize..]; let mut size = header.data_size as usize; if header.flags.contains(NESegmentFlags::HAS_RELOC) { let fixup_table_size = LE::read_u16(&data[size..]) as usize * FIXUP_SIZE as usize; size += fixup_table_size; } Ok(&data[..size]) } pub fn resource_table_alignment_shift(&self) -> Option<u16> { if let Some(table) = self.resource_table_data() { Some(LE::read_u16(table)) } else { None } } pub fn resource_table_data(&self) -> Option<&[u8]> { if self.has_resource_table() { Some(&self.raw_header[self.header.resource_table_offset as usize..]) } else { None } } pub fn iter_resources(&self) -> NEResourcesIterator { if self.has_resource_table()
else { NEResourcesIterator { table: self.raw_header, index: 0, table_kind: NEResourceKind::Integer(0xffff), offset_shift: 0, block_index: 1, block_len: 0, finished: true } } } pub fn has_resource_table(&self) -> bool { // In DIRAPI.DLL from Director for Windows, the resource table offset // is non-zero but there is no resource table; the resource table offset // and names table offset are identical. self.header.resource_table_offset != 0 && self.header.resource_table_offset != self.header.names_table_offset } fn selfload_header_impl(&self) -> Result<(NESelfLoadHeader, &[u8]), ParseError> { let segment_data = self.segment_data(1)?; match read_selfload_header(segment_data) { Ok(header) => Ok((header.1, header.0)), Err(_) => Err(ParseError::SelfLoadHeader) } } }
{ NEResourcesIterator::new(&self.raw_header[self.header.resource_table_offset as usize..]) }
conditional_block
query-builder-common.js
/** * Show the tables as tree format at side menu in window, * here we can Collapse and Expend the tables under "Table" title bar * @author mahesh */ $(function() { $('.tree li:has(ul)').addClass('parent_li').find(' > span').attr( 'title', 'Collapse this branch'); $('.tree li.parent_li > span').on( 'click', function(e) { var children = $(this).parent('li.parent_li').find(' > div > ul > li'); if (children.is(":visible")) { children.hide('fast'); $(this).attr('title', 'Expand this branch').find( ' > i').addClass('fa-plus-square') .removeClass('fa-minus-square'); } else { children.show('fast'); $(this).attr('title', 'Collapse this branch').find( ' > i').addClass('fa-minus-square') .removeClass('fa-plus-square'); } e.stopPropagation(); }); }); fetchAllTablesAjax(loadTablesInTree); enableDeleteTable(); enableTableSearch(); showExpressionBuilder(); function showExpressionBuilder(){ loadExpressionBuilder(); $(".expression-builder-icon").click(function(){ if($(".expression-container").is(":visible")){ $(".expression-container").hide('slide', { direction : "right" }, 500); $(".table-list li").draggable('enable'); $("#reset").removeClass("reset rotate").addClass("disabled-link"); }else{ $(".expression-container").show('slide', { direction : "right" }, 500); $(".table-list li").draggable('disable'); $("#reset").removeClass("reset disabled-link").addClass("reset rotate"); } }); // Enable slim scroll bar for table enableSlimScroll($(".query-builder"), { height : "410px" }); $("[data-toggle='tooltip']").tooltip({ placement : "top" }); } /** * Load all tables in tree, this method call using callback of "fetchAllTablesAjax(loadTablesInTree);" * @param tables */ function loadTablesInTree(tables) { $("ul.table-list").html(""); $.each(tables, function(index, table) { $("<li>", { 'id' : table.name, 'class' : "tree-table", 'html' : "<span><i class='fa fa-table'></i> " + (table.name).toUpperCase() + "</span>" }).appendTo('ul.table-list'); }); 
$("ul.table-list").find("li").slideDown("fade"); // Enable slim scroll bar for table enableSlimScroll($("ul.table-list"), { height : "382px" }); // Enable drag and drop events enableDNDEvents(); } function enableTableSearch(){ var isFiltered = false; // Enable search key events $("input.search-tbl-query").keyup(function() { var tableNameStr = $("input.search-tbl-query").val(); if(tableNameStr.length > 2){ isFiltered = true; fetchTablesByTableNameAjax(tableNameStr.toLowerCase(), loadTablesInTree); } else if(isFiltered) { isFiltered = false; fetchTablesByTableNameAjax("", loadTablesInTree); } }); } /** * Load all columns by given table name, * this method call using callback of "fetchTableColumnByTableNameAjax(tableName, columnId, loadTableColumns);" * @param tables */ function loadTableColumns(columns, columnId) { $.each(columns, function(index, column) { $("ul#" + columnId).append( $("<li>", { 'id' : column.id, 'name' : column.field, 'class' : "column-list", 'html' : "<span><i class='fa fa-columns'></i> " + column.field + "</span>" }) ); }); enableJoinRules($("ul#" + columnId)); // Toggle event to select and unselect the table column $("ul#" + columnId).find("li").click(function() { $(this).toggleClass("column-highlight"); }); // Enable slim scroll bar for table enableSlimScroll($("ul#" + columnId), { height: '185px' }); } var prevConnectionId = ""; function enableJoinRules(columnListObj){ columnListObj.find("li").click(function(){ var connectionId = $(this).attr("id"); if(prevConnectionId && prevConnectionId != connectionId){ var originId = $("#"+prevConnectionId).closest(".table-column-list-container").attr("id"); var targetId = $("#"+connectionId).closest(".table-column-list-container").attr("id"); $(".table-column-list-container").connections({ 'from' : '#'+prevConnectionId, 'to' : '#'+connectionId, 'class' : 'fast' }); $.repeat().add('connection').each($).connections('update').wait(0); prevConnectionId = "", connectionId = ""; endLinkMode(); }else{ 
prevConnectionId = connectionId; var linkLine = $('<div id="new-link-line"></div>').appendTo('body'); linkLine .css('top', $('#'+connectionId).offset().top + $('#'+connectionId).height() / 2) .css('left', $('#'+connectionId).offset().left); // Cancel on right click $(document).bind('mousedown.link', function(event) { if(event.which == 3) { endLinkMode(); } }); $(document).bind('keydown.link', function(event) { // ESCAPE key pressed if(event.keyCode == 27) { endLinkMode(); } }); $(document).mousemove(function(event){ linkMouseMoveEvent(event, connectionId) }); } }); } function linkMouseMoveEvent(event, currentColumnId) { if($('#new-link-line').length > 0) { var originX = $('#'+currentColumnId).offset().left; var originY = $('#'+currentColumnId).offset().top + $('#'+currentColumnId).height() / 2; var length = Math.sqrt((event.pageX - originX) * (event.pageX - originX) + (event.pageY - originY) * (event.pageY - originY)); var angle = 180 / 3.1415 * Math.acos((event.pageY - originY) / length); if(event.pageX > originX) angle *= -1; $('#new-link-line') .css('height', length) .css('-webkit-transform', 'rotate(' + angle + 'deg)') .css('-moz-transform', 'rotate(' + angle + 'deg)') .css('-o-transform', 'rotate(' + angle + 'deg)') .css('-ms-transform', 'rotate(' + angle + 'deg)') .css('transform', 'rotate(' + angle + 'deg)'); } } function endLinkMode() { $('#new-link-line').remove(); $(document).unbind('mousemove.link').unbind('click.link').unbind('keydown.link'); } /** * Load operators in select box * @param results */ function loadOperatorsInSelectBox(results){ $(".pop-modal-content").find(".operators_select_box").each(function(){ $(this).ddslick({ data : results, width : 170 }); }); } var connections = []; var connectionsId = []; //connections.push(new $.connect('#PROD2CAT', '#PRODUKT', {leftLabel : 'Many', rightLabel: 'One'})); /** * Here we enable the drag and drop events for drag the tables and drop into drop area with list of columns */ function enableDNDEvents() { 
var zIndex = 3; $(".table-list li").draggable({ revert : "invalid", cursorAt : { top : -8, left : -8 }, appendTo : "body", helper : function(event, ui) { var tableName = $(this).attr("id"); var html = '<div class="drag-helper"><span><i class="fa fa-table"></i> ' + tableName + '</span></div>'; return $(html); } }); $(".table-droppable").droppable({ tolerance : "pointer", accept : ".tree-table", hoverClass : "table-droppable-glow-effect", drop : function(event, ui) { var pos = ui.position, dPos = $(this).offset(); var topPosition = pos.top - dPos.top, leftPosition = pos.left - dPos.left, uniqueId = guid(), tableName = $(ui.draggable).attr("id"), tableId = tableName + "_tblid_" + uniqueId, columnId = tableName + "_columnid_" + uniqueId; $(this).append( $("<div>", { 'id' : tableId, 'class' : 'table-column-list-container draggable', 'style' : 'display:none;left:' + leftPosition + 'px;top:' + topPosition + 'px;z-index:' + (zIndex++), 'html' : '<div tableName="' + tableName + '" class="drag-table-title-bar"><span><i class="fa fa-table"></i> ' + tableName + '</span><div style="display:none;" id="' + tableName + '_tbl-delete" class="tbl-delete"><i class="fa fa-close fa-lg"></i></div></div><ul id="' + columnId + '" class="table-column-list"></ul>' }) ); $("#" + tableId).slideDown("normal", function() { $(this).show(); }); $("#" + tableId).draggable({ containment : "parent", handle : ".drag-table-title-bar", drag : function(event, ui) { var item = this; connections.forEach(function(connection){ if(connection.elem1[0] === item || connection.elem2[0] === item) { connection.calculate(); } }) }, start : function(event, ui) { $(this).css("z-index", zIndex++); $(".delete-table-in-drop-area").show('fade'); }, stop : function(event, ui) { $(".delete-table-in-drop-area").hide('fade', {}, 1000); } }); fetchTableColumnByTableNameAjax(tableName, columnId, loadTableColumns); loadTableInExpressionFilter(tableName); } }); } /** * Enable the droppable option remove the table. 
*/ function enableDeleteTable() { $(".delete-table-in-drop-area").droppable({ greedy : true,
deleteTableFromExpressionCombo($(ui.draggable).find(".drag-table-title-bar").attr("tableName")); $(ui.draggable).hide('scale', { origin: ["top", "right"] }, 300, function() { $(this).remove(); }); } }); } /** * Initialize the circle pulse effect for table drop operation from drop area */ $(document).ready(function() { var x = 0; addCircle(x); setInterval(function() { if (x === 0) { x = 1; } addCircle(x); x++; }, 1200); }); /** * Animation for show the pulse effect for delete the table which is drop inside the drop area * @param id */ function addCircle(id) { $('.delete-table-in-drop-area').append('<div id="' + id + '" class="circle"></div>'); $('#' + id).animate({ 'width': '100px', 'height': '100px', 'margin-top': '-48px', 'margin-left': '-48px', 'opacity': '0', }, 4000, 'easeOutCirc'); setInterval(function() { $('#' + id).remove(); }, 4000); }
tolerance : "touch", accept : ".table-column-list-container", drop : function(event, ui) {
random_line_split
query-builder-common.js
/** * Show the tables as tree format at side menu in window, * here we can Collapse and Expend the tables under "Table" title bar * @author mahesh */ $(function() { $('.tree li:has(ul)').addClass('parent_li').find(' > span').attr( 'title', 'Collapse this branch'); $('.tree li.parent_li > span').on( 'click', function(e) { var children = $(this).parent('li.parent_li').find(' > div > ul > li'); if (children.is(":visible")) { children.hide('fast'); $(this).attr('title', 'Expand this branch').find( ' > i').addClass('fa-plus-square') .removeClass('fa-minus-square'); } else { children.show('fast'); $(this).attr('title', 'Collapse this branch').find( ' > i').addClass('fa-minus-square') .removeClass('fa-plus-square'); } e.stopPropagation(); }); }); fetchAllTablesAjax(loadTablesInTree); enableDeleteTable(); enableTableSearch(); showExpressionBuilder(); function showExpressionBuilder(){ loadExpressionBuilder(); $(".expression-builder-icon").click(function(){ if($(".expression-container").is(":visible")){ $(".expression-container").hide('slide', { direction : "right" }, 500); $(".table-list li").draggable('enable'); $("#reset").removeClass("reset rotate").addClass("disabled-link"); }else{ $(".expression-container").show('slide', { direction : "right" }, 500); $(".table-list li").draggable('disable'); $("#reset").removeClass("reset disabled-link").addClass("reset rotate"); } }); // Enable slim scroll bar for table enableSlimScroll($(".query-builder"), { height : "410px" }); $("[data-toggle='tooltip']").tooltip({ placement : "top" }); } /** * Load all tables in tree, this method call using callback of "fetchAllTablesAjax(loadTablesInTree);" * @param tables */ function loadTablesInTree(tables) { $("ul.table-list").html(""); $.each(tables, function(index, table) { $("<li>", { 'id' : table.name, 'class' : "tree-table", 'html' : "<span><i class='fa fa-table'></i> " + (table.name).toUpperCase() + "</span>" }).appendTo('ul.table-list'); }); 
$("ul.table-list").find("li").slideDown("fade"); // Enable slim scroll bar for table enableSlimScroll($("ul.table-list"), { height : "382px" }); // Enable drag and drop events enableDNDEvents(); } function enableTableSearch(){ var isFiltered = false; // Enable search key events $("input.search-tbl-query").keyup(function() { var tableNameStr = $("input.search-tbl-query").val(); if(tableNameStr.length > 2){ isFiltered = true; fetchTablesByTableNameAjax(tableNameStr.toLowerCase(), loadTablesInTree); } else if(isFiltered) { isFiltered = false; fetchTablesByTableNameAjax("", loadTablesInTree); } }); } /** * Load all columns by given table name, * this method call using callback of "fetchTableColumnByTableNameAjax(tableName, columnId, loadTableColumns);" * @param tables */ function loadTableColumns(columns, columnId) { $.each(columns, function(index, column) { $("ul#" + columnId).append( $("<li>", { 'id' : column.id, 'name' : column.field, 'class' : "column-list", 'html' : "<span><i class='fa fa-columns'></i> " + column.field + "</span>" }) ); }); enableJoinRules($("ul#" + columnId)); // Toggle event to select and unselect the table column $("ul#" + columnId).find("li").click(function() { $(this).toggleClass("column-highlight"); }); // Enable slim scroll bar for table enableSlimScroll($("ul#" + columnId), { height: '185px' }); } var prevConnectionId = ""; function enableJoinRules(columnListObj){ columnListObj.find("li").click(function(){ var connectionId = $(this).attr("id"); if(prevConnectionId && prevConnectionId != connectionId){ var originId = $("#"+prevConnectionId).closest(".table-column-list-container").attr("id"); var targetId = $("#"+connectionId).closest(".table-column-list-container").attr("id"); $(".table-column-list-container").connections({ 'from' : '#'+prevConnectionId, 'to' : '#'+connectionId, 'class' : 'fast' }); $.repeat().add('connection').each($).connections('update').wait(0); prevConnectionId = "", connectionId = ""; endLinkMode(); }else{ 
prevConnectionId = connectionId; var linkLine = $('<div id="new-link-line"></div>').appendTo('body'); linkLine .css('top', $('#'+connectionId).offset().top + $('#'+connectionId).height() / 2) .css('left', $('#'+connectionId).offset().left); // Cancel on right click $(document).bind('mousedown.link', function(event) { if(event.which == 3) { endLinkMode(); } }); $(document).bind('keydown.link', function(event) { // ESCAPE key pressed if(event.keyCode == 27) { endLinkMode(); } }); $(document).mousemove(function(event){ linkMouseMoveEvent(event, connectionId) }); } }); } function linkMouseMoveEvent(event, currentColumnId) { if($('#new-link-line').length > 0) { var originX = $('#'+currentColumnId).offset().left; var originY = $('#'+currentColumnId).offset().top + $('#'+currentColumnId).height() / 2; var length = Math.sqrt((event.pageX - originX) * (event.pageX - originX) + (event.pageY - originY) * (event.pageY - originY)); var angle = 180 / 3.1415 * Math.acos((event.pageY - originY) / length); if(event.pageX > originX) angle *= -1; $('#new-link-line') .css('height', length) .css('-webkit-transform', 'rotate(' + angle + 'deg)') .css('-moz-transform', 'rotate(' + angle + 'deg)') .css('-o-transform', 'rotate(' + angle + 'deg)') .css('-ms-transform', 'rotate(' + angle + 'deg)') .css('transform', 'rotate(' + angle + 'deg)'); } } function
() { $('#new-link-line').remove(); $(document).unbind('mousemove.link').unbind('click.link').unbind('keydown.link'); } /** * Load operators in select box * @param results */ function loadOperatorsInSelectBox(results){ $(".pop-modal-content").find(".operators_select_box").each(function(){ $(this).ddslick({ data : results, width : 170 }); }); } var connections = []; var connectionsId = []; //connections.push(new $.connect('#PROD2CAT', '#PRODUKT', {leftLabel : 'Many', rightLabel: 'One'})); /** * Here we enable the drag and drop events for drag the tables and drop into drop area with list of columns */ function enableDNDEvents() { var zIndex = 3; $(".table-list li").draggable({ revert : "invalid", cursorAt : { top : -8, left : -8 }, appendTo : "body", helper : function(event, ui) { var tableName = $(this).attr("id"); var html = '<div class="drag-helper"><span><i class="fa fa-table"></i> ' + tableName + '</span></div>'; return $(html); } }); $(".table-droppable").droppable({ tolerance : "pointer", accept : ".tree-table", hoverClass : "table-droppable-glow-effect", drop : function(event, ui) { var pos = ui.position, dPos = $(this).offset(); var topPosition = pos.top - dPos.top, leftPosition = pos.left - dPos.left, uniqueId = guid(), tableName = $(ui.draggable).attr("id"), tableId = tableName + "_tblid_" + uniqueId, columnId = tableName + "_columnid_" + uniqueId; $(this).append( $("<div>", { 'id' : tableId, 'class' : 'table-column-list-container draggable', 'style' : 'display:none;left:' + leftPosition + 'px;top:' + topPosition + 'px;z-index:' + (zIndex++), 'html' : '<div tableName="' + tableName + '" class="drag-table-title-bar"><span><i class="fa fa-table"></i> ' + tableName + '</span><div style="display:none;" id="' + tableName + '_tbl-delete" class="tbl-delete"><i class="fa fa-close fa-lg"></i></div></div><ul id="' + columnId + '" class="table-column-list"></ul>' }) ); $("#" + tableId).slideDown("normal", function() { $(this).show(); }); $("#" + tableId).draggable({ 
containment : "parent", handle : ".drag-table-title-bar", drag : function(event, ui) { var item = this; connections.forEach(function(connection){ if(connection.elem1[0] === item || connection.elem2[0] === item) { connection.calculate(); } }) }, start : function(event, ui) { $(this).css("z-index", zIndex++); $(".delete-table-in-drop-area").show('fade'); }, stop : function(event, ui) { $(".delete-table-in-drop-area").hide('fade', {}, 1000); } }); fetchTableColumnByTableNameAjax(tableName, columnId, loadTableColumns); loadTableInExpressionFilter(tableName); } }); } /** * Enable the droppable option remove the table. */ function enableDeleteTable() { $(".delete-table-in-drop-area").droppable({ greedy : true, tolerance : "touch", accept : ".table-column-list-container", drop : function(event, ui) { deleteTableFromExpressionCombo($(ui.draggable).find(".drag-table-title-bar").attr("tableName")); $(ui.draggable).hide('scale', { origin: ["top", "right"] }, 300, function() { $(this).remove(); }); } }); } /** * Initialize the circle pulse effect for table drop operation from drop area */ $(document).ready(function() { var x = 0; addCircle(x); setInterval(function() { if (x === 0) { x = 1; } addCircle(x); x++; }, 1200); }); /** * Animation for show the pulse effect for delete the table which is drop inside the drop area * @param id */ function addCircle(id) { $('.delete-table-in-drop-area').append('<div id="' + id + '" class="circle"></div>'); $('#' + id).animate({ 'width': '100px', 'height': '100px', 'margin-top': '-48px', 'margin-left': '-48px', 'opacity': '0', }, 4000, 'easeOutCirc'); setInterval(function() { $('#' + id).remove(); }, 4000); }
endLinkMode
identifier_name
query-builder-common.js
/** * Show the tables as tree format at side menu in window, * here we can Collapse and Expend the tables under "Table" title bar * @author mahesh */ $(function() { $('.tree li:has(ul)').addClass('parent_li').find(' > span').attr( 'title', 'Collapse this branch'); $('.tree li.parent_li > span').on( 'click', function(e) { var children = $(this).parent('li.parent_li').find(' > div > ul > li'); if (children.is(":visible"))
else { children.show('fast'); $(this).attr('title', 'Collapse this branch').find( ' > i').addClass('fa-minus-square') .removeClass('fa-plus-square'); } e.stopPropagation(); }); }); fetchAllTablesAjax(loadTablesInTree); enableDeleteTable(); enableTableSearch(); showExpressionBuilder(); function showExpressionBuilder(){ loadExpressionBuilder(); $(".expression-builder-icon").click(function(){ if($(".expression-container").is(":visible")){ $(".expression-container").hide('slide', { direction : "right" }, 500); $(".table-list li").draggable('enable'); $("#reset").removeClass("reset rotate").addClass("disabled-link"); }else{ $(".expression-container").show('slide', { direction : "right" }, 500); $(".table-list li").draggable('disable'); $("#reset").removeClass("reset disabled-link").addClass("reset rotate"); } }); // Enable slim scroll bar for table enableSlimScroll($(".query-builder"), { height : "410px" }); $("[data-toggle='tooltip']").tooltip({ placement : "top" }); } /** * Load all tables in tree, this method call using callback of "fetchAllTablesAjax(loadTablesInTree);" * @param tables */ function loadTablesInTree(tables) { $("ul.table-list").html(""); $.each(tables, function(index, table) { $("<li>", { 'id' : table.name, 'class' : "tree-table", 'html' : "<span><i class='fa fa-table'></i> " + (table.name).toUpperCase() + "</span>" }).appendTo('ul.table-list'); }); $("ul.table-list").find("li").slideDown("fade"); // Enable slim scroll bar for table enableSlimScroll($("ul.table-list"), { height : "382px" }); // Enable drag and drop events enableDNDEvents(); } function enableTableSearch(){ var isFiltered = false; // Enable search key events $("input.search-tbl-query").keyup(function() { var tableNameStr = $("input.search-tbl-query").val(); if(tableNameStr.length > 2){ isFiltered = true; fetchTablesByTableNameAjax(tableNameStr.toLowerCase(), loadTablesInTree); } else if(isFiltered) { isFiltered = false; fetchTablesByTableNameAjax("", loadTablesInTree); } }); } /** * 
Load all columns by given table name, * this method call using callback of "fetchTableColumnByTableNameAjax(tableName, columnId, loadTableColumns);" * @param tables */ function loadTableColumns(columns, columnId) { $.each(columns, function(index, column) { $("ul#" + columnId).append( $("<li>", { 'id' : column.id, 'name' : column.field, 'class' : "column-list", 'html' : "<span><i class='fa fa-columns'></i> " + column.field + "</span>" }) ); }); enableJoinRules($("ul#" + columnId)); // Toggle event to select and unselect the table column $("ul#" + columnId).find("li").click(function() { $(this).toggleClass("column-highlight"); }); // Enable slim scroll bar for table enableSlimScroll($("ul#" + columnId), { height: '185px' }); } var prevConnectionId = ""; function enableJoinRules(columnListObj){ columnListObj.find("li").click(function(){ var connectionId = $(this).attr("id"); if(prevConnectionId && prevConnectionId != connectionId){ var originId = $("#"+prevConnectionId).closest(".table-column-list-container").attr("id"); var targetId = $("#"+connectionId).closest(".table-column-list-container").attr("id"); $(".table-column-list-container").connections({ 'from' : '#'+prevConnectionId, 'to' : '#'+connectionId, 'class' : 'fast' }); $.repeat().add('connection').each($).connections('update').wait(0); prevConnectionId = "", connectionId = ""; endLinkMode(); }else{ prevConnectionId = connectionId; var linkLine = $('<div id="new-link-line"></div>').appendTo('body'); linkLine .css('top', $('#'+connectionId).offset().top + $('#'+connectionId).height() / 2) .css('left', $('#'+connectionId).offset().left); // Cancel on right click $(document).bind('mousedown.link', function(event) { if(event.which == 3) { endLinkMode(); } }); $(document).bind('keydown.link', function(event) { // ESCAPE key pressed if(event.keyCode == 27) { endLinkMode(); } }); $(document).mousemove(function(event){ linkMouseMoveEvent(event, connectionId) }); } }); } function linkMouseMoveEvent(event, 
currentColumnId) { if($('#new-link-line').length > 0) { var originX = $('#'+currentColumnId).offset().left; var originY = $('#'+currentColumnId).offset().top + $('#'+currentColumnId).height() / 2; var length = Math.sqrt((event.pageX - originX) * (event.pageX - originX) + (event.pageY - originY) * (event.pageY - originY)); var angle = 180 / 3.1415 * Math.acos((event.pageY - originY) / length); if(event.pageX > originX) angle *= -1; $('#new-link-line') .css('height', length) .css('-webkit-transform', 'rotate(' + angle + 'deg)') .css('-moz-transform', 'rotate(' + angle + 'deg)') .css('-o-transform', 'rotate(' + angle + 'deg)') .css('-ms-transform', 'rotate(' + angle + 'deg)') .css('transform', 'rotate(' + angle + 'deg)'); } } function endLinkMode() { $('#new-link-line').remove(); $(document).unbind('mousemove.link').unbind('click.link').unbind('keydown.link'); } /** * Load operators in select box * @param results */ function loadOperatorsInSelectBox(results){ $(".pop-modal-content").find(".operators_select_box").each(function(){ $(this).ddslick({ data : results, width : 170 }); }); } var connections = []; var connectionsId = []; //connections.push(new $.connect('#PROD2CAT', '#PRODUKT', {leftLabel : 'Many', rightLabel: 'One'})); /** * Here we enable the drag and drop events for drag the tables and drop into drop area with list of columns */ function enableDNDEvents() { var zIndex = 3; $(".table-list li").draggable({ revert : "invalid", cursorAt : { top : -8, left : -8 }, appendTo : "body", helper : function(event, ui) { var tableName = $(this).attr("id"); var html = '<div class="drag-helper"><span><i class="fa fa-table"></i> ' + tableName + '</span></div>'; return $(html); } }); $(".table-droppable").droppable({ tolerance : "pointer", accept : ".tree-table", hoverClass : "table-droppable-glow-effect", drop : function(event, ui) { var pos = ui.position, dPos = $(this).offset(); var topPosition = pos.top - dPos.top, leftPosition = pos.left - dPos.left, uniqueId = guid(), 
tableName = $(ui.draggable).attr("id"), tableId = tableName + "_tblid_" + uniqueId, columnId = tableName + "_columnid_" + uniqueId; $(this).append( $("<div>", { 'id' : tableId, 'class' : 'table-column-list-container draggable', 'style' : 'display:none;left:' + leftPosition + 'px;top:' + topPosition + 'px;z-index:' + (zIndex++), 'html' : '<div tableName="' + tableName + '" class="drag-table-title-bar"><span><i class="fa fa-table"></i> ' + tableName + '</span><div style="display:none;" id="' + tableName + '_tbl-delete" class="tbl-delete"><i class="fa fa-close fa-lg"></i></div></div><ul id="' + columnId + '" class="table-column-list"></ul>' }) ); $("#" + tableId).slideDown("normal", function() { $(this).show(); }); $("#" + tableId).draggable({ containment : "parent", handle : ".drag-table-title-bar", drag : function(event, ui) { var item = this; connections.forEach(function(connection){ if(connection.elem1[0] === item || connection.elem2[0] === item) { connection.calculate(); } }) }, start : function(event, ui) { $(this).css("z-index", zIndex++); $(".delete-table-in-drop-area").show('fade'); }, stop : function(event, ui) { $(".delete-table-in-drop-area").hide('fade', {}, 1000); } }); fetchTableColumnByTableNameAjax(tableName, columnId, loadTableColumns); loadTableInExpressionFilter(tableName); } }); } /** * Enable the droppable option remove the table. 
*/ function enableDeleteTable() { $(".delete-table-in-drop-area").droppable({ greedy : true, tolerance : "touch", accept : ".table-column-list-container", drop : function(event, ui) { deleteTableFromExpressionCombo($(ui.draggable).find(".drag-table-title-bar").attr("tableName")); $(ui.draggable).hide('scale', { origin: ["top", "right"] }, 300, function() { $(this).remove(); }); } }); } /** * Initialize the circle pulse effect for table drop operation from drop area */ $(document).ready(function() { var x = 0; addCircle(x); setInterval(function() { if (x === 0) { x = 1; } addCircle(x); x++; }, 1200); }); /** * Animation for show the pulse effect for delete the table which is drop inside the drop area * @param id */ function addCircle(id) { $('.delete-table-in-drop-area').append('<div id="' + id + '" class="circle"></div>'); $('#' + id).animate({ 'width': '100px', 'height': '100px', 'margin-top': '-48px', 'margin-left': '-48px', 'opacity': '0', }, 4000, 'easeOutCirc'); setInterval(function() { $('#' + id).remove(); }, 4000); }
{ children.hide('fast'); $(this).attr('title', 'Expand this branch').find( ' > i').addClass('fa-plus-square') .removeClass('fa-minus-square'); }
conditional_block
query-builder-common.js
/** * Show the tables as tree format at side menu in window, * here we can Collapse and Expend the tables under "Table" title bar * @author mahesh */ $(function() { $('.tree li:has(ul)').addClass('parent_li').find(' > span').attr( 'title', 'Collapse this branch'); $('.tree li.parent_li > span').on( 'click', function(e) { var children = $(this).parent('li.parent_li').find(' > div > ul > li'); if (children.is(":visible")) { children.hide('fast'); $(this).attr('title', 'Expand this branch').find( ' > i').addClass('fa-plus-square') .removeClass('fa-minus-square'); } else { children.show('fast'); $(this).attr('title', 'Collapse this branch').find( ' > i').addClass('fa-minus-square') .removeClass('fa-plus-square'); } e.stopPropagation(); }); }); fetchAllTablesAjax(loadTablesInTree); enableDeleteTable(); enableTableSearch(); showExpressionBuilder(); function showExpressionBuilder(){ loadExpressionBuilder(); $(".expression-builder-icon").click(function(){ if($(".expression-container").is(":visible")){ $(".expression-container").hide('slide', { direction : "right" }, 500); $(".table-list li").draggable('enable'); $("#reset").removeClass("reset rotate").addClass("disabled-link"); }else{ $(".expression-container").show('slide', { direction : "right" }, 500); $(".table-list li").draggable('disable'); $("#reset").removeClass("reset disabled-link").addClass("reset rotate"); } }); // Enable slim scroll bar for table enableSlimScroll($(".query-builder"), { height : "410px" }); $("[data-toggle='tooltip']").tooltip({ placement : "top" }); } /** * Load all tables in tree, this method call using callback of "fetchAllTablesAjax(loadTablesInTree);" * @param tables */ function loadTablesInTree(tables) { $("ul.table-list").html(""); $.each(tables, function(index, table) { $("<li>", { 'id' : table.name, 'class' : "tree-table", 'html' : "<span><i class='fa fa-table'></i> " + (table.name).toUpperCase() + "</span>" }).appendTo('ul.table-list'); }); 
$("ul.table-list").find("li").slideDown("fade"); // Enable slim scroll bar for table enableSlimScroll($("ul.table-list"), { height : "382px" }); // Enable drag and drop events enableDNDEvents(); } function enableTableSearch(){ var isFiltered = false; // Enable search key events $("input.search-tbl-query").keyup(function() { var tableNameStr = $("input.search-tbl-query").val(); if(tableNameStr.length > 2){ isFiltered = true; fetchTablesByTableNameAjax(tableNameStr.toLowerCase(), loadTablesInTree); } else if(isFiltered) { isFiltered = false; fetchTablesByTableNameAjax("", loadTablesInTree); } }); } /** * Load all columns by given table name, * this method call using callback of "fetchTableColumnByTableNameAjax(tableName, columnId, loadTableColumns);" * @param tables */ function loadTableColumns(columns, columnId) { $.each(columns, function(index, column) { $("ul#" + columnId).append( $("<li>", { 'id' : column.id, 'name' : column.field, 'class' : "column-list", 'html' : "<span><i class='fa fa-columns'></i> " + column.field + "</span>" }) ); }); enableJoinRules($("ul#" + columnId)); // Toggle event to select and unselect the table column $("ul#" + columnId).find("li").click(function() { $(this).toggleClass("column-highlight"); }); // Enable slim scroll bar for table enableSlimScroll($("ul#" + columnId), { height: '185px' }); } var prevConnectionId = ""; function enableJoinRules(columnListObj){ columnListObj.find("li").click(function(){ var connectionId = $(this).attr("id"); if(prevConnectionId && prevConnectionId != connectionId){ var originId = $("#"+prevConnectionId).closest(".table-column-list-container").attr("id"); var targetId = $("#"+connectionId).closest(".table-column-list-container").attr("id"); $(".table-column-list-container").connections({ 'from' : '#'+prevConnectionId, 'to' : '#'+connectionId, 'class' : 'fast' }); $.repeat().add('connection').each($).connections('update').wait(0); prevConnectionId = "", connectionId = ""; endLinkMode(); }else{ 
prevConnectionId = connectionId; var linkLine = $('<div id="new-link-line"></div>').appendTo('body'); linkLine .css('top', $('#'+connectionId).offset().top + $('#'+connectionId).height() / 2) .css('left', $('#'+connectionId).offset().left); // Cancel on right click $(document).bind('mousedown.link', function(event) { if(event.which == 3) { endLinkMode(); } }); $(document).bind('keydown.link', function(event) { // ESCAPE key pressed if(event.keyCode == 27) { endLinkMode(); } }); $(document).mousemove(function(event){ linkMouseMoveEvent(event, connectionId) }); } }); } function linkMouseMoveEvent(event, currentColumnId) { if($('#new-link-line').length > 0) { var originX = $('#'+currentColumnId).offset().left; var originY = $('#'+currentColumnId).offset().top + $('#'+currentColumnId).height() / 2; var length = Math.sqrt((event.pageX - originX) * (event.pageX - originX) + (event.pageY - originY) * (event.pageY - originY)); var angle = 180 / 3.1415 * Math.acos((event.pageY - originY) / length); if(event.pageX > originX) angle *= -1; $('#new-link-line') .css('height', length) .css('-webkit-transform', 'rotate(' + angle + 'deg)') .css('-moz-transform', 'rotate(' + angle + 'deg)') .css('-o-transform', 'rotate(' + angle + 'deg)') .css('-ms-transform', 'rotate(' + angle + 'deg)') .css('transform', 'rotate(' + angle + 'deg)'); } } function endLinkMode() { $('#new-link-line').remove(); $(document).unbind('mousemove.link').unbind('click.link').unbind('keydown.link'); } /** * Load operators in select box * @param results */ function loadOperatorsInSelectBox(results)
var connections = []; var connectionsId = []; //connections.push(new $.connect('#PROD2CAT', '#PRODUKT', {leftLabel : 'Many', rightLabel: 'One'})); /** * Here we enable the drag and drop events for drag the tables and drop into drop area with list of columns */ function enableDNDEvents() { var zIndex = 3; $(".table-list li").draggable({ revert : "invalid", cursorAt : { top : -8, left : -8 }, appendTo : "body", helper : function(event, ui) { var tableName = $(this).attr("id"); var html = '<div class="drag-helper"><span><i class="fa fa-table"></i> ' + tableName + '</span></div>'; return $(html); } }); $(".table-droppable").droppable({ tolerance : "pointer", accept : ".tree-table", hoverClass : "table-droppable-glow-effect", drop : function(event, ui) { var pos = ui.position, dPos = $(this).offset(); var topPosition = pos.top - dPos.top, leftPosition = pos.left - dPos.left, uniqueId = guid(), tableName = $(ui.draggable).attr("id"), tableId = tableName + "_tblid_" + uniqueId, columnId = tableName + "_columnid_" + uniqueId; $(this).append( $("<div>", { 'id' : tableId, 'class' : 'table-column-list-container draggable', 'style' : 'display:none;left:' + leftPosition + 'px;top:' + topPosition + 'px;z-index:' + (zIndex++), 'html' : '<div tableName="' + tableName + '" class="drag-table-title-bar"><span><i class="fa fa-table"></i> ' + tableName + '</span><div style="display:none;" id="' + tableName + '_tbl-delete" class="tbl-delete"><i class="fa fa-close fa-lg"></i></div></div><ul id="' + columnId + '" class="table-column-list"></ul>' }) ); $("#" + tableId).slideDown("normal", function() { $(this).show(); }); $("#" + tableId).draggable({ containment : "parent", handle : ".drag-table-title-bar", drag : function(event, ui) { var item = this; connections.forEach(function(connection){ if(connection.elem1[0] === item || connection.elem2[0] === item) { connection.calculate(); } }) }, start : function(event, ui) { $(this).css("z-index", zIndex++); 
$(".delete-table-in-drop-area").show('fade'); }, stop : function(event, ui) { $(".delete-table-in-drop-area").hide('fade', {}, 1000); } }); fetchTableColumnByTableNameAjax(tableName, columnId, loadTableColumns); loadTableInExpressionFilter(tableName); } }); } /** * Enable the droppable option remove the table. */ function enableDeleteTable() { $(".delete-table-in-drop-area").droppable({ greedy : true, tolerance : "touch", accept : ".table-column-list-container", drop : function(event, ui) { deleteTableFromExpressionCombo($(ui.draggable).find(".drag-table-title-bar").attr("tableName")); $(ui.draggable).hide('scale', { origin: ["top", "right"] }, 300, function() { $(this).remove(); }); } }); } /** * Initialize the circle pulse effect for table drop operation from drop area */ $(document).ready(function() { var x = 0; addCircle(x); setInterval(function() { if (x === 0) { x = 1; } addCircle(x); x++; }, 1200); }); /** * Animation for show the pulse effect for delete the table which is drop inside the drop area * @param id */ function addCircle(id) { $('.delete-table-in-drop-area').append('<div id="' + id + '" class="circle"></div>'); $('#' + id).animate({ 'width': '100px', 'height': '100px', 'margin-top': '-48px', 'margin-left': '-48px', 'opacity': '0', }, 4000, 'easeOutCirc'); setInterval(function() { $('#' + id).remove(); }, 4000); }
{ $(".pop-modal-content").find(".operators_select_box").each(function(){ $(this).ddslick({ data : results, width : 170 }); }); }
identifier_body
http.go
package main import ( "encoding/json" "fmt" m "github.com/log22/MoviesCrawler" "reflect" "net/http" "log" "github.com/gorilla/mux" "sync" "os" "path/filepath" "os/exec" "regexp" "strings" "errors"
) // Request struct for Search POST type SearchRequest struct { Services []string MovieName string } // Request struct for get downloads links POST type DownloadLinksRequest struct { Service string Movie m.Movie } // Response struct for Search Request type FoundMovies struct { Service string Movies m.FoundMovies } type RequestAddDownload struct { Link string } var allServices map[string]m.Crawler var torrentIP = "192.168.0.108" // spaHandler implements the http.Handler interface, so we can use it // to respond to HTTP requests. The path to the static directory and // path to the index file within that static directory are used to // serve the SPA in the given static directory. type spaHandler struct { staticPath string indexPath string } // ServeHTTP inspects the URL path to locate a file within the static dir // on the SPA handler. If a file is found, it will be served. If not, the // file located at the index path on the SPA handler will be served. This // is suitable behavior for serving an SPA (single page application). 
func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // get the absolute path to prevent directory traversal path, err := filepath.Abs(r.URL.Path) if err != nil { // if we failed to get the absolute path respond with a 400 bad request // and stop http.Error(w, err.Error(), http.StatusBadRequest) return } // prepend the path with the path to the static directory path = filepath.Join(h.staticPath, path) // check whether a file exists at the given path _, err = os.Stat(path) if os.IsNotExist(err) { // file does not exist, serve index.html http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) return } else if err != nil { // if we got an error (that wasn't that the file doesn't exist) stating the // file, return a 500 internal server error and stop http.Error(w, err.Error(), http.StatusInternalServerError) return } // otherwise, use http.FileServer to serve the static dir http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) } type Torrent struct { ID string Done, Downloaded, ETA, Up, Down, Ratio, Status, Name string } func ListTorrents(ip string) (error, []Torrent) { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-l").Output() if err != nil { log.Fatal(err) return errors.New("Error"), nil } // Split Lines into array lines := strings.Split(string(out), "\n") // Remove first and last line lines = lines[1:len(lines)-2] // Remove all 2 spaces re := regexp.MustCompile(`\s{2,}`) var torrents []Torrent for _, line := range lines { items := strings.Split(re.ReplaceAllString(line, "\t"), "\t")[1:] id := strings.ReplaceAll(items[0], "*", "") torrent := Torrent{id, items[1], items[2], items[3], items[4], items[5], items[6], items[7], strings.Join(items[8:],"")} torrents = append(torrents, torrent) } return nil, torrents } func AddTorrent(ip string, magnetLink string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-a", magnetLink).Output() if err != nil { log.Fatal(err) return false } if 
strings.Contains(string(out), "Error") { return false } else { return true } } func StopTorrent(ip string, ID string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-t", ID, "-S").Output() if err != nil { log.Fatal(err) return false } if strings.Contains(string(out), "Error") { return false } else { return true } } func ResumeTorrent(ip string, ID string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-t", ID, "-s").Output() if err != nil { log.Fatal(err) return false } if strings.Contains(string(out), "Error") { return false } else { return true } } func DeleteTorrent(ip string, ID string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-t", ID, "-rad").Output() if err != nil { log.Fatal(err) return false } if strings.Contains(string(out), "Error") { return false } else { return true } } func getType(myvar interface{}) string { if t := reflect.TypeOf(myvar); t.Kind() == reflect.Ptr { return "*" + t.Elem().Name() } else { return t.Name() } } func setServices() map[string]m.Crawler { services := make(map[string]m.Crawler) services["MegaTorrents"] = m.MegaTorrents{} services["BludTV"] = m.BludTV{} services["ComandoTorrents"] = m.ComandoTorrents{} services["MeusFilmesTorrent"] = m.MeusFilmesTorrent{} services["PirateTorrent"] = m.PirateTorrent{} return services } func listServices(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") fmt.Printf("[%s] - Requested listServices\n", time.Now().Format("2006-01-02 15:04:05")) var listServices []string for key, _ := range allServices { listServices = append(listServices, key) } json.NewEncoder(w).Encode(listServices) } func searchMovie(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") // If is a option method, terminate function 
if r.Method == http.MethodOptions { return } // Decode JSON request into structure var postParams SearchRequest err := json.NewDecoder(r.Body).Decode(&postParams) // If throw an error when JSON decode if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } fmt.Printf("[%s] - Requested searchMovie: %+v\n", time.Now().Format("2006-01-02 15:04:05"), postParams) var wg sync.WaitGroup var mutex sync.Mutex var results []FoundMovies // For each service requested, start one goroutine for _, service := range postParams.Services { fmt.Printf("Searching %s\n", service) // Add one go routine to group counter wg.Add(1) // Launch goroutine go func(service m.Crawler, wg *sync.WaitGroup, mutex *sync.Mutex, total *[]FoundMovies, postParams SearchRequest) { defer wg.Done() serviceStr := getType(service) // Search movie movieLinks := m.SearchAll(service, postParams.MovieName) fmt.Printf("Found %d on %s\n", len(movieLinks), serviceStr) if movieLinks != nil { mutex.Lock() results = append(results, FoundMovies{serviceStr, movieLinks}) mutex.Unlock() } }(allServices[service], &wg, &mutex, &results, postParams) } // Wait all services finish wg.Wait() // Send found movies json.NewEncoder(w).Encode(results) } func getMagnetLinks(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") // If is a option method, terminate function if r.Method == http.MethodOptions { return } // Receive a movie as params var postParams DownloadLinksRequest err := json.NewDecoder(r.Body).Decode(&postParams) // If throw an error when JSON decode if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } fmt.Printf("[%s] - Requested getMagnetLinks: %+v\n", time.Now().Format("2006-01-02 15:04:05"), postParams) // Get service struct service := allServices[postParams.Service] // Get Download Link downloadLink := postParams.Movie.Link // Get MagnetLinks var resultOptions 
m.FoundMagnetLinks resultOptions = service.GetDownloadLinks(downloadLink) // Send found links json.NewEncoder(w).Encode(resultOptions) } func listAllTorrents(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") fmt.Printf("[%s] - Requested listAllTorrents\n", time.Now().Format("2006-01-02 15:04:05")) err, list := ListTorrents(torrentIP) if err != nil { w.WriteHeader(http.StatusBadRequest) return } json.NewEncoder(w).Encode(list) } func addMagnetLink(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") // If is a option method, terminate function if r.Method == http.MethodOptions { return } // Decode JSON request into structure var postParams RequestAddDownload err := json.NewDecoder(r.Body).Decode(&postParams) // If throw an error when JSON decode if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } fmt.Printf("[%s] - Requested addMagnetLink: %+v\n", time.Now().Format("2006-01-02 15:04:05"), postParams) result := AddTorrent(torrentIP, postParams.Link) if !result { http.Error(w, err.Error(), http.StatusBadRequest) return } json.NewEncoder(w).Encode(result) } func deleteTorrent(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") params := mux.Vars(r) id := params["id"] result := DeleteTorrent(torrentIP, id) if !result { w.WriteHeader(http.StatusBadRequest) return } fmt.Printf("[%s] - Requested deleteTorrent: %s\n", time.Now().Format("2006-01-02 15:04:05"), id) json.NewEncoder(w).Encode(result) } func resumeTorrent(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") params := 
mux.Vars(r) id := params["id"] result := ResumeTorrent(torrentIP, id) if !result { w.WriteHeader(http.StatusBadRequest) return } fmt.Printf("[%s] - Requested resumeTorrent: %s\n", time.Now().Format("2006-01-02 15:04:05"), id) json.NewEncoder(w).Encode(result) } func pauseTorrent(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") params := mux.Vars(r) id := params["id"] result := StopTorrent(torrentIP, id) if !result { w.WriteHeader(http.StatusBadRequest) return } fmt.Printf("[%s] - Requested pauseTorrent: %s\n", time.Now().Format("2006-01-02 15:04:05"), id) json.NewEncoder(w).Encode(result) } func main() { ip, _ := exec.Command("hostname", "-I").Output() ipF := strings.ReplaceAll(string(ip), "\n", "") ipF = strings.ReplaceAll(ipF, "\t", "") ips := strings.Split(ipF, " ") torrentIP = ips[0] fmt.Printf("IP: %s\n", torrentIP) allServices = setServices() r := mux.NewRouter() // Handle API routes api := r.PathPrefix("/api/").Subrouter() api.HandleFunc("/listservices", listServices) api.HandleFunc("/searchMovie", searchMovie).Methods("POST", "OPTIONS") api.HandleFunc("/getMagnetLinks", getMagnetLinks).Methods("POST", "OPTIONS") // Torrent Endpoints api.HandleFunc("/listTorrents", listAllTorrents).Methods("GET") api.HandleFunc("/addMagnetLink", addMagnetLink).Methods("POST", "OPTIONS") api.HandleFunc("/deleteTorrent/{id}", deleteTorrent).Methods("GET") api.HandleFunc("/resumeTorrent/{id}", resumeTorrent).Methods("GET") api.HandleFunc("/pauseTorrent/{id}", pauseTorrent).Methods("GET") // SPA route spa := spaHandler{staticPath: "./spa", indexPath: "index.html"} r.PathPrefix("/").Handler(spa) // Set CORS r.Use(mux.CORSMethodMiddleware(r)) // Listen fmt.Printf("[%s] - Starting MoviesPI Service\n", time.Now().Format("2006-01-02 15:04:05")) log.Fatal(http.ListenAndServe(":8888", r)) }
"time"
random_line_split
http.go
package main import ( "encoding/json" "fmt" m "github.com/log22/MoviesCrawler" "reflect" "net/http" "log" "github.com/gorilla/mux" "sync" "os" "path/filepath" "os/exec" "regexp" "strings" "errors" "time" ) // Request struct for Search POST type SearchRequest struct { Services []string MovieName string } // Request struct for get downloads links POST type DownloadLinksRequest struct { Service string Movie m.Movie } // Response struct for Search Request type FoundMovies struct { Service string Movies m.FoundMovies } type RequestAddDownload struct { Link string } var allServices map[string]m.Crawler var torrentIP = "192.168.0.108" // spaHandler implements the http.Handler interface, so we can use it // to respond to HTTP requests. The path to the static directory and // path to the index file within that static directory are used to // serve the SPA in the given static directory. type spaHandler struct { staticPath string indexPath string } // ServeHTTP inspects the URL path to locate a file within the static dir // on the SPA handler. If a file is found, it will be served. If not, the // file located at the index path on the SPA handler will be served. This // is suitable behavior for serving an SPA (single page application). 
func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // get the absolute path to prevent directory traversal path, err := filepath.Abs(r.URL.Path) if err != nil { // if we failed to get the absolute path respond with a 400 bad request // and stop http.Error(w, err.Error(), http.StatusBadRequest) return } // prepend the path with the path to the static directory path = filepath.Join(h.staticPath, path) // check whether a file exists at the given path _, err = os.Stat(path) if os.IsNotExist(err) { // file does not exist, serve index.html http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) return } else if err != nil { // if we got an error (that wasn't that the file doesn't exist) stating the // file, return a 500 internal server error and stop http.Error(w, err.Error(), http.StatusInternalServerError) return } // otherwise, use http.FileServer to serve the static dir http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) } type Torrent struct { ID string Done, Downloaded, ETA, Up, Down, Ratio, Status, Name string } func ListTorrents(ip string) (error, []Torrent) { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-l").Output() if err != nil { log.Fatal(err) return errors.New("Error"), nil } // Split Lines into array lines := strings.Split(string(out), "\n") // Remove first and last line lines = lines[1:len(lines)-2] // Remove all 2 spaces re := regexp.MustCompile(`\s{2,}`) var torrents []Torrent for _, line := range lines { items := strings.Split(re.ReplaceAllString(line, "\t"), "\t")[1:] id := strings.ReplaceAll(items[0], "*", "") torrent := Torrent{id, items[1], items[2], items[3], items[4], items[5], items[6], items[7], strings.Join(items[8:],"")} torrents = append(torrents, torrent) } return nil, torrents } func AddTorrent(ip string, magnetLink string) bool
func StopTorrent(ip string, ID string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-t", ID, "-S").Output() if err != nil { log.Fatal(err) return false } if strings.Contains(string(out), "Error") { return false } else { return true } } func ResumeTorrent(ip string, ID string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-t", ID, "-s").Output() if err != nil { log.Fatal(err) return false } if strings.Contains(string(out), "Error") { return false } else { return true } } func DeleteTorrent(ip string, ID string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-t", ID, "-rad").Output() if err != nil { log.Fatal(err) return false } if strings.Contains(string(out), "Error") { return false } else { return true } } func getType(myvar interface{}) string { if t := reflect.TypeOf(myvar); t.Kind() == reflect.Ptr { return "*" + t.Elem().Name() } else { return t.Name() } } func setServices() map[string]m.Crawler { services := make(map[string]m.Crawler) services["MegaTorrents"] = m.MegaTorrents{} services["BludTV"] = m.BludTV{} services["ComandoTorrents"] = m.ComandoTorrents{} services["MeusFilmesTorrent"] = m.MeusFilmesTorrent{} services["PirateTorrent"] = m.PirateTorrent{} return services } func listServices(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") fmt.Printf("[%s] - Requested listServices\n", time.Now().Format("2006-01-02 15:04:05")) var listServices []string for key, _ := range allServices { listServices = append(listServices, key) } json.NewEncoder(w).Encode(listServices) } func searchMovie(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") // If is a option method, terminate function if r.Method == http.MethodOptions { return } // Decode JSON request into 
structure var postParams SearchRequest err := json.NewDecoder(r.Body).Decode(&postParams) // If throw an error when JSON decode if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } fmt.Printf("[%s] - Requested searchMovie: %+v\n", time.Now().Format("2006-01-02 15:04:05"), postParams) var wg sync.WaitGroup var mutex sync.Mutex var results []FoundMovies // For each service requested, start one goroutine for _, service := range postParams.Services { fmt.Printf("Searching %s\n", service) // Add one go routine to group counter wg.Add(1) // Launch goroutine go func(service m.Crawler, wg *sync.WaitGroup, mutex *sync.Mutex, total *[]FoundMovies, postParams SearchRequest) { defer wg.Done() serviceStr := getType(service) // Search movie movieLinks := m.SearchAll(service, postParams.MovieName) fmt.Printf("Found %d on %s\n", len(movieLinks), serviceStr) if movieLinks != nil { mutex.Lock() results = append(results, FoundMovies{serviceStr, movieLinks}) mutex.Unlock() } }(allServices[service], &wg, &mutex, &results, postParams) } // Wait all services finish wg.Wait() // Send found movies json.NewEncoder(w).Encode(results) } func getMagnetLinks(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") // If is a option method, terminate function if r.Method == http.MethodOptions { return } // Receive a movie as params var postParams DownloadLinksRequest err := json.NewDecoder(r.Body).Decode(&postParams) // If throw an error when JSON decode if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } fmt.Printf("[%s] - Requested getMagnetLinks: %+v\n", time.Now().Format("2006-01-02 15:04:05"), postParams) // Get service struct service := allServices[postParams.Service] // Get Download Link downloadLink := postParams.Movie.Link // Get MagnetLinks var resultOptions m.FoundMagnetLinks resultOptions = 
service.GetDownloadLinks(downloadLink) // Send found links json.NewEncoder(w).Encode(resultOptions) } func listAllTorrents(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") fmt.Printf("[%s] - Requested listAllTorrents\n", time.Now().Format("2006-01-02 15:04:05")) err, list := ListTorrents(torrentIP) if err != nil { w.WriteHeader(http.StatusBadRequest) return } json.NewEncoder(w).Encode(list) } func addMagnetLink(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") // If is a option method, terminate function if r.Method == http.MethodOptions { return } // Decode JSON request into structure var postParams RequestAddDownload err := json.NewDecoder(r.Body).Decode(&postParams) // If throw an error when JSON decode if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } fmt.Printf("[%s] - Requested addMagnetLink: %+v\n", time.Now().Format("2006-01-02 15:04:05"), postParams) result := AddTorrent(torrentIP, postParams.Link) if !result { http.Error(w, err.Error(), http.StatusBadRequest) return } json.NewEncoder(w).Encode(result) } func deleteTorrent(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") params := mux.Vars(r) id := params["id"] result := DeleteTorrent(torrentIP, id) if !result { w.WriteHeader(http.StatusBadRequest) return } fmt.Printf("[%s] - Requested deleteTorrent: %s\n", time.Now().Format("2006-01-02 15:04:05"), id) json.NewEncoder(w).Encode(result) } func resumeTorrent(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") params := mux.Vars(r) id := params["id"] result := 
ResumeTorrent(torrentIP, id) if !result { w.WriteHeader(http.StatusBadRequest) return } fmt.Printf("[%s] - Requested resumeTorrent: %s\n", time.Now().Format("2006-01-02 15:04:05"), id) json.NewEncoder(w).Encode(result) } func pauseTorrent(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") params := mux.Vars(r) id := params["id"] result := StopTorrent(torrentIP, id) if !result { w.WriteHeader(http.StatusBadRequest) return } fmt.Printf("[%s] - Requested pauseTorrent: %s\n", time.Now().Format("2006-01-02 15:04:05"), id) json.NewEncoder(w).Encode(result) } func main() { ip, _ := exec.Command("hostname", "-I").Output() ipF := strings.ReplaceAll(string(ip), "\n", "") ipF = strings.ReplaceAll(ipF, "\t", "") ips := strings.Split(ipF, " ") torrentIP = ips[0] fmt.Printf("IP: %s\n", torrentIP) allServices = setServices() r := mux.NewRouter() // Handle API routes api := r.PathPrefix("/api/").Subrouter() api.HandleFunc("/listservices", listServices) api.HandleFunc("/searchMovie", searchMovie).Methods("POST", "OPTIONS") api.HandleFunc("/getMagnetLinks", getMagnetLinks).Methods("POST", "OPTIONS") // Torrent Endpoints api.HandleFunc("/listTorrents", listAllTorrents).Methods("GET") api.HandleFunc("/addMagnetLink", addMagnetLink).Methods("POST", "OPTIONS") api.HandleFunc("/deleteTorrent/{id}", deleteTorrent).Methods("GET") api.HandleFunc("/resumeTorrent/{id}", resumeTorrent).Methods("GET") api.HandleFunc("/pauseTorrent/{id}", pauseTorrent).Methods("GET") // SPA route spa := spaHandler{staticPath: "./spa", indexPath: "index.html"} r.PathPrefix("/").Handler(spa) // Set CORS r.Use(mux.CORSMethodMiddleware(r)) // Listen fmt.Printf("[%s] - Starting MoviesPI Service\n", time.Now().Format("2006-01-02 15:04:05")) log.Fatal(http.ListenAndServe(":8888", r)) }
{ out, err := exec.Command("/usr/bin/transmission-remote", ip,"-a", magnetLink).Output() if err != nil { log.Fatal(err) return false } if strings.Contains(string(out), "Error") { return false } else { return true } }
identifier_body
http.go
package main import ( "encoding/json" "fmt" m "github.com/log22/MoviesCrawler" "reflect" "net/http" "log" "github.com/gorilla/mux" "sync" "os" "path/filepath" "os/exec" "regexp" "strings" "errors" "time" ) // Request struct for Search POST type SearchRequest struct { Services []string MovieName string } // Request struct for get downloads links POST type DownloadLinksRequest struct { Service string Movie m.Movie } // Response struct for Search Request type FoundMovies struct { Service string Movies m.FoundMovies } type RequestAddDownload struct { Link string } var allServices map[string]m.Crawler var torrentIP = "192.168.0.108" // spaHandler implements the http.Handler interface, so we can use it // to respond to HTTP requests. The path to the static directory and // path to the index file within that static directory are used to // serve the SPA in the given static directory. type spaHandler struct { staticPath string indexPath string } // ServeHTTP inspects the URL path to locate a file within the static dir // on the SPA handler. If a file is found, it will be served. If not, the // file located at the index path on the SPA handler will be served. This // is suitable behavior for serving an SPA (single page application). 
func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // get the absolute path to prevent directory traversal path, err := filepath.Abs(r.URL.Path) if err != nil { // if we failed to get the absolute path respond with a 400 bad request // and stop http.Error(w, err.Error(), http.StatusBadRequest) return } // prepend the path with the path to the static directory path = filepath.Join(h.staticPath, path) // check whether a file exists at the given path _, err = os.Stat(path) if os.IsNotExist(err) { // file does not exist, serve index.html http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) return } else if err != nil { // if we got an error (that wasn't that the file doesn't exist) stating the // file, return a 500 internal server error and stop http.Error(w, err.Error(), http.StatusInternalServerError) return } // otherwise, use http.FileServer to serve the static dir http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) } type Torrent struct { ID string Done, Downloaded, ETA, Up, Down, Ratio, Status, Name string } func ListTorrents(ip string) (error, []Torrent) { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-l").Output() if err != nil { log.Fatal(err) return errors.New("Error"), nil } // Split Lines into array lines := strings.Split(string(out), "\n") // Remove first and last line lines = lines[1:len(lines)-2] // Remove all 2 spaces re := regexp.MustCompile(`\s{2,}`) var torrents []Torrent for _, line := range lines { items := strings.Split(re.ReplaceAllString(line, "\t"), "\t")[1:] id := strings.ReplaceAll(items[0], "*", "") torrent := Torrent{id, items[1], items[2], items[3], items[4], items[5], items[6], items[7], strings.Join(items[8:],"")} torrents = append(torrents, torrent) } return nil, torrents } func AddTorrent(ip string, magnetLink string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-a", magnetLink).Output() if err != nil { log.Fatal(err) return false } if 
strings.Contains(string(out), "Error") { return false } else { return true } } func StopTorrent(ip string, ID string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-t", ID, "-S").Output() if err != nil { log.Fatal(err) return false } if strings.Contains(string(out), "Error") { return false } else { return true } } func ResumeTorrent(ip string, ID string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-t", ID, "-s").Output() if err != nil { log.Fatal(err) return false } if strings.Contains(string(out), "Error") { return false } else { return true } } func DeleteTorrent(ip string, ID string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-t", ID, "-rad").Output() if err != nil { log.Fatal(err) return false } if strings.Contains(string(out), "Error") { return false } else { return true } } func getType(myvar interface{}) string { if t := reflect.TypeOf(myvar); t.Kind() == reflect.Ptr { return "*" + t.Elem().Name() } else { return t.Name() } } func setServices() map[string]m.Crawler { services := make(map[string]m.Crawler) services["MegaTorrents"] = m.MegaTorrents{} services["BludTV"] = m.BludTV{} services["ComandoTorrents"] = m.ComandoTorrents{} services["MeusFilmesTorrent"] = m.MeusFilmesTorrent{} services["PirateTorrent"] = m.PirateTorrent{} return services } func
(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") fmt.Printf("[%s] - Requested listServices\n", time.Now().Format("2006-01-02 15:04:05")) var listServices []string for key, _ := range allServices { listServices = append(listServices, key) } json.NewEncoder(w).Encode(listServices) } func searchMovie(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") // If is a option method, terminate function if r.Method == http.MethodOptions { return } // Decode JSON request into structure var postParams SearchRequest err := json.NewDecoder(r.Body).Decode(&postParams) // If throw an error when JSON decode if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } fmt.Printf("[%s] - Requested searchMovie: %+v\n", time.Now().Format("2006-01-02 15:04:05"), postParams) var wg sync.WaitGroup var mutex sync.Mutex var results []FoundMovies // For each service requested, start one goroutine for _, service := range postParams.Services { fmt.Printf("Searching %s\n", service) // Add one go routine to group counter wg.Add(1) // Launch goroutine go func(service m.Crawler, wg *sync.WaitGroup, mutex *sync.Mutex, total *[]FoundMovies, postParams SearchRequest) { defer wg.Done() serviceStr := getType(service) // Search movie movieLinks := m.SearchAll(service, postParams.MovieName) fmt.Printf("Found %d on %s\n", len(movieLinks), serviceStr) if movieLinks != nil { mutex.Lock() results = append(results, FoundMovies{serviceStr, movieLinks}) mutex.Unlock() } }(allServices[service], &wg, &mutex, &results, postParams) } // Wait all services finish wg.Wait() // Send found movies json.NewEncoder(w).Encode(results) } func getMagnetLinks(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") 
w.Header().Set("Access-Control-Allow-Headers", "*") // If is a option method, terminate function if r.Method == http.MethodOptions { return } // Receive a movie as params var postParams DownloadLinksRequest err := json.NewDecoder(r.Body).Decode(&postParams) // If throw an error when JSON decode if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } fmt.Printf("[%s] - Requested getMagnetLinks: %+v\n", time.Now().Format("2006-01-02 15:04:05"), postParams) // Get service struct service := allServices[postParams.Service] // Get Download Link downloadLink := postParams.Movie.Link // Get MagnetLinks var resultOptions m.FoundMagnetLinks resultOptions = service.GetDownloadLinks(downloadLink) // Send found links json.NewEncoder(w).Encode(resultOptions) } func listAllTorrents(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") fmt.Printf("[%s] - Requested listAllTorrents\n", time.Now().Format("2006-01-02 15:04:05")) err, list := ListTorrents(torrentIP) if err != nil { w.WriteHeader(http.StatusBadRequest) return } json.NewEncoder(w).Encode(list) } func addMagnetLink(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") // If is a option method, terminate function if r.Method == http.MethodOptions { return } // Decode JSON request into structure var postParams RequestAddDownload err := json.NewDecoder(r.Body).Decode(&postParams) // If throw an error when JSON decode if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } fmt.Printf("[%s] - Requested addMagnetLink: %+v\n", time.Now().Format("2006-01-02 15:04:05"), postParams) result := AddTorrent(torrentIP, postParams.Link) if !result { http.Error(w, err.Error(), http.StatusBadRequest) return } json.NewEncoder(w).Encode(result) } func deleteTorrent(w 
http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") params := mux.Vars(r) id := params["id"] result := DeleteTorrent(torrentIP, id) if !result { w.WriteHeader(http.StatusBadRequest) return } fmt.Printf("[%s] - Requested deleteTorrent: %s\n", time.Now().Format("2006-01-02 15:04:05"), id) json.NewEncoder(w).Encode(result) } func resumeTorrent(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") params := mux.Vars(r) id := params["id"] result := ResumeTorrent(torrentIP, id) if !result { w.WriteHeader(http.StatusBadRequest) return } fmt.Printf("[%s] - Requested resumeTorrent: %s\n", time.Now().Format("2006-01-02 15:04:05"), id) json.NewEncoder(w).Encode(result) } func pauseTorrent(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") params := mux.Vars(r) id := params["id"] result := StopTorrent(torrentIP, id) if !result { w.WriteHeader(http.StatusBadRequest) return } fmt.Printf("[%s] - Requested pauseTorrent: %s\n", time.Now().Format("2006-01-02 15:04:05"), id) json.NewEncoder(w).Encode(result) } func main() { ip, _ := exec.Command("hostname", "-I").Output() ipF := strings.ReplaceAll(string(ip), "\n", "") ipF = strings.ReplaceAll(ipF, "\t", "") ips := strings.Split(ipF, " ") torrentIP = ips[0] fmt.Printf("IP: %s\n", torrentIP) allServices = setServices() r := mux.NewRouter() // Handle API routes api := r.PathPrefix("/api/").Subrouter() api.HandleFunc("/listservices", listServices) api.HandleFunc("/searchMovie", searchMovie).Methods("POST", "OPTIONS") api.HandleFunc("/getMagnetLinks", getMagnetLinks).Methods("POST", "OPTIONS") // Torrent Endpoints api.HandleFunc("/listTorrents", listAllTorrents).Methods("GET") 
api.HandleFunc("/addMagnetLink", addMagnetLink).Methods("POST", "OPTIONS") api.HandleFunc("/deleteTorrent/{id}", deleteTorrent).Methods("GET") api.HandleFunc("/resumeTorrent/{id}", resumeTorrent).Methods("GET") api.HandleFunc("/pauseTorrent/{id}", pauseTorrent).Methods("GET") // SPA route spa := spaHandler{staticPath: "./spa", indexPath: "index.html"} r.PathPrefix("/").Handler(spa) // Set CORS r.Use(mux.CORSMethodMiddleware(r)) // Listen fmt.Printf("[%s] - Starting MoviesPI Service\n", time.Now().Format("2006-01-02 15:04:05")) log.Fatal(http.ListenAndServe(":8888", r)) }
listServices
identifier_name
http.go
package main import ( "encoding/json" "fmt" m "github.com/log22/MoviesCrawler" "reflect" "net/http" "log" "github.com/gorilla/mux" "sync" "os" "path/filepath" "os/exec" "regexp" "strings" "errors" "time" ) // Request struct for Search POST type SearchRequest struct { Services []string MovieName string } // Request struct for get downloads links POST type DownloadLinksRequest struct { Service string Movie m.Movie } // Response struct for Search Request type FoundMovies struct { Service string Movies m.FoundMovies } type RequestAddDownload struct { Link string } var allServices map[string]m.Crawler var torrentIP = "192.168.0.108" // spaHandler implements the http.Handler interface, so we can use it // to respond to HTTP requests. The path to the static directory and // path to the index file within that static directory are used to // serve the SPA in the given static directory. type spaHandler struct { staticPath string indexPath string } // ServeHTTP inspects the URL path to locate a file within the static dir // on the SPA handler. If a file is found, it will be served. If not, the // file located at the index path on the SPA handler will be served. This // is suitable behavior for serving an SPA (single page application). func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // get the absolute path to prevent directory traversal path, err := filepath.Abs(r.URL.Path) if err != nil
// prepend the path with the path to the static directory path = filepath.Join(h.staticPath, path) // check whether a file exists at the given path _, err = os.Stat(path) if os.IsNotExist(err) { // file does not exist, serve index.html http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath)) return } else if err != nil { // if we got an error (that wasn't that the file doesn't exist) stating the // file, return a 500 internal server error and stop http.Error(w, err.Error(), http.StatusInternalServerError) return } // otherwise, use http.FileServer to serve the static dir http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r) } type Torrent struct { ID string Done, Downloaded, ETA, Up, Down, Ratio, Status, Name string } func ListTorrents(ip string) (error, []Torrent) { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-l").Output() if err != nil { log.Fatal(err) return errors.New("Error"), nil } // Split Lines into array lines := strings.Split(string(out), "\n") // Remove first and last line lines = lines[1:len(lines)-2] // Remove all 2 spaces re := regexp.MustCompile(`\s{2,}`) var torrents []Torrent for _, line := range lines { items := strings.Split(re.ReplaceAllString(line, "\t"), "\t")[1:] id := strings.ReplaceAll(items[0], "*", "") torrent := Torrent{id, items[1], items[2], items[3], items[4], items[5], items[6], items[7], strings.Join(items[8:],"")} torrents = append(torrents, torrent) } return nil, torrents } func AddTorrent(ip string, magnetLink string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-a", magnetLink).Output() if err != nil { log.Fatal(err) return false } if strings.Contains(string(out), "Error") { return false } else { return true } } func StopTorrent(ip string, ID string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-t", ID, "-S").Output() if err != nil { log.Fatal(err) return false } if strings.Contains(string(out), "Error") { return false } else { return true } } func 
ResumeTorrent(ip string, ID string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-t", ID, "-s").Output() if err != nil { log.Fatal(err) return false } if strings.Contains(string(out), "Error") { return false } else { return true } } func DeleteTorrent(ip string, ID string) bool { out, err := exec.Command("/usr/bin/transmission-remote", ip,"-t", ID, "-rad").Output() if err != nil { log.Fatal(err) return false } if strings.Contains(string(out), "Error") { return false } else { return true } } func getType(myvar interface{}) string { if t := reflect.TypeOf(myvar); t.Kind() == reflect.Ptr { return "*" + t.Elem().Name() } else { return t.Name() } } func setServices() map[string]m.Crawler { services := make(map[string]m.Crawler) services["MegaTorrents"] = m.MegaTorrents{} services["BludTV"] = m.BludTV{} services["ComandoTorrents"] = m.ComandoTorrents{} services["MeusFilmesTorrent"] = m.MeusFilmesTorrent{} services["PirateTorrent"] = m.PirateTorrent{} return services } func listServices(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") fmt.Printf("[%s] - Requested listServices\n", time.Now().Format("2006-01-02 15:04:05")) var listServices []string for key, _ := range allServices { listServices = append(listServices, key) } json.NewEncoder(w).Encode(listServices) } func searchMovie(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") // If is a option method, terminate function if r.Method == http.MethodOptions { return } // Decode JSON request into structure var postParams SearchRequest err := json.NewDecoder(r.Body).Decode(&postParams) // If throw an error when JSON decode if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } fmt.Printf("[%s] - Requested searchMovie: %+v\n", 
time.Now().Format("2006-01-02 15:04:05"), postParams) var wg sync.WaitGroup var mutex sync.Mutex var results []FoundMovies // For each service requested, start one goroutine for _, service := range postParams.Services { fmt.Printf("Searching %s\n", service) // Add one go routine to group counter wg.Add(1) // Launch goroutine go func(service m.Crawler, wg *sync.WaitGroup, mutex *sync.Mutex, total *[]FoundMovies, postParams SearchRequest) { defer wg.Done() serviceStr := getType(service) // Search movie movieLinks := m.SearchAll(service, postParams.MovieName) fmt.Printf("Found %d on %s\n", len(movieLinks), serviceStr) if movieLinks != nil { mutex.Lock() results = append(results, FoundMovies{serviceStr, movieLinks}) mutex.Unlock() } }(allServices[service], &wg, &mutex, &results, postParams) } // Wait all services finish wg.Wait() // Send found movies json.NewEncoder(w).Encode(results) } func getMagnetLinks(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") // If is a option method, terminate function if r.Method == http.MethodOptions { return } // Receive a movie as params var postParams DownloadLinksRequest err := json.NewDecoder(r.Body).Decode(&postParams) // If throw an error when JSON decode if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } fmt.Printf("[%s] - Requested getMagnetLinks: %+v\n", time.Now().Format("2006-01-02 15:04:05"), postParams) // Get service struct service := allServices[postParams.Service] // Get Download Link downloadLink := postParams.Movie.Link // Get MagnetLinks var resultOptions m.FoundMagnetLinks resultOptions = service.GetDownloadLinks(downloadLink) // Send found links json.NewEncoder(w).Encode(resultOptions) } func listAllTorrents(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", 
"*") fmt.Printf("[%s] - Requested listAllTorrents\n", time.Now().Format("2006-01-02 15:04:05")) err, list := ListTorrents(torrentIP) if err != nil { w.WriteHeader(http.StatusBadRequest) return } json.NewEncoder(w).Encode(list) } func addMagnetLink(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") // If is a option method, terminate function if r.Method == http.MethodOptions { return } // Decode JSON request into structure var postParams RequestAddDownload err := json.NewDecoder(r.Body).Decode(&postParams) // If throw an error when JSON decode if err != nil { http.Error(w, err.Error(), http.StatusBadRequest) return } fmt.Printf("[%s] - Requested addMagnetLink: %+v\n", time.Now().Format("2006-01-02 15:04:05"), postParams) result := AddTorrent(torrentIP, postParams.Link) if !result { http.Error(w, err.Error(), http.StatusBadRequest) return } json.NewEncoder(w).Encode(result) } func deleteTorrent(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") params := mux.Vars(r) id := params["id"] result := DeleteTorrent(torrentIP, id) if !result { w.WriteHeader(http.StatusBadRequest) return } fmt.Printf("[%s] - Requested deleteTorrent: %s\n", time.Now().Format("2006-01-02 15:04:05"), id) json.NewEncoder(w).Encode(result) } func resumeTorrent(w http.ResponseWriter, r *http.Request) { // Set Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") params := mux.Vars(r) id := params["id"] result := ResumeTorrent(torrentIP, id) if !result { w.WriteHeader(http.StatusBadRequest) return } fmt.Printf("[%s] - Requested resumeTorrent: %s\n", time.Now().Format("2006-01-02 15:04:05"), id) json.NewEncoder(w).Encode(result) } func pauseTorrent(w http.ResponseWriter, r *http.Request) { // Set 
Header for Options w.Header().Set("Access-Control-Allow-Origin", "*") w.Header().Set("Access-Control-Allow-Headers", "*") params := mux.Vars(r) id := params["id"] result := StopTorrent(torrentIP, id) if !result { w.WriteHeader(http.StatusBadRequest) return } fmt.Printf("[%s] - Requested pauseTorrent: %s\n", time.Now().Format("2006-01-02 15:04:05"), id) json.NewEncoder(w).Encode(result) } func main() { ip, _ := exec.Command("hostname", "-I").Output() ipF := strings.ReplaceAll(string(ip), "\n", "") ipF = strings.ReplaceAll(ipF, "\t", "") ips := strings.Split(ipF, " ") torrentIP = ips[0] fmt.Printf("IP: %s\n", torrentIP) allServices = setServices() r := mux.NewRouter() // Handle API routes api := r.PathPrefix("/api/").Subrouter() api.HandleFunc("/listservices", listServices) api.HandleFunc("/searchMovie", searchMovie).Methods("POST", "OPTIONS") api.HandleFunc("/getMagnetLinks", getMagnetLinks).Methods("POST", "OPTIONS") // Torrent Endpoints api.HandleFunc("/listTorrents", listAllTorrents).Methods("GET") api.HandleFunc("/addMagnetLink", addMagnetLink).Methods("POST", "OPTIONS") api.HandleFunc("/deleteTorrent/{id}", deleteTorrent).Methods("GET") api.HandleFunc("/resumeTorrent/{id}", resumeTorrent).Methods("GET") api.HandleFunc("/pauseTorrent/{id}", pauseTorrent).Methods("GET") // SPA route spa := spaHandler{staticPath: "./spa", indexPath: "index.html"} r.PathPrefix("/").Handler(spa) // Set CORS r.Use(mux.CORSMethodMiddleware(r)) // Listen fmt.Printf("[%s] - Starting MoviesPI Service\n", time.Now().Format("2006-01-02 15:04:05")) log.Fatal(http.ListenAndServe(":8888", r)) }
{ // if we failed to get the absolute path respond with a 400 bad request // and stop http.Error(w, err.Error(), http.StatusBadRequest) return }
conditional_block
gologs.go
package gologs import ( "fmt" "io" "os" "path/filepath" "strconv" "strings" "sync" "sync/atomic" "time" "unicode/utf8" ) // DefaultCommandFormat specifies a log format might be more appropriate for a // infrequently used command line program, where the name of the service is a // recommended part of the log line, but the timestamp is not. const DefaultCommandFormat = "{program}: {message}" // DefaultServiceFormat specifies a log format might be more appropriate for a // service daemon, where the name of the service is implied by the filename the // logs will eventually be written to. The default timestamp format is the same // as what the standard library logs times as, but different timestamp formats // are readily available, and the timestamp format is also customizable. const DefaultServiceFormat = "{timestamp} [{level}] {message}" // Level type defines one of several possible log levels. type Level uint32 const ( // Debug is for events that might help a person understand the cause of a // bug in a program. Debug Level = iota // Verbose is for events that might help a person understand the state of a // program. Verbose // Info is for events that annotate high level status of a program. Info // Warning is for events that indicate a possible problem with the // program. Warning events should be investigated and corrected soon. Warning // Error is for events that indicate a definite problem that might prevent // normal program execution. Error events should be corrected immediately. 
Error ) func (l Level) String() string { switch l { case Debug: return "DEBUG" case Verbose: return "VERBOSE" case Info: return "INFO" case Warning: return "WARNING" case Error: return "ERROR" } // NOT REACHED panic(fmt.Sprintf("invalid log level: %d", uint32(l))) } // event instances are created by loggers and flow through the log tree from the // branch where they were created, down to the base, at which point, its // arguments will be formatted immediately prior to writing the log message to // the underlying log io.Writer. type event struct { args []interface{} prefix []string when time.Time format string level Level tracer bool } // base is at the bottom of the logger tree, and formats the event to a byte // slice, ensuring it ends with a newline, and writes its output to its // underlying io.Writer. type base struct { formatters []func(*event, *[]byte) w io.Writer c int // c is count of bytes to allocate for formatting log line m sync.Mutex isTimeRequired bool } func (b *base) log(e *event) error { // ??? *If* want to sacrifice a bit of speed, might consider using a // pre-allocated byte slice to format the output. The pre-allocated slice // can be protected with the lock already being used to serialize output, or // even better, its own lock so one thread can be formatting an event while // a different thread is writing the formatted event to the underlying // writer. buf := make([]byte, 0, b.c) // NOTE: This logic allows for a race between two threads that both get the // time for an event, then race for the mutex below that serializes output // to the underlying io.Writer. While not dangerous, the logic might allow // two log lines to be emitted to the writer in opposite timestamp order. if b.isTimeRequired { e.when = time.Now() } // Format the event according to the compiled formatting functions created // when the logger was created, according to the log template, i.e., // "{timestamp} [{level}] {message}". 
for _, formatter := range b.formatters { formatter(e, &buf) } buf = singleNewline(buf) // Serialize access to the underlying io.Writer. b.m.Lock() _, err := b.w.Write(buf) b.m.Unlock() return err } func singleNewline(buf []byte) []byte { l := len(buf) if l == 0 { return []byte{'\n'} } // While this is O(length s), it stops as soon as it finds the first non // newline character in the string starting from the right hand side of the // input string. Generally this only scans one or two characters and // returns. for i := l - 1; i >= 0; i-- { if buf[i] != '\n' { if i+1 < l && buf[i+1] == '\n' { return buf[:i+2] } return append(buf[:i+1], '\n') } } return buf[:1] // all newline characters, so just return the first one } type logger interface { log(*event) error } // Logger provides methods to create events to be logged. Logger instances are // created to emit events to their parent Logger instance, which may themselves // either filter events based on a configured level, or prefix events with a // configured string. // // When a logger is in Error mode, only Error events are logged. When a logger // is in Warning mode, only Error and Warning events are logged. When a logger // is in Info mode, only Error, Warning, and Info events are logged. When a // logger is in Verbose mode, only Error, Warning, Info, and Verbose events are // logged. When a logger is in Debug mode, all events are logged. type Logger struct { prefix string // prefix is an option string, that when not empty, will prefix events parent logger // parent is the logger this branch sends events to level Level // level is the independent log level controls for this branch tracer bool // tracer is value used to initialize new events created by this Logger } // New returns a new Logger instance that emits logged events to w after // formatting the event according to template. 
// // Logger instances returned by this function are initialized to Warning level, // which I feel is in keeping with the UNIX philosophy to _Avoid unnecessary // output_. Simple command line programs will not need to set the log level to // prevent spewing too many log events. While service application developers are // more likely to spend a few minutes to build in the ability to configure the // log level based on their service needs. func New(w io.Writer, template string) (*Logger, error) { formatters, isTimeRequired, err := compileFormat(template) if err != nil { return nil, err } // Create a dummy event to see how long the log line is with the provided // template. buf := make([]byte, 0, 64) var e event for _, formatter := range formatters { formatter(&e, &buf) } min := len(buf) + 64 if min < 128 { min = 128 } parent := &base{ c: min, formatters: formatters, isTimeRequired: isTimeRequired, w: w, } return &Logger{parent: parent, level: Warning}, nil } // NewBranch returns a new Logger instance that logs to parent, but has its own // log level that is independently controlled from parent. // // Note that events are filtered as the flow from their origin branch to the // base. When a parent Logger has a more restrictive log level than a child // Logger, the event might pass through from a child to its parent, but be // filtered out once it arrives at the parent. func NewBranch(parent *Logger) *Logger { return &Logger{parent: parent} } // NewBranchWithPrefix returns a new Logger instance that logs to parent, but // has its own log level that is independently controlled from // parent. Furthermore, events that pass through the returned Logger will have // prefix string prefixed to the event. // // Note that events are filtered as the flow from their origin branch to the // base. 
When a parent Logger has a more restrictive log level than a child // Logger, the event might pass through from a child to its parent, but be // filtered out once they arrive at the parent. func NewBranchWithPrefix(parent *Logger, prefix string) *Logger { return &Logger{parent: parent, prefix: prefix} } // NewTracer returns a new Logger instance that sets the tracer bit for events // that are logged to it. // // tl := NewTracer(logger, "[QUERY-1234] ") // make a trace logger // tl.Debug("start handling: %f", 3.14) // [QUERY-1234] start handling: 3.14 func NewTracer(parent *Logger, prefix string) *Logger { return &Logger{parent: parent, prefix: prefix, tracer: true} } func (b *Logger) log(e *event) error { if !e.tracer && Level(atomic.LoadUint32((*uint32)(&b.level))) > e.level { return nil } if b.prefix != "" { e.prefix = append([]string{b.prefix}, e.prefix...) } return b.parent.log(e) } // SetLevel allows changing the log level. Events must have the same log level // or higher for events to be logged. func (b *Logger) SetLevel(level Level) *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(level)) return b } // SetDebug changes the log level to Debug, which allows all events to be // logged. func (b *Logger) SetDebug() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Debug)) return b } // SetVerbose changes the log level to Verbose, which causes all Debug events to // be ignored, and all Verbose, Info, Warning, and Error events to be logged. func (b *Logger) SetVerbose() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Verbose)) return b } // SetInfo changes the log level to Info, which causes all Debug and Verbose // events to be ignored, and all Info, Warning, and Error events to be logged. func (b *Logger) SetInfo() *Logger
// SetWarning changes the log level to Warning, which causes all Debug, Verbose, // and Info events to be ignored, and all Warning, and Error events to be // logged. func (b *Logger) SetWarning() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Warning)) return b } // SetError changes the log level to Error, which causes all Debug, Verbose, // Info, and Warning events to be ignored, and all Error events to be logged. func (b *Logger) SetError() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Error)) return b } // Debug is used to inject a Debug event into the logs. func (b *Logger) Debug(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Debug { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Debug}) } // Verbose is used to inject a Verbose event into the logs. func (b *Logger) Verbose(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Verbose { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Verbose}) } // Info is used to inject a Info event into the logs. func (b *Logger) Info(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Info { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Info}) } // Warning is used to inject a Warning event into the logs. 
func (b *Logger) Warning(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Warning { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Warning}) } // Error is used to inject a Error event into the logs. func (b *Logger) Error(format string, args ...interface{}) error { var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Error}) } // compileFormat converts the format string into a slice of functions to invoke // when creating a log line. func compileFormat(format string) ([]func(*event, *[]byte), bool, error) { // build slice of emitter functions, each will emit the requested // information var emitters []func(*event, *[]byte) // Implemented as a state machine that alternates between 2 states: either // capturing runes for the next constant buffer, or capturing runes for the // next token var buf, token []byte var indexOpenCurlyBrace int // index of most recent open curly brace var isCapturingToken bool // true after open curly brace until next close curly brace var isPrevRuneBackslash bool // true when previous rune was backslash var isPrevRuneNewline bool // true when rune most recently read is newline var isTimeRequired bool // true when any of the formatters require system time for ri, rune := range format { isPrevRuneNewline = rune == '\n' if isPrevRuneBackslash { // When this rune has been escaped, then just write it out to // whichever buffer we're collecting to right now. 
if isCapturingToken { appendRune(&token, rune) } else { appendRune(&buf, rune) } isPrevRuneBackslash = false continue } switch rune { case '\\': isPrevRuneBackslash = true case '{': if isCapturingToken { return nil, false, fmt.Errorf("cannot compile log format with embedded curly braces; runes %d and %d", indexOpenCurlyBrace, ri) } // Stop capturing buf, and begin capturing token. emitters = append(emitters, makeStringEmitter(string(buf))) buf = buf[:0] isCapturingToken = true indexOpenCurlyBrace = ri case '}': if !isCapturingToken { return nil, false, fmt.Errorf("cannot compile log format with unmatched closing curly braces; rune %d", ri) } // Stop capturing token, and begin capturing buf. switch tok := string(token); tok { case "epoch": isTimeRequired = true emitters = append(emitters, epochEmitter) case "iso8601": isTimeRequired = true emitters = append(emitters, makeUTCTimestampEmitter(time.RFC3339)) case "level": emitters = append(emitters, levelEmitter) case "message": emitters = append(emitters, messageEmitter) case "program": emitters = append(emitters, makeProgramEmitter()) case "timestamp": // Emulate timestamp format from stdlib log (log.LstdFlags). isTimeRequired = true emitters = append(emitters, makeUTCTimestampEmitter("2006/01/02 15:04:05")) default: // ??? Not sure how I feel about the below API. if strings.HasPrefix(tok, "localtime=") { emitters = append(emitters, makeLocalTimestampEmitter(tok[10:])) } else if strings.HasPrefix(tok, "utctime=") { emitters = append(emitters, makeUTCTimestampEmitter(tok[8:])) } else { return nil, false, fmt.Errorf("cannot compile log format with unknown formatting verb %q", token) } isTimeRequired = true } token = token[:0] isCapturingToken = false default: // Append rune to either token or buf. 
if isCapturingToken { appendRune(&token, rune) } else { appendRune(&buf, rune) } } } if isCapturingToken { return nil, false, fmt.Errorf("cannot compile log format with unmatched opening curly braces; rune %d", indexOpenCurlyBrace) } if !isPrevRuneNewline { buf = append(buf, '\n') // terminate each log line with newline byte } if len(buf) > 0 { emitters = append(emitters, makeStringEmitter(string(buf))) } return emitters, isTimeRequired, nil } func appendRune(buf *[]byte, r rune) { if r < utf8.RuneSelf { *buf = append(*buf, byte(r)) return } olen := len(*buf) *buf = append(*buf, 0, 0, 0, 0) // grow buf large enough to accommodate largest possible UTF8 sequence n := utf8.EncodeRune((*buf)[olen:olen+4], r) // encode rune into newly allocated buf space *buf = (*buf)[:olen+n] // trim buf to actual size used by rune addition } func epochEmitter(e *event, bb *[]byte) { *bb = append(*bb, strconv.FormatInt(e.when.UTC().Unix(), 10)...) } func levelEmitter(e *event, bb *[]byte) { *bb = append(*bb, e.level.String()...) } var program string func makeProgramEmitter() func(e *event, bb *[]byte) { if program == "" { var err error program, err = os.Executable() if err != nil { program = os.Args[0] } program = filepath.Base(program) } return func(e *event, bb *[]byte) { *bb = append(*bb, program...) } } func makeStringEmitter(value string) func(*event, *[]byte) { return func(_ *event, bb *[]byte) { *bb = append(*bb, value...) } } func makeLocalTimestampEmitter(format string) func(e *event, bb *[]byte) { return func(e *event, bb *[]byte) { *bb = append(*bb, e.when.Format(format)...) } } func makeUTCTimestampEmitter(format string) func(e *event, bb *[]byte) { return func(e *event, bb *[]byte) { *bb = append(*bb, e.when.UTC().Format(format)...) } } func messageEmitter(e *event, bb *[]byte) { *bb = append(*bb, strings.Join(e.prefix, "")...) // emit the event's prefix ??? *bb = append(*bb, fmt.Sprintf(e.format, e.args...)...) // followed by the event message }
{ atomic.StoreUint32((*uint32)(&b.level), uint32(Info)) return b }
identifier_body
gologs.go
package gologs import ( "fmt" "io" "os" "path/filepath" "strconv" "strings" "sync" "sync/atomic" "time" "unicode/utf8" ) // DefaultCommandFormat specifies a log format might be more appropriate for a // infrequently used command line program, where the name of the service is a // recommended part of the log line, but the timestamp is not. const DefaultCommandFormat = "{program}: {message}" // DefaultServiceFormat specifies a log format might be more appropriate for a // service daemon, where the name of the service is implied by the filename the // logs will eventually be written to. The default timestamp format is the same // as what the standard library logs times as, but different timestamp formats // are readily available, and the timestamp format is also customizable. const DefaultServiceFormat = "{timestamp} [{level}] {message}" // Level type defines one of several possible log levels. type Level uint32 const ( // Debug is for events that might help a person understand the cause of a // bug in a program. Debug Level = iota // Verbose is for events that might help a person understand the state of a // program. Verbose // Info is for events that annotate high level status of a program. Info // Warning is for events that indicate a possible problem with the // program. Warning events should be investigated and corrected soon. Warning // Error is for events that indicate a definite problem that might prevent // normal program execution. Error events should be corrected immediately. 
Error ) func (l Level) String() string { switch l { case Debug: return "DEBUG" case Verbose: return "VERBOSE" case Info: return "INFO" case Warning: return "WARNING" case Error: return "ERROR" } // NOT REACHED panic(fmt.Sprintf("invalid log level: %d", uint32(l))) } // event instances are created by loggers and flow through the log tree from the // branch where they were created, down to the base, at which point, its // arguments will be formatted immediately prior to writing the log message to // the underlying log io.Writer. type event struct { args []interface{} prefix []string when time.Time format string level Level tracer bool } // base is at the bottom of the logger tree, and formats the event to a byte // slice, ensuring it ends with a newline, and writes its output to its // underlying io.Writer. type base struct { formatters []func(*event, *[]byte) w io.Writer c int // c is count of bytes to allocate for formatting log line m sync.Mutex isTimeRequired bool } func (b *base)
(e *event) error { // ??? *If* want to sacrifice a bit of speed, might consider using a // pre-allocated byte slice to format the output. The pre-allocated slice // can be protected with the lock already being used to serialize output, or // even better, its own lock so one thread can be formatting an event while // a different thread is writing the formatted event to the underlying // writer. buf := make([]byte, 0, b.c) // NOTE: This logic allows for a race between two threads that both get the // time for an event, then race for the mutex below that serializes output // to the underlying io.Writer. While not dangerous, the logic might allow // two log lines to be emitted to the writer in opposite timestamp order. if b.isTimeRequired { e.when = time.Now() } // Format the event according to the compiled formatting functions created // when the logger was created, according to the log template, i.e., // "{timestamp} [{level}] {message}". for _, formatter := range b.formatters { formatter(e, &buf) } buf = singleNewline(buf) // Serialize access to the underlying io.Writer. b.m.Lock() _, err := b.w.Write(buf) b.m.Unlock() return err } func singleNewline(buf []byte) []byte { l := len(buf) if l == 0 { return []byte{'\n'} } // While this is O(length s), it stops as soon as it finds the first non // newline character in the string starting from the right hand side of the // input string. Generally this only scans one or two characters and // returns. for i := l - 1; i >= 0; i-- { if buf[i] != '\n' { if i+1 < l && buf[i+1] == '\n' { return buf[:i+2] } return append(buf[:i+1], '\n') } } return buf[:1] // all newline characters, so just return the first one } type logger interface { log(*event) error } // Logger provides methods to create events to be logged. Logger instances are // created to emit events to their parent Logger instance, which may themselves // either filter events based on a configured level, or prefix events with a // configured string. 
// // When a logger is in Error mode, only Error events are logged. When a logger // is in Warning mode, only Error and Warning events are logged. When a logger // is in Info mode, only Error, Warning, and Info events are logged. When a // logger is in Verbose mode, only Error, Warning, Info, and Verbose events are // logged. When a logger is in Debug mode, all events are logged. type Logger struct { prefix string // prefix is an option string, that when not empty, will prefix events parent logger // parent is the logger this branch sends events to level Level // level is the independent log level controls for this branch tracer bool // tracer is value used to initialize new events created by this Logger } // New returns a new Logger instance that emits logged events to w after // formatting the event according to template. // // Logger instances returned by this function are initialized to Warning level, // which I feel is in keeping with the UNIX philosophy to _Avoid unnecessary // output_. Simple command line programs will not need to set the log level to // prevent spewing too many log events. While service application developers are // more likely to spend a few minutes to build in the ability to configure the // log level based on their service needs. func New(w io.Writer, template string) (*Logger, error) { formatters, isTimeRequired, err := compileFormat(template) if err != nil { return nil, err } // Create a dummy event to see how long the log line is with the provided // template. buf := make([]byte, 0, 64) var e event for _, formatter := range formatters { formatter(&e, &buf) } min := len(buf) + 64 if min < 128 { min = 128 } parent := &base{ c: min, formatters: formatters, isTimeRequired: isTimeRequired, w: w, } return &Logger{parent: parent, level: Warning}, nil } // NewBranch returns a new Logger instance that logs to parent, but has its own // log level that is independently controlled from parent. 
// // Note that events are filtered as the flow from their origin branch to the // base. When a parent Logger has a more restrictive log level than a child // Logger, the event might pass through from a child to its parent, but be // filtered out once it arrives at the parent. func NewBranch(parent *Logger) *Logger { return &Logger{parent: parent} } // NewBranchWithPrefix returns a new Logger instance that logs to parent, but // has its own log level that is independently controlled from // parent. Furthermore, events that pass through the returned Logger will have // prefix string prefixed to the event. // // Note that events are filtered as the flow from their origin branch to the // base. When a parent Logger has a more restrictive log level than a child // Logger, the event might pass through from a child to its parent, but be // filtered out once they arrive at the parent. func NewBranchWithPrefix(parent *Logger, prefix string) *Logger { return &Logger{parent: parent, prefix: prefix} } // NewTracer returns a new Logger instance that sets the tracer bit for events // that are logged to it. // // tl := NewTracer(logger, "[QUERY-1234] ") // make a trace logger // tl.Debug("start handling: %f", 3.14) // [QUERY-1234] start handling: 3.14 func NewTracer(parent *Logger, prefix string) *Logger { return &Logger{parent: parent, prefix: prefix, tracer: true} } func (b *Logger) log(e *event) error { if !e.tracer && Level(atomic.LoadUint32((*uint32)(&b.level))) > e.level { return nil } if b.prefix != "" { e.prefix = append([]string{b.prefix}, e.prefix...) } return b.parent.log(e) } // SetLevel allows changing the log level. Events must have the same log level // or higher for events to be logged. func (b *Logger) SetLevel(level Level) *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(level)) return b } // SetDebug changes the log level to Debug, which allows all events to be // logged. 
func (b *Logger) SetDebug() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Debug)) return b } // SetVerbose changes the log level to Verbose, which causes all Debug events to // be ignored, and all Verbose, Info, Warning, and Error events to be logged. func (b *Logger) SetVerbose() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Verbose)) return b } // SetInfo changes the log level to Info, which causes all Debug and Verbose // events to be ignored, and all Info, Warning, and Error events to be logged. func (b *Logger) SetInfo() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Info)) return b } // SetWarning changes the log level to Warning, which causes all Debug, Verbose, // and Info events to be ignored, and all Warning, and Error events to be // logged. func (b *Logger) SetWarning() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Warning)) return b } // SetError changes the log level to Error, which causes all Debug, Verbose, // Info, and Warning events to be ignored, and all Error events to be logged. func (b *Logger) SetError() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Error)) return b } // Debug is used to inject a Debug event into the logs. func (b *Logger) Debug(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Debug { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Debug}) } // Verbose is used to inject a Verbose event into the logs. func (b *Logger) Verbose(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Verbose { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Verbose}) } // Info is used to inject a Info event into the logs. 
func (b *Logger) Info(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Info { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Info}) } // Warning is used to inject a Warning event into the logs. func (b *Logger) Warning(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Warning { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Warning}) } // Error is used to inject a Error event into the logs. func (b *Logger) Error(format string, args ...interface{}) error { var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Error}) } // compileFormat converts the format string into a slice of functions to invoke // when creating a log line. 
func compileFormat(format string) ([]func(*event, *[]byte), bool, error) { // build slice of emitter functions, each will emit the requested // information var emitters []func(*event, *[]byte) // Implemented as a state machine that alternates between 2 states: either // capturing runes for the next constant buffer, or capturing runes for the // next token var buf, token []byte var indexOpenCurlyBrace int // index of most recent open curly brace var isCapturingToken bool // true after open curly brace until next close curly brace var isPrevRuneBackslash bool // true when previous rune was backslash var isPrevRuneNewline bool // true when rune most recently read is newline var isTimeRequired bool // true when any of the formatters require system time for ri, rune := range format { isPrevRuneNewline = rune == '\n' if isPrevRuneBackslash { // When this rune has been escaped, then just write it out to // whichever buffer we're collecting to right now. if isCapturingToken { appendRune(&token, rune) } else { appendRune(&buf, rune) } isPrevRuneBackslash = false continue } switch rune { case '\\': isPrevRuneBackslash = true case '{': if isCapturingToken { return nil, false, fmt.Errorf("cannot compile log format with embedded curly braces; runes %d and %d", indexOpenCurlyBrace, ri) } // Stop capturing buf, and begin capturing token. emitters = append(emitters, makeStringEmitter(string(buf))) buf = buf[:0] isCapturingToken = true indexOpenCurlyBrace = ri case '}': if !isCapturingToken { return nil, false, fmt.Errorf("cannot compile log format with unmatched closing curly braces; rune %d", ri) } // Stop capturing token, and begin capturing buf. 
switch tok := string(token); tok { case "epoch": isTimeRequired = true emitters = append(emitters, epochEmitter) case "iso8601": isTimeRequired = true emitters = append(emitters, makeUTCTimestampEmitter(time.RFC3339)) case "level": emitters = append(emitters, levelEmitter) case "message": emitters = append(emitters, messageEmitter) case "program": emitters = append(emitters, makeProgramEmitter()) case "timestamp": // Emulate timestamp format from stdlib log (log.LstdFlags). isTimeRequired = true emitters = append(emitters, makeUTCTimestampEmitter("2006/01/02 15:04:05")) default: // ??? Not sure how I feel about the below API. if strings.HasPrefix(tok, "localtime=") { emitters = append(emitters, makeLocalTimestampEmitter(tok[10:])) } else if strings.HasPrefix(tok, "utctime=") { emitters = append(emitters, makeUTCTimestampEmitter(tok[8:])) } else { return nil, false, fmt.Errorf("cannot compile log format with unknown formatting verb %q", token) } isTimeRequired = true } token = token[:0] isCapturingToken = false default: // Append rune to either token or buf. 
if isCapturingToken { appendRune(&token, rune) } else { appendRune(&buf, rune) } } } if isCapturingToken { return nil, false, fmt.Errorf("cannot compile log format with unmatched opening curly braces; rune %d", indexOpenCurlyBrace) } if !isPrevRuneNewline { buf = append(buf, '\n') // terminate each log line with newline byte } if len(buf) > 0 { emitters = append(emitters, makeStringEmitter(string(buf))) } return emitters, isTimeRequired, nil } func appendRune(buf *[]byte, r rune) { if r < utf8.RuneSelf { *buf = append(*buf, byte(r)) return } olen := len(*buf) *buf = append(*buf, 0, 0, 0, 0) // grow buf large enough to accommodate largest possible UTF8 sequence n := utf8.EncodeRune((*buf)[olen:olen+4], r) // encode rune into newly allocated buf space *buf = (*buf)[:olen+n] // trim buf to actual size used by rune addition } func epochEmitter(e *event, bb *[]byte) { *bb = append(*bb, strconv.FormatInt(e.when.UTC().Unix(), 10)...) } func levelEmitter(e *event, bb *[]byte) { *bb = append(*bb, e.level.String()...) } var program string func makeProgramEmitter() func(e *event, bb *[]byte) { if program == "" { var err error program, err = os.Executable() if err != nil { program = os.Args[0] } program = filepath.Base(program) } return func(e *event, bb *[]byte) { *bb = append(*bb, program...) } } func makeStringEmitter(value string) func(*event, *[]byte) { return func(_ *event, bb *[]byte) { *bb = append(*bb, value...) } } func makeLocalTimestampEmitter(format string) func(e *event, bb *[]byte) { return func(e *event, bb *[]byte) { *bb = append(*bb, e.when.Format(format)...) } } func makeUTCTimestampEmitter(format string) func(e *event, bb *[]byte) { return func(e *event, bb *[]byte) { *bb = append(*bb, e.when.UTC().Format(format)...) } } func messageEmitter(e *event, bb *[]byte) { *bb = append(*bb, strings.Join(e.prefix, "")...) // emit the event's prefix ??? *bb = append(*bb, fmt.Sprintf(e.format, e.args...)...) // followed by the event message }
log
identifier_name
gologs.go
package gologs import ( "fmt" "io" "os" "path/filepath" "strconv" "strings" "sync" "sync/atomic" "time" "unicode/utf8" ) // DefaultCommandFormat specifies a log format might be more appropriate for a // infrequently used command line program, where the name of the service is a // recommended part of the log line, but the timestamp is not. const DefaultCommandFormat = "{program}: {message}" // DefaultServiceFormat specifies a log format might be more appropriate for a // service daemon, where the name of the service is implied by the filename the // logs will eventually be written to. The default timestamp format is the same // as what the standard library logs times as, but different timestamp formats // are readily available, and the timestamp format is also customizable. const DefaultServiceFormat = "{timestamp} [{level}] {message}" // Level type defines one of several possible log levels. type Level uint32 const ( // Debug is for events that might help a person understand the cause of a // bug in a program. Debug Level = iota // Verbose is for events that might help a person understand the state of a // program. Verbose // Info is for events that annotate high level status of a program. Info // Warning is for events that indicate a possible problem with the // program. Warning events should be investigated and corrected soon. Warning // Error is for events that indicate a definite problem that might prevent // normal program execution. Error events should be corrected immediately. 
Error ) func (l Level) String() string { switch l { case Debug: return "DEBUG" case Verbose: return "VERBOSE" case Info: return "INFO" case Warning: return "WARNING" case Error: return "ERROR" } // NOT REACHED panic(fmt.Sprintf("invalid log level: %d", uint32(l))) } // event instances are created by loggers and flow through the log tree from the // branch where they were created, down to the base, at which point, its // arguments will be formatted immediately prior to writing the log message to // the underlying log io.Writer. type event struct { args []interface{} prefix []string when time.Time format string level Level tracer bool } // base is at the bottom of the logger tree, and formats the event to a byte // slice, ensuring it ends with a newline, and writes its output to its // underlying io.Writer. type base struct { formatters []func(*event, *[]byte) w io.Writer c int // c is count of bytes to allocate for formatting log line m sync.Mutex isTimeRequired bool } func (b *base) log(e *event) error { // ??? *If* want to sacrifice a bit of speed, might consider using a // pre-allocated byte slice to format the output. The pre-allocated slice // can be protected with the lock already being used to serialize output, or // even better, its own lock so one thread can be formatting an event while // a different thread is writing the formatted event to the underlying // writer. buf := make([]byte, 0, b.c) // NOTE: This logic allows for a race between two threads that both get the // time for an event, then race for the mutex below that serializes output // to the underlying io.Writer. While not dangerous, the logic might allow // two log lines to be emitted to the writer in opposite timestamp order. if b.isTimeRequired { e.when = time.Now() } // Format the event according to the compiled formatting functions created // when the logger was created, according to the log template, i.e., // "{timestamp} [{level}] {message}". 
for _, formatter := range b.formatters { formatter(e, &buf) } buf = singleNewline(buf) // Serialize access to the underlying io.Writer. b.m.Lock() _, err := b.w.Write(buf) b.m.Unlock() return err } func singleNewline(buf []byte) []byte { l := len(buf) if l == 0 { return []byte{'\n'} } // While this is O(length s), it stops as soon as it finds the first non // newline character in the string starting from the right hand side of the // input string. Generally this only scans one or two characters and // returns. for i := l - 1; i >= 0; i--
return buf[:1] // all newline characters, so just return the first one } type logger interface { log(*event) error } // Logger provides methods to create events to be logged. Logger instances are // created to emit events to their parent Logger instance, which may themselves // either filter events based on a configured level, or prefix events with a // configured string. // // When a logger is in Error mode, only Error events are logged. When a logger // is in Warning mode, only Error and Warning events are logged. When a logger // is in Info mode, only Error, Warning, and Info events are logged. When a // logger is in Verbose mode, only Error, Warning, Info, and Verbose events are // logged. When a logger is in Debug mode, all events are logged. type Logger struct { prefix string // prefix is an option string, that when not empty, will prefix events parent logger // parent is the logger this branch sends events to level Level // level is the independent log level controls for this branch tracer bool // tracer is value used to initialize new events created by this Logger } // New returns a new Logger instance that emits logged events to w after // formatting the event according to template. // // Logger instances returned by this function are initialized to Warning level, // which I feel is in keeping with the UNIX philosophy to _Avoid unnecessary // output_. Simple command line programs will not need to set the log level to // prevent spewing too many log events. While service application developers are // more likely to spend a few minutes to build in the ability to configure the // log level based on their service needs. func New(w io.Writer, template string) (*Logger, error) { formatters, isTimeRequired, err := compileFormat(template) if err != nil { return nil, err } // Create a dummy event to see how long the log line is with the provided // template. 
buf := make([]byte, 0, 64) var e event for _, formatter := range formatters { formatter(&e, &buf) } min := len(buf) + 64 if min < 128 { min = 128 } parent := &base{ c: min, formatters: formatters, isTimeRequired: isTimeRequired, w: w, } return &Logger{parent: parent, level: Warning}, nil } // NewBranch returns a new Logger instance that logs to parent, but has its own // log level that is independently controlled from parent. // // Note that events are filtered as the flow from their origin branch to the // base. When a parent Logger has a more restrictive log level than a child // Logger, the event might pass through from a child to its parent, but be // filtered out once it arrives at the parent. func NewBranch(parent *Logger) *Logger { return &Logger{parent: parent} } // NewBranchWithPrefix returns a new Logger instance that logs to parent, but // has its own log level that is independently controlled from // parent. Furthermore, events that pass through the returned Logger will have // prefix string prefixed to the event. // // Note that events are filtered as the flow from their origin branch to the // base. When a parent Logger has a more restrictive log level than a child // Logger, the event might pass through from a child to its parent, but be // filtered out once they arrive at the parent. func NewBranchWithPrefix(parent *Logger, prefix string) *Logger { return &Logger{parent: parent, prefix: prefix} } // NewTracer returns a new Logger instance that sets the tracer bit for events // that are logged to it. 
// // tl := NewTracer(logger, "[QUERY-1234] ") // make a trace logger // tl.Debug("start handling: %f", 3.14) // [QUERY-1234] start handling: 3.14 func NewTracer(parent *Logger, prefix string) *Logger { return &Logger{parent: parent, prefix: prefix, tracer: true} } func (b *Logger) log(e *event) error { if !e.tracer && Level(atomic.LoadUint32((*uint32)(&b.level))) > e.level { return nil } if b.prefix != "" { e.prefix = append([]string{b.prefix}, e.prefix...) } return b.parent.log(e) } // SetLevel allows changing the log level. Events must have the same log level // or higher for events to be logged. func (b *Logger) SetLevel(level Level) *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(level)) return b } // SetDebug changes the log level to Debug, which allows all events to be // logged. func (b *Logger) SetDebug() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Debug)) return b } // SetVerbose changes the log level to Verbose, which causes all Debug events to // be ignored, and all Verbose, Info, Warning, and Error events to be logged. func (b *Logger) SetVerbose() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Verbose)) return b } // SetInfo changes the log level to Info, which causes all Debug and Verbose // events to be ignored, and all Info, Warning, and Error events to be logged. func (b *Logger) SetInfo() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Info)) return b } // SetWarning changes the log level to Warning, which causes all Debug, Verbose, // and Info events to be ignored, and all Warning, and Error events to be // logged. func (b *Logger) SetWarning() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Warning)) return b } // SetError changes the log level to Error, which causes all Debug, Verbose, // Info, and Warning events to be ignored, and all Error events to be logged. 
func (b *Logger) SetError() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Error)) return b } // Debug is used to inject a Debug event into the logs. func (b *Logger) Debug(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Debug { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Debug}) } // Verbose is used to inject a Verbose event into the logs. func (b *Logger) Verbose(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Verbose { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Verbose}) } // Info is used to inject a Info event into the logs. func (b *Logger) Info(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Info { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Info}) } // Warning is used to inject a Warning event into the logs. func (b *Logger) Warning(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Warning { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Warning}) } // Error is used to inject a Error event into the logs. 
func (b *Logger) Error(format string, args ...interface{}) error { var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Error}) } // compileFormat converts the format string into a slice of functions to invoke // when creating a log line. func compileFormat(format string) ([]func(*event, *[]byte), bool, error) { // build slice of emitter functions, each will emit the requested // information var emitters []func(*event, *[]byte) // Implemented as a state machine that alternates between 2 states: either // capturing runes for the next constant buffer, or capturing runes for the // next token var buf, token []byte var indexOpenCurlyBrace int // index of most recent open curly brace var isCapturingToken bool // true after open curly brace until next close curly brace var isPrevRuneBackslash bool // true when previous rune was backslash var isPrevRuneNewline bool // true when rune most recently read is newline var isTimeRequired bool // true when any of the formatters require system time for ri, rune := range format { isPrevRuneNewline = rune == '\n' if isPrevRuneBackslash { // When this rune has been escaped, then just write it out to // whichever buffer we're collecting to right now. if isCapturingToken { appendRune(&token, rune) } else { appendRune(&buf, rune) } isPrevRuneBackslash = false continue } switch rune { case '\\': isPrevRuneBackslash = true case '{': if isCapturingToken { return nil, false, fmt.Errorf("cannot compile log format with embedded curly braces; runes %d and %d", indexOpenCurlyBrace, ri) } // Stop capturing buf, and begin capturing token. 
emitters = append(emitters, makeStringEmitter(string(buf))) buf = buf[:0] isCapturingToken = true indexOpenCurlyBrace = ri case '}': if !isCapturingToken { return nil, false, fmt.Errorf("cannot compile log format with unmatched closing curly braces; rune %d", ri) } // Stop capturing token, and begin capturing buf. switch tok := string(token); tok { case "epoch": isTimeRequired = true emitters = append(emitters, epochEmitter) case "iso8601": isTimeRequired = true emitters = append(emitters, makeUTCTimestampEmitter(time.RFC3339)) case "level": emitters = append(emitters, levelEmitter) case "message": emitters = append(emitters, messageEmitter) case "program": emitters = append(emitters, makeProgramEmitter()) case "timestamp": // Emulate timestamp format from stdlib log (log.LstdFlags). isTimeRequired = true emitters = append(emitters, makeUTCTimestampEmitter("2006/01/02 15:04:05")) default: // ??? Not sure how I feel about the below API. if strings.HasPrefix(tok, "localtime=") { emitters = append(emitters, makeLocalTimestampEmitter(tok[10:])) } else if strings.HasPrefix(tok, "utctime=") { emitters = append(emitters, makeUTCTimestampEmitter(tok[8:])) } else { return nil, false, fmt.Errorf("cannot compile log format with unknown formatting verb %q", token) } isTimeRequired = true } token = token[:0] isCapturingToken = false default: // Append rune to either token or buf. 
if isCapturingToken { appendRune(&token, rune) } else { appendRune(&buf, rune) } } } if isCapturingToken { return nil, false, fmt.Errorf("cannot compile log format with unmatched opening curly braces; rune %d", indexOpenCurlyBrace) } if !isPrevRuneNewline { buf = append(buf, '\n') // terminate each log line with newline byte } if len(buf) > 0 { emitters = append(emitters, makeStringEmitter(string(buf))) } return emitters, isTimeRequired, nil } func appendRune(buf *[]byte, r rune) { if r < utf8.RuneSelf { *buf = append(*buf, byte(r)) return } olen := len(*buf) *buf = append(*buf, 0, 0, 0, 0) // grow buf large enough to accommodate largest possible UTF8 sequence n := utf8.EncodeRune((*buf)[olen:olen+4], r) // encode rune into newly allocated buf space *buf = (*buf)[:olen+n] // trim buf to actual size used by rune addition } func epochEmitter(e *event, bb *[]byte) { *bb = append(*bb, strconv.FormatInt(e.when.UTC().Unix(), 10)...) } func levelEmitter(e *event, bb *[]byte) { *bb = append(*bb, e.level.String()...) } var program string func makeProgramEmitter() func(e *event, bb *[]byte) { if program == "" { var err error program, err = os.Executable() if err != nil { program = os.Args[0] } program = filepath.Base(program) } return func(e *event, bb *[]byte) { *bb = append(*bb, program...) } } func makeStringEmitter(value string) func(*event, *[]byte) { return func(_ *event, bb *[]byte) { *bb = append(*bb, value...) } } func makeLocalTimestampEmitter(format string) func(e *event, bb *[]byte) { return func(e *event, bb *[]byte) { *bb = append(*bb, e.when.Format(format)...) } } func makeUTCTimestampEmitter(format string) func(e *event, bb *[]byte) { return func(e *event, bb *[]byte) { *bb = append(*bb, e.when.UTC().Format(format)...) } } func messageEmitter(e *event, bb *[]byte) { *bb = append(*bb, strings.Join(e.prefix, "")...) // emit the event's prefix ??? *bb = append(*bb, fmt.Sprintf(e.format, e.args...)...) // followed by the event message }
{ if buf[i] != '\n' { if i+1 < l && buf[i+1] == '\n' { return buf[:i+2] } return append(buf[:i+1], '\n') } }
conditional_block
gologs.go
package gologs import ( "fmt" "io" "os" "path/filepath" "strconv" "strings" "sync" "sync/atomic" "time" "unicode/utf8" ) // DefaultCommandFormat specifies a log format might be more appropriate for a // infrequently used command line program, where the name of the service is a // recommended part of the log line, but the timestamp is not. const DefaultCommandFormat = "{program}: {message}" // DefaultServiceFormat specifies a log format might be more appropriate for a // service daemon, where the name of the service is implied by the filename the // logs will eventually be written to. The default timestamp format is the same // as what the standard library logs times as, but different timestamp formats // are readily available, and the timestamp format is also customizable. const DefaultServiceFormat = "{timestamp} [{level}] {message}" // Level type defines one of several possible log levels. type Level uint32 const ( // Debug is for events that might help a person understand the cause of a // bug in a program. Debug Level = iota // Verbose is for events that might help a person understand the state of a // program. Verbose // Info is for events that annotate high level status of a program. Info // Warning is for events that indicate a possible problem with the // program. Warning events should be investigated and corrected soon. Warning // Error is for events that indicate a definite problem that might prevent // normal program execution. Error events should be corrected immediately. 
Error ) func (l Level) String() string { switch l { case Debug: return "DEBUG" case Verbose: return "VERBOSE" case Info: return "INFO" case Warning: return "WARNING" case Error: return "ERROR" } // NOT REACHED panic(fmt.Sprintf("invalid log level: %d", uint32(l))) } // event instances are created by loggers and flow through the log tree from the // branch where they were created, down to the base, at which point, its // arguments will be formatted immediately prior to writing the log message to // the underlying log io.Writer. type event struct { args []interface{} prefix []string when time.Time format string level Level tracer bool } // base is at the bottom of the logger tree, and formats the event to a byte // slice, ensuring it ends with a newline, and writes its output to its // underlying io.Writer. type base struct { formatters []func(*event, *[]byte) w io.Writer c int // c is count of bytes to allocate for formatting log line m sync.Mutex isTimeRequired bool } func (b *base) log(e *event) error { // ??? *If* want to sacrifice a bit of speed, might consider using a // pre-allocated byte slice to format the output. The pre-allocated slice // can be protected with the lock already being used to serialize output, or // even better, its own lock so one thread can be formatting an event while // a different thread is writing the formatted event to the underlying // writer. buf := make([]byte, 0, b.c) // NOTE: This logic allows for a race between two threads that both get the // time for an event, then race for the mutex below that serializes output // to the underlying io.Writer. While not dangerous, the logic might allow // two log lines to be emitted to the writer in opposite timestamp order. if b.isTimeRequired { e.when = time.Now() } // Format the event according to the compiled formatting functions created // when the logger was created, according to the log template, i.e., // "{timestamp} [{level}] {message}". 
for _, formatter := range b.formatters { formatter(e, &buf) } buf = singleNewline(buf) // Serialize access to the underlying io.Writer. b.m.Lock() _, err := b.w.Write(buf) b.m.Unlock() return err } func singleNewline(buf []byte) []byte { l := len(buf) if l == 0 { return []byte{'\n'} } // While this is O(length s), it stops as soon as it finds the first non // newline character in the string starting from the right hand side of the // input string. Generally this only scans one or two characters and // returns. for i := l - 1; i >= 0; i-- { if buf[i] != '\n' { if i+1 < l && buf[i+1] == '\n' { return buf[:i+2] } return append(buf[:i+1], '\n') } } return buf[:1] // all newline characters, so just return the first one } type logger interface { log(*event) error } // Logger provides methods to create events to be logged. Logger instances are // created to emit events to their parent Logger instance, which may themselves // either filter events based on a configured level, or prefix events with a // configured string. // // When a logger is in Error mode, only Error events are logged. When a logger // is in Warning mode, only Error and Warning events are logged. When a logger // is in Info mode, only Error, Warning, and Info events are logged. When a // logger is in Verbose mode, only Error, Warning, Info, and Verbose events are // logged. When a logger is in Debug mode, all events are logged. type Logger struct { prefix string // prefix is an option string, that when not empty, will prefix events parent logger // parent is the logger this branch sends events to level Level // level is the independent log level controls for this branch tracer bool // tracer is value used to initialize new events created by this Logger } // New returns a new Logger instance that emits logged events to w after // formatting the event according to template. 
// // Logger instances returned by this function are initialized to Warning level, // which I feel is in keeping with the UNIX philosophy to _Avoid unnecessary // output_. Simple command line programs will not need to set the log level to // prevent spewing too many log events. While service application developers are // more likely to spend a few minutes to build in the ability to configure the // log level based on their service needs. func New(w io.Writer, template string) (*Logger, error) { formatters, isTimeRequired, err := compileFormat(template) if err != nil { return nil, err } // Create a dummy event to see how long the log line is with the provided // template. buf := make([]byte, 0, 64) var e event for _, formatter := range formatters { formatter(&e, &buf) } min := len(buf) + 64 if min < 128 { min = 128 } parent := &base{ c: min, formatters: formatters, isTimeRequired: isTimeRequired, w: w, } return &Logger{parent: parent, level: Warning}, nil } // NewBranch returns a new Logger instance that logs to parent, but has its own // log level that is independently controlled from parent. // // Note that events are filtered as the flow from their origin branch to the // base. When a parent Logger has a more restrictive log level than a child // Logger, the event might pass through from a child to its parent, but be // filtered out once it arrives at the parent. func NewBranch(parent *Logger) *Logger { return &Logger{parent: parent} } // NewBranchWithPrefix returns a new Logger instance that logs to parent, but // has its own log level that is independently controlled from // parent. Furthermore, events that pass through the returned Logger will have // prefix string prefixed to the event. // // Note that events are filtered as the flow from their origin branch to the // base. 
When a parent Logger has a more restrictive log level than a child // Logger, the event might pass through from a child to its parent, but be // filtered out once they arrive at the parent. func NewBranchWithPrefix(parent *Logger, prefix string) *Logger { return &Logger{parent: parent, prefix: prefix} } // NewTracer returns a new Logger instance that sets the tracer bit for events // that are logged to it. // // tl := NewTracer(logger, "[QUERY-1234] ") // make a trace logger // tl.Debug("start handling: %f", 3.14) // [QUERY-1234] start handling: 3.14 func NewTracer(parent *Logger, prefix string) *Logger { return &Logger{parent: parent, prefix: prefix, tracer: true} } func (b *Logger) log(e *event) error { if !e.tracer && Level(atomic.LoadUint32((*uint32)(&b.level))) > e.level { return nil }
if b.prefix != "" { e.prefix = append([]string{b.prefix}, e.prefix...) } return b.parent.log(e) } // SetLevel allows changing the log level. Events must have the same log level // or higher for events to be logged. func (b *Logger) SetLevel(level Level) *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(level)) return b } // SetDebug changes the log level to Debug, which allows all events to be // logged. func (b *Logger) SetDebug() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Debug)) return b } // SetVerbose changes the log level to Verbose, which causes all Debug events to // be ignored, and all Verbose, Info, Warning, and Error events to be logged. func (b *Logger) SetVerbose() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Verbose)) return b } // SetInfo changes the log level to Info, which causes all Debug and Verbose // events to be ignored, and all Info, Warning, and Error events to be logged. func (b *Logger) SetInfo() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Info)) return b } // SetWarning changes the log level to Warning, which causes all Debug, Verbose, // and Info events to be ignored, and all Warning, and Error events to be // logged. func (b *Logger) SetWarning() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Warning)) return b } // SetError changes the log level to Error, which causes all Debug, Verbose, // Info, and Warning events to be ignored, and all Error events to be logged. func (b *Logger) SetError() *Logger { atomic.StoreUint32((*uint32)(&b.level), uint32(Error)) return b } // Debug is used to inject a Debug event into the logs. 
func (b *Logger) Debug(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Debug { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Debug}) } // Verbose is used to inject a Verbose event into the logs. func (b *Logger) Verbose(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Verbose { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Verbose}) } // Info is used to inject a Info event into the logs. func (b *Logger) Info(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Info { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Info}) } // Warning is used to inject a Warning event into the logs. func (b *Logger) Warning(format string, args ...interface{}) error { if Level(atomic.LoadUint32((*uint32)(&b.level))) > Warning { return nil } var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Warning}) } // Error is used to inject a Error event into the logs. func (b *Logger) Error(format string, args ...interface{}) error { var prefix []string if b.prefix != "" { prefix = []string{b.prefix} } return b.parent.log(&event{format: format, args: args, prefix: prefix, tracer: b.tracer, level: Error}) } // compileFormat converts the format string into a slice of functions to invoke // when creating a log line. 
func compileFormat(format string) ([]func(*event, *[]byte), bool, error) { // build slice of emitter functions, each will emit the requested // information var emitters []func(*event, *[]byte) // Implemented as a state machine that alternates between 2 states: either // capturing runes for the next constant buffer, or capturing runes for the // next token var buf, token []byte var indexOpenCurlyBrace int // index of most recent open curly brace var isCapturingToken bool // true after open curly brace until next close curly brace var isPrevRuneBackslash bool // true when previous rune was backslash var isPrevRuneNewline bool // true when rune most recently read is newline var isTimeRequired bool // true when any of the formatters require system time for ri, rune := range format { isPrevRuneNewline = rune == '\n' if isPrevRuneBackslash { // When this rune has been escaped, then just write it out to // whichever buffer we're collecting to right now. if isCapturingToken { appendRune(&token, rune) } else { appendRune(&buf, rune) } isPrevRuneBackslash = false continue } switch rune { case '\\': isPrevRuneBackslash = true case '{': if isCapturingToken { return nil, false, fmt.Errorf("cannot compile log format with embedded curly braces; runes %d and %d", indexOpenCurlyBrace, ri) } // Stop capturing buf, and begin capturing token. emitters = append(emitters, makeStringEmitter(string(buf))) buf = buf[:0] isCapturingToken = true indexOpenCurlyBrace = ri case '}': if !isCapturingToken { return nil, false, fmt.Errorf("cannot compile log format with unmatched closing curly braces; rune %d", ri) } // Stop capturing token, and begin capturing buf. 
switch tok := string(token); tok { case "epoch": isTimeRequired = true emitters = append(emitters, epochEmitter) case "iso8601": isTimeRequired = true emitters = append(emitters, makeUTCTimestampEmitter(time.RFC3339)) case "level": emitters = append(emitters, levelEmitter) case "message": emitters = append(emitters, messageEmitter) case "program": emitters = append(emitters, makeProgramEmitter()) case "timestamp": // Emulate timestamp format from stdlib log (log.LstdFlags). isTimeRequired = true emitters = append(emitters, makeUTCTimestampEmitter("2006/01/02 15:04:05")) default: // ??? Not sure how I feel about the below API. if strings.HasPrefix(tok, "localtime=") { emitters = append(emitters, makeLocalTimestampEmitter(tok[10:])) } else if strings.HasPrefix(tok, "utctime=") { emitters = append(emitters, makeUTCTimestampEmitter(tok[8:])) } else { return nil, false, fmt.Errorf("cannot compile log format with unknown formatting verb %q", token) } isTimeRequired = true } token = token[:0] isCapturingToken = false default: // Append rune to either token or buf. 
if isCapturingToken { appendRune(&token, rune) } else { appendRune(&buf, rune) } } } if isCapturingToken { return nil, false, fmt.Errorf("cannot compile log format with unmatched opening curly braces; rune %d", indexOpenCurlyBrace) } if !isPrevRuneNewline { buf = append(buf, '\n') // terminate each log line with newline byte } if len(buf) > 0 { emitters = append(emitters, makeStringEmitter(string(buf))) } return emitters, isTimeRequired, nil } func appendRune(buf *[]byte, r rune) { if r < utf8.RuneSelf { *buf = append(*buf, byte(r)) return } olen := len(*buf) *buf = append(*buf, 0, 0, 0, 0) // grow buf large enough to accommodate largest possible UTF8 sequence n := utf8.EncodeRune((*buf)[olen:olen+4], r) // encode rune into newly allocated buf space *buf = (*buf)[:olen+n] // trim buf to actual size used by rune addition } func epochEmitter(e *event, bb *[]byte) { *bb = append(*bb, strconv.FormatInt(e.when.UTC().Unix(), 10)...) } func levelEmitter(e *event, bb *[]byte) { *bb = append(*bb, e.level.String()...) } var program string func makeProgramEmitter() func(e *event, bb *[]byte) { if program == "" { var err error program, err = os.Executable() if err != nil { program = os.Args[0] } program = filepath.Base(program) } return func(e *event, bb *[]byte) { *bb = append(*bb, program...) } } func makeStringEmitter(value string) func(*event, *[]byte) { return func(_ *event, bb *[]byte) { *bb = append(*bb, value...) } } func makeLocalTimestampEmitter(format string) func(e *event, bb *[]byte) { return func(e *event, bb *[]byte) { *bb = append(*bb, e.when.Format(format)...) } } func makeUTCTimestampEmitter(format string) func(e *event, bb *[]byte) { return func(e *event, bb *[]byte) { *bb = append(*bb, e.when.UTC().Format(format)...) } } func messageEmitter(e *event, bb *[]byte) { *bb = append(*bb, strings.Join(e.prefix, "")...) // emit the event's prefix ??? *bb = append(*bb, fmt.Sprintf(e.format, e.args...)...) // followed by the event message }
random_line_split
gui_LMI.py
from __future__ import division import clr from PyQt5 import QtCore, QtWidgets, uic from PyQt5.QtGui import QPixmap,QImage,QStandardItem from PyQt5.QtCore import QThread , pyqtSignal from PyQt5.QtWidgets import QFileDialog ,QTableWidgetItem import time import inspect import sqlite3 import pandas import collections import cv2 import math import threading from socket import socket, AF_INET , SOCK_STREAM,SOL_SOCKET,SO_SNDBUF #import cv2 import numpy as np import win32api,win32con import os, sys import System import System.Drawing import socketClient #import time #from PyQt5 import * #import datetime import time qtCreatorFile = "window.ui" # Enter file here.导入文件 Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)#给两个变量赋值 class MyApp(QtWidgets.QMainWindow, Ui_MainWindow): #定义一个类 global flag #---初始化 def __init__(self): global cursor,conn ,dictPara ,flag,bf,rf,gf,yf #初始化 global sockPs,sockPr,sockNs,sockNr QtWidgets.QMainWindow.__init__(self) Ui_MainWindow.__init__(self) self.setupUi(self) #---加载sqlite3参数数据库,程序所有参数 conn = sqlite3.connect("3d.db") cursor = conn.cursor() self.sqlUpdate() #---主界面按钮 #数据复位,保存 ,操作面板 self.bDataReset.clicked.connect(self.bDataResetClick) self.bDataSave.clicked.connect(self.bDataSaveClick) self.bAuto.clicked.connect(self.bAutoClick) self.bReset.clicked.connect(self.bResetClick) self.bStop.clicked.connect(self.bStopClick) self.bRed.clicked.connect(self.bRedClick) self.bGreen.clicked.connect(self.bGreenClick) self.bYellow.clicked.connect(self.bYellowClick) self.bBuzz.clicked.connect(self.bBuzzClick) #---检测设定界面按钮 #浏览 self.bSelectDoc.clicked.connect(self.bSelectDocClick) #---视觉调试界面按钮 #实时,触发 self.bLUtrig.clicked.connect(self.bLUtrigClick) self.bLDtrig.clicked.connect(self.bLDtrigClick) self.bRUtrig.clicked.connect(self.bRUtrigClick) self.bRDtrig.clicked.connect(self.bRDtrigClick) self.bC1trig.clicked.connect(self.bC1trigClick) self.bC2trig.clicked.connect(self.bC2trigClick) self.bCyUp.clicked.connect(self.bCyUpClick) 
self.bCyDown.clicked.connect(self.bCyDownClick) self.bLposition0.clicked.connect(self.bLposition0Click) self.bLposition1.clicked.connect(self.bLposition1Click) self.bLposition2.clicked.connect(self.bLposition2Click) self.bRposition0.clicked.connect(self.bRposition0Click) self.bRposition1.clicked.connect(self.bRposition1Click) self.bRposition2.clicked.connect(self.bRposition2Click) self.bAxiSave.clicked.connect(self.bAxiSaveClick) #---历史数据界面按钮 self.bExport.clicked.connect(self.bExportClick) #---QT界面数据刷新线程 self.thread = MyThread() self.thread.setIdentity("thread1") self.thread.sinOut.connect(self.GUIfresh) self.thread.setVal(2) # fd = os.open( "debug.txt", os.O_RDWR|os.O_APPEND ) #---QT界面初始化 #标定控件隐藏效果 # self.bPsheetToolCalcNext.hide() # self.bPplateToolCalcNext.hide() # self.bNsheetToolCalcNext.hide() # self.bNplateToolCalcNext.hide() # self.tR1Mspeed.setText("10") # self.tR2Mspeed.setText("10") # #视学调试,表格标题 # self.tableWidgetPs.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) # self.tableWidgetPp.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) # self.tableWidgetNs.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) # self.tableWidgetNp.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) #产量数据 self.tTime.setText(dictPara['tTime']) self.tTotal.setText(dictPara['tTotal']) self.tGood.setText(dictPara['tGood']) self.tGoodRate.setText(dictPara['tGoodRate']) self.tBad.setText(dictPara['tBad']) self.tBadRate.setText(dictPara['tBadRate']) self.tFail.setText(dictPara['tFail']) #机器人IP参数 self.tLUip.setText(dictPara['tLUip']) self.tLDip.setText(dictPara['tLDip']) self.tRUip.setText(dictPara['tRUip']) self.tRDip.setText(dictPara['tRDip']) self.tC1ip.setText(dictPara['tC1ip']) self.tC2ip.setText(dictPara['tC2ip']) # 轴参数设置 self.tSpeed.setText(dictPara['tSpeed']) self.tPosion1.setText(dictPara['tPosion1']) self.tPosion2.setText(dictPara['tPosion2']) try: import PLC PLC.write("101",hex(int(dictPara['tSpeed']))[2:len(hex(int(dictPara['tSpeed'])))]) 
PLC.write("102",hex(int(dictPara['tPosion1']))[2:len(hex(int(dictPara['tPosion1'])))]) PLC.write("103",hex(int(dictPara['tPosion2']))[2:len(hex(int(dictPara['tPosion2'])))]) except Exception as e: print(str(e)) #视觉判定参数 # 正极过渡片 self.tStandardH.setText(dictPara['tStandardH']) self.tStandardD.setText(dictPara['tStandardD']) self.tNGpath.setText(dictPara['tNGpath']) #---全局变量初始化 flag=0 rf=0 gf=0 yf=0 bf=0 #---启动子程序 try: sockPs=socketClient.connect(dictPara['tC1ip'],2003) sockPr=socketClient.connect(dictPara['tC1ip'],2004) sockNs=socketClient.connect(dictPara['tC2ip'],2005) sockNr=socketClient.connect(dictPara['tC2ip'],2006) except Exception as e: print(str(e)) # PLC.openSerial() #---事件 def closeEvent(self, event): global ctrMelfaRxM try: pass except: pass self.close() os._exit(0) #---主界面按钮事件 #---自动运行 def bAutoClick(self): pass #---暂停 def bStopClick(self): pass #---复位 def bResetClick(self): # PLC.on("100",8) # time.sleep(0.1) # PLC.off("100",8) pass #---红灯 def bRedClick(self): global flag,rf flag=1 if rf==0: PLC.on("100",9) rf=1 flag=0 return 0 if rf==1: PLC.off("100",9) rf=0 flag=0 pass #---绿灯 def bGreenClick(self): global flag,gf flag=1 if gf==0: PLC.on("100",10) gf=1 flag=0 return 0 time.sleep(0.1) if gf==1: PLC.off("100",10) gf==0 flag=0 pass pass #---黄灯 def bYellowClick(self): global flag,yf flag=1 if yf==0: PLC.on("100",11) yf=1 flag=0 return 0 if yf==1: PLC.off("100",11) yf=0 flag=0 pass pass #---蜂鸣器 def bBuzzClick(self): global flag,bf flag=1 if bf==0: PLC.on("100",12) bf=1 flag=0 return 0 if bf==1: PLC.off("100",12) bf=0 flag=0 pass pass #---重置数据 def bDataResetClick(self): global countPsheetFail,countNsheetFail,countPplateFail,countNplateFail global countPsheetOK,countPsheetNG,countNsheetOK,countNsheetNG,countPplateOK,countPplateNG,countNplateOK,countNplateNG currentTime=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())) self.tTime.setText(currentTime) self.tTotal.setText('0') self.tGood.setText('0') self.tGoodRate.setText('0') self.tBad.setText('0') 
self.tBadRate.setText('0') self.tFail.setText('0') self.outDebug("统计信息重置") #---保存数据 def bDataSaveClick(self): global paraName, conn,cursor,dictPara dictPara['tTime']=self.tTime.toPlainText() dictPara['tTotal']=self.tTotal.toPlainText() dictPara['tGood']=self.tGood.toPlainText() dictPara['tGoodRate']=self.tGoodRate.toPlainText() dictPara['tBad']=self.tBad.toPlainText()
dictPara['tLDip']=self.tLDip.toPlainText() dictPara['tRUip']=self.tRUip.toPlainText() dictPara['tRDip']=self.tRDip.toPlainText() dictPara['tC1ip']=self.tC1ip.toPlainText() dictPara['tC2ip']=self.tC2ip.toPlainText() dictPara['tSpeed']=self.tSpeed.toPlainText() dictPara['tPosion1']=self.tPosion1.toPlainText() dictPara['tPosion2']=self.tPosion2.toPlainText() dictPara['tStandardH']=self.tStandardH.toPlainText() dictPara['tStandardD']=self.tStandardD.toPlainText() dictPara['tNGpath']=self.tNGpath.toPlainText() # print(dictPara) for key in dictPara: cursor.execute("update para set data=? where name = ?",(dictPara[key],key,)) conn.commit() self.outDebug("系统参数保存") # b=win32api.MessageBox(0, "保存成功", "参数保存",win32con.MB_OK) #---检测设定界面,按钮事件 def bSelectDocClick(self): directory1 = QFileDialog.getExistingDirectory(self, "选取文件夹", "D:/") #起始路径 self.tNGpath.setText(directory1) self.outDebug("修改NG文件夹:"+directory1) # print(directory1) # img1=cv2.imread("arrayBmp0.bmp") # cv2.imwrite(directory1+"/arrayBmp3.bmp",img1) #---视觉调试界面,按钮事件 #---相机触发 #bTrigFunc相机触发功能函数 def bTrigFunc(self,sn,file,camNum,camName,visionStand): pass def bLUtrigClick(self): pass def bLDtrigClick(self): pass def bRUtrigClick(self): pass def bRDtrigClick(self): pass def bC1trigClick(self): global sockPs,sockPr,sockNs,sockNr socketClient.sent(sockPs,"TRG") msg=socketClient.rev(sockPr,1024) print("msg:",msg) self.tCode1.setText(str(msg)) pass def bC2trigClick(self): global sockPs,sockPr,sockNs,sockNr socketClient.sent(sockNs,"TRG") msg=socketClient.rev(sockNr,1024) print("msg:",msg) self.tCode2.setText(str(msg)) pass #---机台手动按钮 def bCyUpClick(self): global flag flag=1 PLC.on("100",6) time.sleep(0.1) PLC.off("100",7) flag=0 pass def bCyDownClick(self): global flag flag=1 PLC.on("100",7) time.sleep(0.1) PLC.off("100",6) flag=0 pass def bLposition0Click(self): global flag flag=1 PLC.on("100",0) time.sleep(0.1) PLC.off("100",0) flag=0 pass def bLposition1Click(self): global flag flag=1 PLC.on("100",1) time.sleep(0.1) 
PLC.off("100",1) flag=0 pass def bLposition2Click(self): global flag flag=1 PLC.on("100",2) time.sleep(0.1) PLC.off("100",2) flag=0 pass def bRposition0Click(self): global flag flag=1 PLC.on("100",3) time.sleep(0.1) PLC.off("100",3) flag=0 pass def bRposition1Click(self): global flag flag=1 PLC.on("100",4) time.sleep(0.1) PLC.off("100",4) flag=0 pass def bRposition2Click(self): global flag flag=1 PLC.on("100",5) time.sleep(0.1) PLC.off("100",5) flag=0 pass #---历史数据界面,按钮事件 def bExportClick(self): pass #---写入PLC数据界面,按钮事件 def bAxiSaveClick(self): PLC.write("101",hex(int(dictPara['tSpeed']))[2:len(hex(int(dictPara['tSpeed'])))]) PLC.write("102",hex(int(dictPara['tPosion1']))[2:len(hex(int(dictPara['tPosion1'])))]) PLC.write("103",hex(int(dictPara['tPosion2']))[2:len(hex(int(dictPara['tPosion2'])))]) pass #---F关闭线程功能 def _async_raise(self,tid, exctype): """raises the exception, performs cleanup if needed""" tid = c_long(tid) if not inspect.isclass(exctype): exctype = type(exctype) res = pythonapi.PyThreadState_SetAsyncExc(tid, py_object(exctype)) if res == 0: raise ValueError("invalid thread id") elif res != 1: # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") def stop_thread(self,thread): self._async_raise(thread.ident, SystemExit) #---F程序跟踪功能 def outDebug(self,text): global fd # fd = os.open( "debug.txt", os.O_RDWR|os.O_APPEND|os.O_CREAT ) # # Write one string # line = "[ "+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))+" ]"+text+"\n" # # string needs to be converted byte object # b = str.encode(line) # os.write(fd, b) # # Close opened filer # os.close( fd) pass #---F数据库刷新功能 def sqlUpdate(self): global cursor, p ,conn ,dictPara,basler,sn cursor.execute('select * from para' ) value = cursor.fetchall() # print(value) dictPara={} for i in range(len(value)): 
dictPara[value[i][0]]=str(value[i][1]) #---F图片转换 def toPixImg(self,img1): img1Rgb=cv2.cvtColor(img1,cv2.COLOR_BGR2RGB) qimag1=QImage(img1Rgb[:],img1Rgb.shape[1], img1Rgb.shape[0],img1Rgb.shape[1] * 3, QImage.Format_RGB888) pixImg=QPixmap(QPixmap.fromImage(qimag1)) return pixImg #---F QT界面刷新程序 def GUIfresh(self): global flag con=[self.bLposition0,self.bLposition1,self.bLposition2,\ self.bRposition0,self.bRposition1,self.bRposition2,\ self.bCyUp,self.bCyDown,self.bRed,self.bGreen,self.bYellow,self.bBuzz] if flag==0: state=PLC.readState() for i in range(len(con)): if state[i]== True: con[i].setStyleSheet("background-color: rgb(0, 255, 0);") else: con[i].setStyleSheet("background-color: rgb(255, 0, 0);") self.bDataSaveClick() #MAINSELECT #---F QT界面刷新线程 class MyThread(QThread): sinOut = pyqtSignal(str) def __init__(self,parent=None): super(MyThread,self).__init__(parent) self.identity = None def setIdentity(self,text): self.identity = text def setVal(self,val): self.times = int(val) # 执行线程的run方法 self.start() def run(self): while self.times > 0 and self.identity: # 发射信号 self.sinOut.emit(self.identity+"==>"+str(self.times)) time.sleep(1) if __name__ == "__main__": app = QtWidgets.QApplication(sys.argv) mainWindow = MyApp() # visionWindow=Vison() # windowConn() # mainWindow.show() app.setActiveWindow(mainWindow) mainWindow.showMaximized() # mainWindow.showFullScreen() sys.exit(app.exec_())
dictPara['tBadRate']=self.tBadRate.toPlainText() dictPara['tFail']=self.tFail.toPlainText() dictPara['tLUip']=self.tLUip.toPlainText()
random_line_split
gui_LMI.py
from __future__ import division import clr from PyQt5 import QtCore, QtWidgets, uic from PyQt5.QtGui import QPixmap,QImage,QStandardItem from PyQt5.QtCore import QThread , pyqtSignal from PyQt5.QtWidgets import QFileDialog ,QTableWidgetItem import time import inspect import sqlite3 import pandas import collections import cv2 import math import threading from socket import socket, AF_INET , SOCK_STREAM,SOL_SOCKET,SO_SNDBUF #import cv2 import numpy as np import win32api,win32con import os, sys import System import System.Drawing import socketClient #import time #from PyQt5 import * #import datetime import time qtCreatorFile = "window.ui" # Enter file here.导入文件 Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)#给两个变量赋值 class MyApp(QtWidgets.QMainWindow, Ui_MainWindow): #定义一个类 global flag #---初始化 def __init__(self): global cursor,conn ,dictPara ,flag,bf,rf,gf,yf #初始化 global sockPs,sockPr,sockNs,sockNr QtWidgets.QMainWindow.__init__(self) Ui_MainWindow.__init__(self) self.setupUi(self) #---加载sqlite3参数数据库,程序所有参数 conn = sqlite3.connect("3d.db") cursor = conn.cursor() self.sqlUpdate() #---主界面按钮 #数据复位,保存 ,操作面板 self.bDataReset.clicked.connect(self.bDataResetClick) self.bDataSave.clicked.connect(self.bDataSaveClick) self.bAuto.clicked.connect(self.bAutoClick) self.bReset.clicked.connect(self.bResetClick) self.bStop.clicked.connect(self.bStopClick) self.bRed.clicked.connect(self.bRedClick) self.bGreen.clicked.connect(self.bGreenClick) self.bYellow.clicked.connect(self.bYellowClick) self.bBuzz.clicked.connect(self.bBuzzClick) #---检测设定界面按钮 #浏览 self.bSelectDoc.clicked.connect(self.bSelectDocClick) #---视觉调试界面按钮 #实时,触发 self.bLUtrig.clicked.connect(self.bLUtrigClick) self.bLDtrig.clicked.connect(self.bLDtrigClick) self.bRUtrig.clicked.connect(self.bRUtrigClick) self.bRDtrig.clicked.connect(self.bRDtrigClick) self.bC1trig.clicked.connect(self.bC1trigClick) self.bC2trig.clicked.connect(self.bC2trigClick) self.bCyUp.clicked.connect(self.bCyUpClick) 
self.bCyDown.clicked.connect(self.bCyDownClick) self.bLposition0.clicked.connect(self.bLposition0Click) self.bLposition1.clicked.connect(self.bLposition1Click) self.bLposition2.clicked.connect(self.bLposition2Click) self.bRposition0.clicked.connect(self.bRposition0Click) self.bRposition1.clicked.connect(self.bRposition1Click) self.bRposition2.clicked.connect(self.bRposition2Click) self.bAxiSave.clicked.connect(self.bAxiSaveClick) #---历史数据界面按钮 self.bExport.clicked.connect(self.bExportClick) #---QT界面数据刷新线程 self.thread = MyThread() self.thread.setIdentity("thread1") self.thread.sinOut.connect(self.GUIfresh) self.thread.setVal(2) # fd = os.open( "debug.txt", os.O_RDWR|os.O_APPEND ) #---QT界面初始化 #标定控件隐藏效果 # self.bPsheetToolCalcNext.hide() # self.bPplateToolCalcNext.hide() # self.bNsheetToolCalcNext.hide() # self.bNplateToolCalcNext.hide() # self.tR1Mspeed.setText("10") # self.tR2Mspeed.setText("10") # #视学调试,表格标题 # self.tableWidgetPs.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) # self.tableWidgetPp.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) # self.tableWidgetNs.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) # self.tableWidgetNp.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) #产量数据 self.tTime.setText(dictPara['tTime']) self.tTotal.setText(dictPara['tTotal']) self.tGood.setText(dictPara['tGood']) self.tGoodRate.setText(dictPara['tGoodRate']) self.tBad.setText(dictPara['tBad']) self.tBadRate.setText(dictPara['tBadRate']) self.tFail.setText(dictPara['tFail']) #机器人IP参数 self.tLUip.setText(dictPara['tLUip']) self.tLDip.setText(dictPara['tLDip']) self.tRUip.setText(dictPara['tRUip']) self.tRDip.setText(dictPara['tRDip']) self.tC1ip.setText(dictPara['tC1ip']) self.tC2ip.setText(dictPara['tC2ip']) # 轴参数设置 self.tSpeed.setText(dictPara['tSpeed']) self.tPosion1.setText(dictPara['tPosion1']) self.tPosion2.setText(dictPara['tPosion2']) try: import PLC PLC.write("101",hex(int(dictPara['tSpeed']))[2:len(hex(int(dictPara['tSpeed'])))]) 
PLC.write("102",hex(int(dictPara['tPosion1']))[2:len(hex(int(dictPara['tPosion1'])))]) PLC.write("103",hex(int(dictPara['tPosion2']))[2:len(hex(int(dictPara['tPosion2'])))]) except Exception as e: print(str(e)) #视觉判定参数 # 正极过渡片 self.tStandardH.setText(dictPara['tStandardH']) self.tStandardD.setText(dictPara['tStandardD']) self.tNGpath.setText(dictPara['tNGpath']) #---全局变量初始化 flag=0 rf=0 gf=0 yf=0 bf=0 #---启动子程序 try: sockPs=socketClient.connect(dictPara['tC1ip'],2003) sockPr=socketClient.connect(dictPara['tC1ip'],2004) sockNs=socketClient.connect(dictPara['tC2ip'],2005) sockNr=socketClient.connect(dictPara['tC2ip'],2006) except Exception as e: print(str(e)) # PLC.openSerial() #---事件 def closeEvent(self, event): global ctrMelfaRxM try: pass except: pass self.close() os._exit(0) #---主界面按钮事件 #---自动运行 def bAutoClick(self): pass #---暂停 def bStopClick(self): pass #---复位 def bResetClick(self): # PLC.on("100",8) # time.sleep(0.1) # PLC.off("100",8) pass #---红灯 def bRedClick(self): global flag,rf flag=1 if rf==0: PLC.on("100",9) rf=1 flag=0 return 0 if rf==1: PLC.off("100",9) rf=0 flag=0 pass #---绿灯 def bGreenClick(self): global flag,gf flag=1 if gf==0: PLC.on("100",10) gf=1 flag=0 return 0 time.sleep(0.1) if gf==1: PLC.off("100",10) gf==0 flag=0 pass pass #---黄灯 def bYellowClick(self): global flag,yf flag=1 if yf==0: PLC.on("100",11) yf=1 flag=0 return 0 if yf==1: PLC.off("100",11) yf=0 flag=0 pass pass #---蜂鸣器 def bBuzzClick(self): global flag,bf flag=1 if bf==0: PLC.on("100",12) bf=1 flag=0 return 0 if bf==1: PLC.off("100",12) bf=0 flag=0 pass pass #---重置数据 def bDataResetClick(self): global countPsheetFail,countNsheetFail,countPplateFail,countNplateFail global countPsheetOK,countPsheetNG,countNsheetOK,countNsheetNG,countPplateOK,countPplateNG,countNplateOK,countNplateNG currentTime=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())) self.tTime.setText(currentTime) self.tTotal.setText('0') self.tGood.setText('0') self.tGoodRate.setText('0') self.tBad.setText('0') 
self.tBadRate.setText('0') self.tFail.setText('0') self.outDebug("统计信息重置") #---保存数据 def bDataSaveClick(self): global paraName, conn,cursor,dictPara dictPara['tTime']=self.tTime.toPlainText() dictPara['tTotal']=self.tTotal.toPlainText() dictPara['tGood']=self.tGood.toPlainText() dictPara['tGoodRate']=self.tGoodRate.toPlainText() dictPara['tBad']=self.tBad.toPlainText() dictPara['tBadRate']=self.tBadRate.toPlainText() dictPara['tFail']=self.tFail.toPlainText() dictPara['tLUip']=self.tLUip.toPlainText() dictPara['tLDip']=self.tLDip.toPlainText() dictPara['tRUip']=self.tRUip.toPlainText() dictPara['tRDip']=self.tRDip.toPlainText() dictPara['tC1ip']=self.tC1ip.toPlainText() dictPara['tC2ip']=self.tC2ip.toPlainText() dictPara['tSpeed']=self.tSpeed.toPlainText() dictPara['tPosion1']=self.tPosion1.toPlainText() dictPara['tPosion2']=self.tPosion2.toPlainText() dictPara['tStandardH']=self.tStandardH.toPlainText() dictPara['tStandardD']=self.tStandardD.toPlainText() dictPara['tNGpath']=self.tNGpath.toPlainText() # print(dictPara) for key in dictPara: cursor.execute("update para set data=? 
where name = ?",(dictPara[key],key,)) conn.commit() self.outDebug("系统参数保存") # b=win32api.MessageBox(0, "保存成功", "参数保存",win32con.MB_OK) #---检测设定界面,按钮事件 def bSelectDocClick(self): directory1 = QFileDialog.getExistingDirectory(self, "选取文件夹", "D:/") #起始路径 self.tNGpath.setText(directory1) self.outDebug("修改NG文件夹:"+directory1) # print(directory1) # img1=cv2.imread("arrayBmp0.bmp") # cv2.imwrite(directory1+"/arrayBmp3.bmp",img1) #---视觉调试界面,按钮事件 #---相机触发 #bTrigFunc相机触发功能函数 def bTrigFunc(self,sn,file,camNum,camName,visionStand): pass def bLUtrigClick(self): pass def bLDtrigClick(self): pass def bRUtrigClick(self): pass def bRDtrigClick(self): pass def bC1trigClick(self): global sockPs,sockPr,sockNs,sockNr socketClient.sent(sockPs,"TRG") msg=socketClient.rev(sockPr,1024) print("msg:",msg) self.tCode1.setText(str(msg)) pass def bC2trigClick(self): global sockPs,sockPr,sockNs,sockNr socketClient.sent(sockNs,"TRG") msg=socketClient.rev(sockNr,1024) print("msg:",msg) self.tCode2.setText(str(msg)) pass #---机台手动按钮 def bCyUpClick(self): global flag flag=1 PLC.on("100",6) time.sleep(0.1) PLC.off("100",7) flag=0 pass def bCyDownClick(self): global flag flag=1 PLC.on("100",7) time.sleep(0.1) PLC.off("100",6) flag=0 pass def bLposition0Click(self): global flag flag=1 PLC.on("100",0) time.sleep(0.1) PLC.off("100",0) flag=0 pass def bLposition1Click(self): global flag flag=1 PLC.on("100",1) time.sleep(0.1) PLC.off("100",1) flag=0 pass def bLposition2Click(self): global flag flag=1 PLC.on("100",2) time.sleep(0.1) PLC.off("100",2) flag=0 pass def bRposition0Click(self): global flag flag=1 PLC.on("100",3) time.sleep(0.1) PLC.off("100",3) flag=0 pass def bRposition1Click(self): global flag flag=1 PLC.on("100",4) time.sleep(0.1) PLC.off("100",4) flag=0 pass def bRposition2Click(self): global flag flag=1 PLC.on("100",5) time.sleep(0.1) PLC.off("100",5) flag=0 pass #---历史数据界面,按钮事件 def bExportClick(self): pass #---写入PLC数据界面,按钮事件 def bAxiSaveClick(self): 
PLC.write("101",hex(int(dictPara['tSpeed']))[2:len(hex(int(dictPara['tSpeed'])))]) PLC.write("102",hex(int(dictPara['tPosion1']))[2:len(hex(int(dictPara['tPosion1'])))]) PLC.write("103",hex(int(dictPara['tPosion2']))[2:len(hex(int(dictPara['tPosion2'])))]) pass #---F关闭线程功能 def _async_raise(self,tid, exctype): """raises the exception, performs cleanup if needed""" tid = c_long(tid) if not inspect.isclass(exctype): exctype = type(exctype) res = pythonapi.PyThreadState_SetAsyncExc(tid, py_object(exctype)) if res == 0:
raise SystemError("PyThreadState_SetAsyncExc failed") def stop_thread(self,thread): self._async_raise(thread.ident, SystemExit) #---F程序跟踪功能 def outDebug(self,text): global fd # fd = os.open( "debug.txt", os.O_RDWR|os.O_APPEND|os.O_CREAT ) # # Write one string # line = "[ "+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))+" ]"+text+"\n" # # string needs to be converted byte object # b = str.encode(line) # os.write(fd, b) # # Close opened filer # os.close( fd) pass #---F数据库刷新功能 def sqlUpdate(self): global cursor, p ,conn ,dictPara,basler,sn cursor.execute('select * from para' ) value = cursor.fetchall() # print(value) dictPara={} for i in range(len(value)): dictPara[value[i][0]]=str(value[i][1]) #---F图片转换 def toPixImg(self,img1): img1Rgb=cv2.cvtColor(img1,cv2.COLOR_BGR2RGB) qimag1=QImage(img1Rgb[:],img1Rgb.shape[1], img1Rgb.shape[0],img1Rgb.shape[1] * 3, QImage.Format_RGB888) pixImg=QPixmap(QPixmap.fromImage(qimag1)) return pixImg #---F QT界面刷新程序 def GUIfresh(self): global flag con=[self.bLposition0,self.bLposition1,self.bLposition2,\ self.bRposition0,self.bRposition1,self.bRposition2,\ self.bCyUp,self.bCyDown,self.bRed,self.bGreen,self.bYellow,self.bBuzz] if flag==0: state=PLC.readState() for i in range(len(con)): if state[i]== True: con[i].setStyleSheet("background-color: rgb(0, 255, 0);") else: con[i].setStyleSheet("background-color: rgb(255, 0, 0);") self.bDataSaveClick() #MAINSELECT #---F QT界面刷新线程 class MyThread(QThread): sinOut = pyqtSignal(str) def __init__(self,parent=None): super(MyThread,self).__init__(parent) self.identity = None def setIdentity(self,text): self.identity = text def setVal(self,val): self.times = int(val) # 执行线程的run方法 self.start() def run(self): while self.times > 0 and self.identity: # 发射信号 self.sinOut.emit(self.identity+"==>"+str(self.times)) time.sleep(1) if __name__ == "__main__": app = QtWidgets.QApplication(sys.argv) mainWindow = MyApp() # visionWindow=Vison() # windowConn() # mainWindow.show() 
app.setActiveWindow(mainWindow) mainWindow.showMaximized() # mainWindow.showFullScreen() sys.exit(app.exec_())
raise ValueError("invalid thread id") elif res != 1: # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" pythonapi.PyThreadState_SetAsyncExc(tid, None)
identifier_body
gui_LMI.py
from __future__ import division import clr from PyQt5 import QtCore, QtWidgets, uic from PyQt5.QtGui import QPixmap,QImage,QStandardItem from PyQt5.QtCore import QThread , pyqtSignal from PyQt5.QtWidgets import QFileDialog ,QTableWidgetItem import time import inspect import sqlite3 import pandas import collections import cv2 import math import threading from socket import socket, AF_INET , SOCK_STREAM,SOL_SOCKET,SO_SNDBUF #import cv2 import numpy as np import win32api,win32con import os, sys import System import System.Drawing import socketClient #import time #from PyQt5 import * #import datetime import time qtCreatorFile = "window.ui" # Enter file here.导入文件 Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)#给两个变量赋值 class MyApp(QtWidgets.QMainWindow, Ui_MainWindow): #定义一个类 global flag #---初始化 def __init__(self): global cursor,conn ,dictPara ,flag,bf,rf,gf,yf #初始化 global sockPs,sockPr,sockNs,sockNr QtWidgets.QMainWindow.__init__(self) Ui_MainWindow.__init__(self) self.setupUi(self) #---加载sqlite3参数数据库,程序所有参数 conn = sqlite3.connect("3d.db") cursor = conn.cursor() self.sqlUpdate() #---主界面按钮 #数据复位,保存 ,操作面板 self.bDataReset.clicked.connect(self.bDataResetClick) self.bDataSave.clicked.connect(self.bDataSaveClick) self.bAuto.clicked.connect(self.bAutoClick) self.bReset.clicked.connect(self.bResetClick) self.bStop.clicked.connect(self.bStopClick) self.bRed.clicked.connect(self.bRedClick) self.bGreen.clicked.connect(self.bGreenClick) self.bYellow.clicked.connect(self.bYellowClick) self.bBuzz.clicked.connect(self.bBuzzClick) #---检测设定界面按钮 #浏览 self.bSelectDoc.clicked.connect(self.bSelectDocClick) #---视觉调试界面按钮 #实时,触发 self.bLUtrig.clicked.connect(self.bLUtrigClick) self.bLDtrig.clicked.connect(self.bLDtrigClick) self.bRUtrig.clicked.connect(self.bRUtrigClick) self.bRDtrig.clicked.connect(self.bRDtrigClick) self.bC1trig.clicked.connect(self.bC1trigClick) self.bC2trig.clicked.connect(self.bC2trigClick) self.bCyUp.clicked.connect(self.bCyUpClick) 
self.bCyDown.clicked.connect(self.bCyDownClick) self.bLposition0.clicked.connect(self.bLposition0Click) self.bLposition1.clicked.connect(self.bLposition1Click) self.bLposition2.clicked.connect(self.bLposition2Click) self.bRposition0.clicked.connect(self.bRposition0Click) self.bRposition1.clicked.connect(self.bRposition1Click) self.bRposition2.clicked.connect(self.bRposition2Click) self.bAxiSave.clicked.connect(self.bAxiSaveClick) #---历史数据界面按钮 self.bExport.clicked.connect(self.bExportClick) #---QT界面数据刷新线程 self.thread = MyThread() self.thread.setIdentity("thread1") self.thread.sinOut.connect(self.GUIfresh) self.thread.setVal(2) # fd = os.open( "debug.txt", os.O_RDWR|os.O_APPEND ) #---QT界面初始化 #标定控件隐藏效果 # self.bPsheetToolCalcNext.hide() # self.bPplateToolCalcNext.hide() # self.bNsheetToolCalcNext.hide() # self.bNplateToolCalcNext.hide() # self.tR1Mspeed.setText("10") # self.tR2Mspeed.setText("10") # #视学调试,表格标题 # self.tableWidgetPs.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) # self.tableWidgetPp.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) # self.tableWidgetNs.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) # self.tableWidgetNp.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) #产量数据 self.tTime.setText(dictPara['tTime']) self.tTotal.setText(dictPara['tTotal']) self.tGood.setText(dictPara['tGood']) self.tGoodRate.setText(dictPara['tGoodRate']) self.tBad.setText(dictPara['tBad']) self.tBadRate.setText(dictPara['tBadRate']) self.tFail.setText(dictPara['tFail']) #机器人IP参数 self.tLUip.setText(dictPara['tLUip']) self.tLDip.setText(dictPara['tLDip']) self.tRUip.setText(dictPara['tRUip']) self.tRDip.setText(dictPara['tRDip']) self.tC1ip.setText(dictPara['tC1ip']) self.tC2ip.setText(dictPara['tC2ip']) # 轴参数设置 self.tSpeed.setText(dictPara['tSpeed']) self.tPosion1.setText(dictPara['tPosion1']) self.tPosion2.setText(dictPara['tPosion2']) try: import PLC PLC.write("101",hex(int(dictPara['tSpeed']))[2:len(hex(int(dictPara['tSpeed'])))]) 
PLC.write("102",hex(int(dictPara['tPosion1']))[2:len(hex(int(dictPara['tPosion1'])))]) PLC.write("103",hex(int(dictPara['tPosion2']))[2:len(hex(int(dictPara['tPosion2'])))]) except Exception as e: print(str(e)) #视觉判定参数 # 正极过渡片 self.tStandardH.setText(dictPara['tStandardH']) self.tStandardD.setText(dictPara['tStandardD']) self.tNGpath.setText(dictPara['tNGpath']) #---全局变量初始化 flag=0 rf=0 gf=0 yf=0 bf=0 #---启动子程序 try: sockPs=socketClient.connect(dictPara['tC1ip'],2003) sockPr=socketClient.connect(dictPara['tC1ip'],2004) sockNs=socketClient.connect(dictPara['tC2ip'],2005) sockNr=socketClient.connect(dictPara['tC2ip'],2006) except Exception as e: print(str(e)) # PLC.openSerial() #---事件 def closeEvent(self, event): global ctrMelfaRxM try: pass except: pass self.close() os._exit(0) #---主界面按钮事件 #---自动运行 def bAutoClick(self): pass #---暂停 def bStopClick(self): pass #---复位 def bResetClick(self): # PLC.on("100",8) # time.sleep(0.1) # PLC.off("100",8) pass #---红灯 def bRedClick(self): global flag,rf flag=1 if rf==0: PLC.on("100",9) rf=1 flag=0 return 0 if rf==1: PLC.off("100",9) rf=0 flag=0 pass #---绿灯 def bGreenClick(self): global flag,gf flag=1 if gf==0: PLC.on("100",10) gf=1 flag=0 return 0 time.sleep(0.1) if gf==1: PLC.off("100",10) gf==0 flag=0 pass pass #---黄灯 def bYellowClick(self): global flag,yf flag=1 if yf==0: PLC.on("100",11) yf=1 flag=0 return 0 if yf==1: PLC.off("100",11) yf=0 flag=0 pass pass #---蜂鸣器 def bBuzzClick(self): global flag,bf flag=1 if bf==0: PLC.on("100",12) bf=1 flag=0 return 0 if bf==1: PLC.off("100",12) bf=0 flag=0 pass pass #---重置数据 def bDataResetClick(self): global countPsheetFail,countNsheetFail,countPplateFail,countNplateFail global countPsheetOK,countPsheetNG,countNsheetOK,countNsheetNG,countPplateOK,countPplateNG,countNplateOK,countNplateNG currentTime=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())) self.tTime.setText(currentTime) self.tTotal.setText('0') self.tGood.setText('0') self.tGoodRate.setText('0') self.tBad.setText('0') 
self.tBadRate.setText('0') self.tFail.setText('0') self.outDebug("统计信息重置") #---保存数据 def bDataSaveClick(self): global paraName, conn,cursor,dictPara dictPara['tTime']=self.tTime.toPlainText() dictPara['tTotal']=self.tTotal.toPlainText() dictPara['tGood']=self.tGood.toPlainText() dictPara['tGoodRate']=self.tGoodRate.toPlainText() dictPara['tBad']=self.tBad.toPlainText() dictPara['tBadRate']=self.tBadRate.toPlainText() dictPara['tFail']=self.tFail.toPlainText() dictPara['tLUip']=self.tLUip.toPlainText() dictPara['tLDip']=self.tLDip.toPlainText() dictPara['tRUip']=self.tRUip.toPlainText() dictPara['tRDip']=self.tRDip.toPlainText() dictPara['tC1ip']=self.tC1ip.toPlainText() dictPara['tC2ip']=self.tC2ip.toPlainText() dictPara['tSpeed']=self.tSpeed.toPlainText() dictPara['tPosion1']=self.tPosion1.toPlainText() dictPara['tPosion2']=self.tPosion2.toPlainText() dictPara['tStandardH']=self.tStandardH.toPlainText() dictPara['tStandardD']=self.tStandardD.toPlainText() dictPara['tNGpath']=self.tNGpath.toPlainText() # print(dictPara) for key in dictPara: cursor.execute("update para set data=? 
where name = ?",(dictPara[key],key,)) conn.commit() self.outDebug("系统参数保存") # b=win32api.MessageBox(0, "保存成功", "参数保存",win32con.MB_OK) #---检测设定界面,按钮事件 def bSelectDocClick(self): directory1 = QFileDialog.getExistingDirectory(self, "选取文件夹", "D:/") #起始路径 self.tNGpath.setText(directory1) self.outDebug("修改NG文件夹:"+directory1) # print(directory1) # img1=cv2.imread("arrayBmp0.bmp") # cv2.imwrite(directory1+"/arrayBmp3.bmp",img1) #---视觉调试界面,按钮事件 #---相机触发 #bTrigFunc相机触发功能函数 def bTrigFunc(self,sn,file,camNum,camName,visionStand): pass def bLUtrigClick(self): pass def bLDtrigClick(self): pass def bRUtrigClick(self): pass def bRDtrigClick(self): pass def bC1trigClick(self): global sockPs,sockPr,sockNs,sockNr socketClient.sent(sockPs,"TRG") msg=socketClient.rev(sockPr,1024) print("msg:",msg) self.tCode1.setText(str(msg)) pass def bC2trigClick(self): global sockPs,sockPr,sockNs,sockNr socketClient.sent(sockNs,"TRG") msg=socketClient.rev(sockNr,1024) print("msg:",msg) self.tCode2.setText(str(msg)) pass #---机台手动按钮 def bCyUpClick(self): global flag flag=1 PLC.on("100",6) time.sleep(0.1) PLC.off("100",7) flag=0 pass def bCyDownClick(self): global flag flag=1 PLC.on("100",7) time.sleep(0.1) PLC.off("100",6) flag=0 pass def bLposition0Click(self): global flag flag=1 PLC.on("100",0) time.sleep(0.1) PLC.off("100",0) flag=0 pass def bLposition1Click(self): global flag flag=1 PLC.on("100",1) time.sleep(0.1) PLC.off("100",1) flag=0 pass def bLposition2Click(self): global flag flag=1 PLC.on("100",2) time.sleep(0.1) PLC.off("100",2) flag=0 pass def bRposition0Click(self): global flag flag=1 PLC.on("100",3) time.sleep(0.1) PLC.off("100",3) flag=0 pass def bRposition1Click(self): global flag flag=1 PLC.on("100",4) time.sleep(0.1) PLC.off("100",4) flag=0 pass def bRposition2Click(self): global flag flag=1 PLC.on("100",5) time.sleep(0.1) PLC.off("100",5) flag=0 pass #---历史数据界面,按钮事件 def bExportClick(self): pass #---写入PLC数据界面,按钮事件 def bAxiSaveClick(self): 
PLC.write("101",hex(int(dictPara['tSpeed']))[2:len(hex(int(dictPara['tSpeed'])))]) PLC.write("102",hex(int(dictPara['tPosion1']))[2:len(hex(int(dictPara['tPosion1'])))]) PLC.write("103",hex(int(dictPara['tPosion2']))[2:len(hex(int(dictPara['tPosion2'])))]) pass #---F关闭线程功能 def _async_raise(self,tid, exctype): """raises the exception, performs cleanup if needed""" tid = c_long(tid) if not inspect.isclass(exctype): exctype = type(exctype) res = pythonapi.PyThreadState_SetAsyncExc(tid, py_object(exctype)) if res == 0: raise ValueError("invalid thread id") elif res != 1: # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") def stop_thread(self,thread): self._async_raise(thread.ident, SystemExit) #---F程序跟踪功能 def outDebug(self,text): global fd # fd = os.open( "debug.txt", os.O_RDWR|os.O_APPEND|os.O_CREAT ) # # Write one string # line = "[ "+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))+" ]"+text+"\n" # # string needs to be converted byte object # b = str.encode(line) # os.write(fd, b) # # Close opened filer # os.close( fd) pass #---F数据库刷新功能 def sqlUpdate(self): global cursor, p ,conn ,dictPara,basler,sn cursor.execute('select * from para' ) value = cursor.fetchall() # print(value) dictPara={} for i in range(len(value)): dictPara[value[i][0]]=str(value[i][1]) #---F图片转换 def toPixImg(self,img1): img1Rgb=cv2.cvtColor(img1,cv2.COLOR_BGR2RGB) qimag1=QImage(img1Rgb[:],img1Rgb.shape[1], img1Rgb.shape[0],img1Rgb.shape[1] * 3, QImage.Format_RGB888) pixImg=QPixmap(QPixmap.fromImage(qimag1)) return pixImg #---F QT界面刷新程序 def GUIfresh(self): global flag con=[self.bLposition0,self.bLposition1,self.bLposition2,\ self.bRposition0,self.bRposition1,self.bRposition2,\ self.bCyUp,self.bCyDown,self.bRed,self.bGreen,self.bYellow,self.bBuzz] if flag==0: state=PLC.readState() for i in 
range(len(con)): if state[i]== True: con[i].setStyleSheet("background-color: rgb(0, 255, 0);") else: con[i].setStyleSheet("background-color: rgb(255, 0, 0);") self.bDataSaveClick() #MAINSELECT #---F QT界面刷新线程 class MyThread(QThread): sinOut = pyqtSignal(str) def __init__(self,parent=None): super(MyThread,self).__init__(parent) self.identity = None def setIdentity(self,text): self.identity = text def setVal(self,val): self.times = int(val) # 执行线程的run方法 self.start() def run(self): while self.times > 0 and self.identity: # 发射信号 self.sinOut.emit(self.identity+"==>"+str(self.times))
"__main__": app = QtWidgets.QApplication(sys.argv) mainWindow = MyApp() # visionWindow=Vison() # windowConn() # mainWindow.show() app.setActiveWindow(mainWindow) mainWindow.showMaximized() # mainWindow.showFullScreen() sys.exit(app.exec_())
time.sleep(1) if __name__ ==
conditional_block
gui_LMI.py
from __future__ import division import clr from PyQt5 import QtCore, QtWidgets, uic from PyQt5.QtGui import QPixmap,QImage,QStandardItem from PyQt5.QtCore import QThread , pyqtSignal from PyQt5.QtWidgets import QFileDialog ,QTableWidgetItem import time import inspect import sqlite3 import pandas import collections import cv2 import math import threading from socket import socket, AF_INET , SOCK_STREAM,SOL_SOCKET,SO_SNDBUF #import cv2 import numpy as np import win32api,win32con import os, sys import System import System.Drawing import socketClient #import time #from PyQt5 import * #import datetime import time qtCreatorFile = "window.ui" # Enter file here.导入文件 Ui_MainWindow, QtBaseClass = uic.loadUiType(qtCreatorFile)#给两个变量赋值 class MyApp(QtWidgets.QMainWindow, Ui_MainWindow): #定义一个类 global flag #---初始化 def __init__(self): global cursor,conn ,dictPara ,flag,bf,rf,gf,yf #初始化 global sockPs,sockPr,sockNs,sockNr QtWidgets.QMainWindow.__init__(self) Ui_MainWindow.__init__(self) self.setupUi(self) #---加载sqlite3参数数据库,程序所有参数 conn = sqlite3.connect("3d.db") cursor = conn.cursor() self.sqlUpdate() #---主界面按钮 #数据复位,保存 ,操作面板 self.bDataReset.clicked.connect(self.bDataResetClick) self.bDataSave.clicked.connect(self.bDataSaveClick) self.bAuto.clicked.connect(self.bAutoClick) self.bReset.clicked.connect(self.bResetClick) self.bStop.clicked.connect(self.bStopClick) self.bRed.clicked.connect(self.bRedClick) self.bGreen.clicked.connect(self.bGreenClick) self.bYellow.clicked.connect(self.bYellowClick) self.bBuzz.clicked.connect(self.bBuzzClick) #---检测设定界面按钮 #浏览 self.bSelectDoc.clicked.connect(self.bSelectDocClick) #---视觉调试界面按钮 #实时,触发 self.bLUtrig.clicked.connect(self.bLUtrigClick) self.bLDtrig.clicked.connect(self.bLDtrigClick) self.bRUtrig.clicked.connect(self.bRUtrigClick) self.bRDtrig.clicked.connect(self.bRDtrigClick) self.bC1trig.clicked.connect(self.bC1trigClick) self.bC2trig.clicked.connect(self.bC2trigClick) self.bCyUp.clicked.connect(self.bCyUpClick) 
self.bCyDown.clicked.connect(self.bCyDownClick) self.bLposition0.clicked.connect(self.bLposition0Click) self.bLposition1.clicked.connect(self.bLposition1Click) self.bLposition2.clicked.connect(self.bLposition2Click) self.bRposition0.clicked.connect(self.bRposition0Click) self.bRposition1.clicked.connect(self.bRposition1Click) self.bRposition2.clicked.connect(self.bRposition2Click) self.bAxiSave.clicked.connect(self.bAxiSaveClick) #---历史数据界面按钮 self.bExport.clicked.connect(self.bExportClick) #---QT界面数据刷新线程 self.thread = MyThread() self.thread.setIdentity("thread1") self.thread.sinOut.connect(self.GUIfresh) self.thread.setVal(2) # fd = os.open( "debug.txt", os.O_RDWR|os.O_APPEND ) #---QT界面初始化 #标定控件隐藏效果 # self.bPsheetToolCalcNext.hide() # self.bPplateToolCalcNext.hide() # self.bNsheetToolCalcNext.hide() # self.bNplateToolCalcNext.hide() # self.tR1Mspeed.setText("10") # self.tR2Mspeed.setText("10") # #视学调试,表格标题 # self.tableWidgetPs.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) # self.tableWidgetPp.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) # self.tableWidgetNs.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) # self.tableWidgetNp.setHorizontalHeaderLabels(['机器人X','机器人Y','相机X','相机Y']) #产量数据 self.tTime.setText(dictPara['tTime']) self.tTotal.setText(dictPara['tTotal']) self.tGood.setText(dictPara['tGood']) self.tGoodRate.setText(dictPara['tGoodRate']) self.tBad.setText(dictPara['tBad']) self.tBadRate.setText(dictPara['tBadRate']) self.tFail.setText(dictPara['tFail']) #机器人IP参数 self.tLUip.setText(dictPara['tLUip']) self.tLDip.setText(dictPara['tLDip']) self.tRUip.setText(dictPara['tRUip']) self.tRDip.setText(dictPara['tRDip']) self.tC1ip.setText(dictPara['tC1ip']) self.tC2ip.setText(dictPara['tC2ip']) # 轴参数设置 self.tSpeed.setText(dictPara['tSpeed']) self.tPosion1.setText(dictPara['tPosion1']) self.tPosion2.setText(dictPara['tPosion2']) try: import PLC PLC.write("101",hex(int(dictPara['tSpeed']))[2:len(hex(int(dictPara['tSpeed'])))]) 
PLC.write("102",hex(int(dictPara['tPosion1']))[2:len(hex(int(dictPara['tPosion1'])))]) PLC.write("103",hex(int(dictPara['tPosion2']))[2:len(hex(int(dictPara['tPosion2'])))]) except Exception as e: print(str(e)) #视觉判定参数 # 正极过渡片 self.tStandardH.setText(dictPara['tStandardH']) self.tStandardD.setText(dictPara['tStandardD']) self.tNGpath.setText(dictPara['tNGpath']) #---全局变量初始化 flag=0 rf=0 gf=0 yf=0 bf=0 #---启动子程序 try: sockPs=socketClient.connect(dictPara['tC1ip'],2003) sockPr=socketClient.connect(dictPara['tC1ip'],2004) sockNs=socketClient.connect(dictPara['tC2ip'],2005) sockNr=socketClient.connect(dictPara['tC2ip'],2006) except Exception as e: print(str(e)) # PLC.openSerial() #---事件 def closeEvent(self, event): global ctrMelfaRxM try: pass except: pass self.close() os._exit(0) #---主界面按钮事件 #---自动运行 def bAutoClick(self): pass #---暂停 def bStopClick(self): pass #---复位 def bResetClick(self): # PLC.on("100",8) # time.sleep(0.1) # PLC.off("100",8) pass #---红灯 def bRedClick(self): global flag,rf flag=1 if rf==0: PLC.on("100",9) rf=1 flag=0 return 0 if rf==1: PLC.off("100",9) rf=0 flag=0 pass #---绿灯 def bGreenClick(self): global flag,gf flag=1 if gf==0: PLC.on("100",10) gf=1 flag=0 return 0 time.sleep(0.1) if gf==1: PLC.off("100",10) gf==0 flag=0 pass pass #---黄灯 def bYellowClick(self): global flag,yf flag=1 if yf==0: PLC.on("100",11) yf=1 flag=0 return 0 if yf==1: PLC.off("100",11) yf=0 flag=0 pass pass #---蜂鸣器 def bBuzzClick(self): global flag,bf flag=1 if bf==0: PLC.on("100",12) bf=1 flag=0 return 0 if bf==1: PLC.off("100",12) bf=0 flag=0 pass pass #---重置数据 def bDataResetClick(self): global countPsheetFail,countNsheetFail,countPplateFail,countNplateFail global countPsheetOK,countPsheetNG,countNsheetOK,countNsheetNG,countPplateOK,countPplateNG,countNplateOK,countNplateNG currentTime=time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())) self.tTime.setText(currentTime) self.tTotal.setText('0') self.tGood.setText('0') self.tGoodRate.setText('0') self.tBad.setText('0') 
self.tBadRate.setText('0') self.tFail.setText('0') self.outDebug("统计信息重置") #---保存数据 def bDataSaveClick(self): global paraName, conn,cursor,dictPara dictPara['tTime']=self.tTime.toPlainText() dictPara['tTotal']=self.tTotal.toPlainText() dictPara['tGood']=self.tGood.toPlainText() dictPara['tGoodRate']=self.tGoodRate.toPlainText() dictPara['tBad']=self.tBad.toPlainText() dictPara['tBadRate']=self.tBadRate.toPlainText() dictPara['tFail']=self.tFail.toPlainText() dictPara['tLUip']=self.tLUip.toPlainText() dictPara['tLDip']=self.tLDip.toPlainText() dictPara['tRUip']=self.tRUip.toPlainText() dictPara['tRDip']=self.tRDip.toPlainText() dictPara['tC1ip']=self.tC1ip.toPlainText() dictPara['tC2ip']=self.tC2ip.toPlainText() dictPara['tSpeed']=self.tSpeed.toPlainText() dictPara['tPosion1']=self.tPosion1.toPlainText() dictPara['tPosion2']=self.tPosion2.toPlainText() dictPara['tStandardH']=self.tStandardH.toPlainText() dictPara['tStandardD']=self.tStandardD.toPlainText() dictPara['tNGpath']=self.tNGpath.toPlainText() # print(dictPara) for key in dictPara: cursor.execute("update para set data=? 
where name = ?",(dictPara[key],key,)) conn.commit() self.outDebug("系统参数保存") # b=win32api.MessageBox(0, "保存成功", "参数保存",win32con.MB_OK) #---检测设定界面,按钮事件 def bSelectDocClick(self): directory1 = QFileDialog.getExistingDirectory(self, "选取文件夹", "D:/") #起始路径 self.tNGpath.setText(directory1) self.outDebug("修改NG文件夹:"+directory1) # print(directory1) # img1=cv2.imread("arrayBmp0.bmp") # cv2.imwrite(directory1+"/arrayBmp3.bmp",img1) #---视觉调试界面,按钮事件 #---相机触发 #bTrigFunc相机触发功能函数 def bTrigFunc(self,sn,file,camNum,camName,visionStand): pass def bLUtrigClick(self): pass def bLDtrigClick(self): pass def bRUtrigClick(self): pass def bRDtrigClick(self): pass def bC1trigClick(self): global sockPs,sockPr,sockNs,sockNr socketClient.sent(sockPs,"TRG") msg=socketClient.rev(sockPr,1024) print("msg:",msg) self.tCode1.setText(str(msg)) pass def bC2trigClick(self): global sockPs,sockPr,sockNs,sockNr socketClient.sent(sockNs,"TRG") msg=socketClient.rev(sockNr,1024) print("msg:",msg) self.tCode2.setText(str(msg)) pass #---机台手动按钮 def bCyUpClick(self): global flag flag=1 PLC.on("100",6) time.sleep(0.1) PLC.off("100",7) flag=0 pass def bCyDownClick(self): global flag flag=1 PLC.on("100",7) time.sleep(0.1) PLC.off("100",6) flag=0 pass def bLposition0Click(self): global flag flag=1 PLC.on("100",0) time.sleep(0.1) PLC.off("100",0) flag=0 pass def bLposition1Click(self): global flag flag=1 PLC.on("100",1) time.sleep(0.1) PLC.off("100",1) flag=0 pass def bLposition2Click(self): global flag flag=1 PLC.on("100",2) time.sleep(0.1) PLC.off("100",2) flag=0 pass def bRposition0Click(self): global flag flag=1 PLC.on("100",3) time.sleep(0.1) PLC.off("100",3) flag=0 pass def bRposition1Click(self): global flag flag=1 PLC.on("100",4) time.sleep(0.1) PLC.off("100",4) flag=0 pass def bRposition2Click(self): global flag flag=1 PLC.on("100",5) time.sleep(0.1) PLC.off("100",5) flag=0 pass #---历史数据界面,按钮事件 def bExportClick(self): pass #---写入PLC数据界面,按钮事件 def bAxiSaveClick(self): 
PLC.write("101",hex(int(dictPara['tSpeed']))[2:len(hex(int(dictPara['tSpeed'])))]) PLC.write("102",hex(int(dictPara['tPosion1']))[2:len(hex(int(dictPara['tPosion1'])))]) PLC.write("103",hex(int(dictPara['tPosion2']))[2:len(hex(int(dictPara['tPosion2'])))]) pass #---F关闭线
def _async_raise(self,tid, exctype): """raises the exception, performs cleanup if needed""" tid = c_long(tid) if not inspect.isclass(exctype): exctype = type(exctype) res = pythonapi.PyThreadState_SetAsyncExc(tid, py_object(exctype)) if res == 0: raise ValueError("invalid thread id") elif res != 1: # """if it returns a number greater than one, you're in trouble, # and you should call it again with exc=NULL to revert the effect""" pythonapi.PyThreadState_SetAsyncExc(tid, None) raise SystemError("PyThreadState_SetAsyncExc failed") def stop_thread(self,thread): self._async_raise(thread.ident, SystemExit) #---F程序跟踪功能 def outDebug(self,text): global fd # fd = os.open( "debug.txt", os.O_RDWR|os.O_APPEND|os.O_CREAT ) # # Write one string # line = "[ "+time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))+" ]"+text+"\n" # # string needs to be converted byte object # b = str.encode(line) # os.write(fd, b) # # Close opened filer # os.close( fd) pass #---F数据库刷新功能 def sqlUpdate(self): global cursor, p ,conn ,dictPara,basler,sn cursor.execute('select * from para' ) value = cursor.fetchall() # print(value) dictPara={} for i in range(len(value)): dictPara[value[i][0]]=str(value[i][1]) #---F图片转换 def toPixImg(self,img1): img1Rgb=cv2.cvtColor(img1,cv2.COLOR_BGR2RGB) qimag1=QImage(img1Rgb[:],img1Rgb.shape[1], img1Rgb.shape[0],img1Rgb.shape[1] * 3, QImage.Format_RGB888) pixImg=QPixmap(QPixmap.fromImage(qimag1)) return pixImg #---F QT界面刷新程序 def GUIfresh(self): global flag con=[self.bLposition0,self.bLposition1,self.bLposition2,\ self.bRposition0,self.bRposition1,self.bRposition2,\ self.bCyUp,self.bCyDown,self.bRed,self.bGreen,self.bYellow,self.bBuzz] if flag==0: state=PLC.readState() for i in range(len(con)): if state[i]== True: con[i].setStyleSheet("background-color: rgb(0, 255, 0);") else: con[i].setStyleSheet("background-color: rgb(255, 0, 0);") self.bDataSaveClick() #MAINSELECT #---F QT界面刷新线程 class MyThread(QThread): sinOut = pyqtSignal(str) def __init__(self,parent=None): 
super(MyThread,self).__init__(parent) self.identity = None def setIdentity(self,text): self.identity = text def setVal(self,val): self.times = int(val) # 执行线程的run方法 self.start() def run(self): while self.times > 0 and self.identity: # 发射信号 self.sinOut.emit(self.identity+"==>"+str(self.times)) time.sleep(1) if __name__ == "__main__": app = QtWidgets.QApplication(sys.argv) mainWindow = MyApp() # visionWindow=Vison() # windowConn() # mainWindow.show() app.setActiveWindow(mainWindow) mainWindow.showMaximized() # mainWindow.showFullScreen() sys.exit(app.exec_())
程功能
identifier_name
mgmt.py
# import pythoncom # import win32serviceutil # import win32service # import win32event # import servicemanager # import socket # import time # import datetime # import sys # import os # import logging # import random # # from win32com.shell import shell, shellcon # import ntsecuritycon # import win32security # import win32gui # import win32ui # import win32con # import win32gui_struct # import win32ts # import win32process # import win32profile # import ctypes # import wmi # import traceback import win32trace import win32api import sys import os import traceback import util # Pull in logger first and set it up! from mgmt_EventLog import EventLog global LOGGER LOGGER = EventLog(os.path.join(util.LOG_FOLDER, 'ope-mgmt.log'), service_name="OPEMgmt") from color import p, set_log_level from mgmt_UserAccounts import UserAccounts from mgmt_FolderPermissions import FolderPermissions from mgmt_ScreenShot import ScreenShot from mgmt_RegistrySettings import RegistrySettings from mgmt_NetworkDevices import NetworkDevices from mgmt_CredentialProcess import CredentialProcess from mgmt_SystemTime import SystemTime from mgmt_GroupPolicy import GroupPolicy from mgmt_ProcessManagement import ProcessManagement from mgmt_Computer import Computer from mgmt_COMPorts import COMPorts from mgmt_LockScreen import LockScreen # Get the logging level value_name = "log_level" value = RegistrySettings.get_reg_value(app="OPEService", value_name=value_name, default=3, value_type="REG_DWORD") set_log_level(value) # Pre-declare - fill out later global valid_commands valid_commands = dict() def RunAsTraceCollector(): import sys try: import win32api win32api.SetConsoleTitle("Python Trace Collector") except: pass # Oh well! win32trace.InitRead() p("Collecting Python Trace Output...", log_level=4) try: while 1: # a short timeout means ctrl+c works next time we wake... 
sys.stdout.write(win32trace.blockingread(500)) except KeyboardInterrupt: p("}}ybCtrl+C - quitting...}}xx", log_level=3) def ensure_admin(): # Get the is in administrators, is uac, and username and return them return (UserAccounts.is_in_admin_group(), UserAccounts.is_uac_admin(), UserAccounts.get_current_user()) def show_version(): ver = CredentialProcess.get_mgmt_version() p("}}gbVersion: " + str(ver) + "}}xx") return True def show_help(): global LOGGER, valid_commands # Find the help key for this command cmd = util.get_param(1).lower() param1 = util.get_param(2).lower() if cmd == "" or param1 == "": # Missing required parameters! p("}}rnMissing Required Parameters! " + cmd + " - " + param1 + "}}xx", log_level=1) return False if not param1 in valid_commands: p("}}rnInvalid Command! " + param1 + "}}xx", log_level=1) commands = list(valid_commands.keys()) p("}}yn Valid Commands: " + str(commands) + "}}xx") p("}}ybFor help - type mgmt.exe help (command)}}xx") return False cmd_parts = valid_commands[param1] if cmd_parts is None: p("}}rnInvalid Command - not configured! " + param1 + "}}xx", log_level=1) return False help_msg = cmd_parts["help"] if help_msg is None: p("}}rnNo Help Provided! " + param1 + "}}xx", log_level=1) return False p("}}yb" + help_msg + "}}xx") return True valid_commands = { "help": { "function": show_help, "help": "Display help information for the specified command (e.g. 
mgmt.exe help set_log_level)", }, ### SETTINGS ### # Add self to system path "add_mgmt_to_system_path": { "function": RegistrySettings.add_mgmt_utility_to_path, "help": "Add the path to the mgmt.exe file to the system path for easier use" }, # Set log level "set_log_level": { "function": RegistrySettings.set_log_level, "help": "Adjust how verbose we want logging to be (default 3)" }, # Set registry/folder run timer "set_default_permissions_timer": { "function": RegistrySettings.set_default_permissions_timer, "help": "How often do you want permissions reset on folder/registry (default 3600)" }, # Set Frequency for scanning nics "set_scan_nics_timer": { "function": RegistrySettings.set_scan_nics_timer, "help": "How often do you want to scan nics for approved/disapproved nics (default 60)" }, # How often should service reload settings "set_reload_settings_timer": { "function": RegistrySettings.set_reload_settings_timer, "help": "How often should the service reload settings from the registry (default 30)" }, # Set how often to snap a screenshot "set_screen_shot_timer": { "function": RegistrySettings.set_screen_shot_timer, "help": "How often should we snap screen shots (default 30-300)" }, # Show service trace log "show_trace": { "function": RunAsTraceCollector, "help": "Show console logs for the OPEService" }, # Disable hostednetwork options on the wlan devices "disable_wlan_hosted_network": { "function": NetworkDevices.disable_wlan_hosted_network, "help": "Turn off hosted network options (nework sharing with other devices)" }, "enable_wlan_hosted_network": { "function": NetworkDevices.enable_wlan_hosted_network, "help": "Turn on hosted network options (nework sharing with other devices)" }, # Add/remove a nic from the approved list "approve_nic": { "function": NetworkDevices.approve_nic, "help": "Add a nic to the approved list - params include nic name (OR ID) and netowrk subnet it is approved on\n" + "NOTE: Subnet should be first part of address - it is a simple 
match (e.g. 202.5.222 for 202.5.222.34)\n" + "mgmt.exe add_nic \"Intel(R) 82579LM Gigabit Network Connection\" 202.5.222", }, "remove_nic": { "function": NetworkDevices.remove_nic, "help": "Remove a nic from the approved list - need both nic name and network\n" + "mgmt.exe remove_nic \"Intel(R) 82579LM Gigabit Network Connection\" 202.5.222", }, "list_approved_nics": { "function": NetworkDevices.list_approved_nics, "help": "Show a list of currently approved nics", }, "list_system_nics": { "function": NetworkDevices.list_system_nics, "help": "Show a list of nics plugged into the system and their hardware status" }, "get_machine_info": { "function": Computer.get_machine_info, "help": "Return some system information such as serial number" }, ### SECURITY COMMANDS ### # Snap a screen shot of the users desktop "screen_shot": { "function": ScreenShot.take_screenshot, "help": "Take a screen shot of the currently logged in user", }, # Lock down permissions to OPE folders "set_default_ope_folder_permissions": { "function": FolderPermissions.set_default_ope_folder_permissions, "help": "Reset permissions on %programdata%\\ope folders", }, # Lock down permissions to OPE registry entries "set_default_ope_registry_permissions": { "function": RegistrySettings.set_default_ope_registry_permissions, "help": "Reset permissions on OPE registry keys", }, # Fire when a device status changes (nic plugged in?) "device_event": { "function": NetworkDevices.device_event, "help": "A device changed (plugged in?) - do the appropriate steps to keep system secure (fired as event from OPEService)" }, # If any nics aren't in the approved list, disable them "scan_nics": { "function": NetworkDevices.scan_nics, "help": "Scan for nics that aren't approved and turn them off or on" }, # Disable com ports not on the approved list (none?) 
"scan_com_ports": { "function": COMPorts.scan_com_ports, "help": "Find and disable com ports that we don't want enabled" }, # Call to kill stuff if a credential fails mid-way (e.g. disable student users, lock things out) "bad_credential": { "function": UserAccounts.disable_student_accounts, "help": "If anything bad happens, make sure all student accounts are " + \ "disabled so they can't use the system if it is returned to them by mistake" }, # Apply group policy "apply_group_policy": { "function": GroupPolicy.apply_group_policy, "help": "Apply lock down windows group policy settings" }, # Reset to win default group policy "reset_group_policy": { "function": GroupPolicy.reset_group_policy_to_default, "help": "Reset group policy to windows default (remove security)" }, # Export the current group policy to a folder "export_group_policy": { "function": GroupPolicy.export_group_policy, "help": "Export the current group policy to a folder (e.g. mgmt export_group_policy exported_gpo )" }, # Apply firewall Policy "apply_firewall_policy": { "function": GroupPolicy.apply_firewall_policy, "help": "Lock down firewall with pre-defined policy" }, # Reset firewall policy to default "reset_firewall_policy": { "function": GroupPolicy.reset_firewall_policy, "help": "Reset firewall policy back to factory defaults" }, # Student Account Functions "create_student_account": { "function": UserAccounts.create_local_student_account, "help": "Create the student account in the windows system" }, "remove_account": { "function": UserAccounts.delete_user, "help": "Remove the windows account AND profile from the system (e.g. mgmt remove_account s777777)" }, "disable_account": { "function": UserAccounts.disable_account, "help": "Disable the specified windows account (e.g. mgmt disable_account s777777)" }, "enable_account": { "function": UserAccounts.enable_account, "help": "Enable the specified windows account (e.g. 
mgmt enabl_account s777777)" }, "disable_student_accounts": { "function": UserAccounts.disable_student_accounts, "help": "Disable ALL student accounts on this machine." }, # Remove student profile folder (delete files) "remove_account_profile": { "function": UserAccounts.remove_account_profile, "help": "Remove the windows profile for this account (e.g. mgmt remove_account_profile s777777)" }, # Download the OPE CA cert and add to the trusted list "trust_ope_certs": { "function": CredentialProcess.trust_ope_certs, "help": "Download CA crt from the OPE server and add to the trusted list" }, # Lock the screen for the current user "lock_screen": { "function": UserAccounts.lock_screen_for_user, "help": "Lock the screen. If no user specified, locks the current screen.", "require_admin": False }, "log_out_user": { "function": UserAccounts.log_out_user, "help": "Log out the specified user" }, "lock_boot_settings": { "function": FolderPermissions.lock_boot_settings, "help": "Lock down boot settings so that you can't use safe mode or restore features" }, "unlock_boot_settings": { "function": FolderPermissions.unlock_boot_settings, "help": "UnLock boot settings so that you can use restore features" }, "update_uefi_boot_order": { "function": FolderPermissions.update_uefi_boot_order, "help": "Update boot order for UEFI boot settings" }, "unlock_machine": { "function": CredentialProcess.unlock_machine, "help": "Disable student accounts and turn off security/policy/firewall settings - allow admins to plug in USB drive/etc..." }, "lock_machine": { "function": CredentialProcess.lock_machine, "help": "Turn security features back on and re-enable student account." },
"function": LockScreen.show_lock_screen_widget, "help": "Launch the lock screen widget which shoes current state of syncing/updates/etc..." }, "refresh_lock_screen_widget": { "function": LockScreen.refresh_lock_screen_widget, "help": "Update the lockscreen widget with the latest files and re-launch" }, #### Do credential process ### "credential_laptop": { "function": CredentialProcess.credential_laptop, "help": "Run the credential process to lock down this laptop" }, ### UPDATE/SYNC COMMANDS ### # Force a git pull "get_git_branch": { "function": RegistrySettings.get_git_branch, "help": "Get which branch to use when pulling updates from git repo" }, "set_git_branch": { "function": RegistrySettings.set_git_branch, "help": "Set which branch to use when pulling updates from git repo" }, "git_pull": { "function": ProcessManagement.git_pull_branch, "help": "Pull updates down from online or local SMC server" }, # Upgrade everything from the smc server and restart services (if online) "start_upgrade": { "function": CredentialProcess.start_upgrade_process, "help": "Start the OPE software update process - processes will be stopped/started automatically\nCan also use position arguments to specify git branch and force update (e.g. mgmt.exe start_upgrade master -f)" }, "finish_upgrade": { "function": CredentialProcess.finish_upgrade_process, "help": "Do follow-up steps after an upgrade (e.g. 
re-apply security, re-enable student account)" }, # Bounce of SMC and get the current password for this student and set it # in the local machine "sync_student_password": { "function": CredentialProcess.sync_student_password, "help": "Update the local login password from the server" }, # Send screenshots/logs/reports to SMC (if online) "sync_logs_to_smc": { "function": CredentialProcess.sync_logs_to_smc, "help": "Push log files and screen shots to SMC server", "require_admin": False }, # Sync users LMSApp Data w Canvas "sync_lms_app_data": { "function": CredentialProcess.sync_lms_app_data, "help": "Sync LMS App data in headless mode for the current student (auto sync)", "require_admin": False }, # Sync users work folder with SMC "sync_work_folder": { "function": CredentialProcess.sync_work_folder, "help": "Sync work folders for the student (e.g. sync work files to desktop)", "require_admin": False }, "sync_time": { "function": SystemTime.sync_time_w_ntp, "help": "Force time sync with the SMC server" }, "ping_smc": { "function": CredentialProcess.ping_smc, "help": "Connect to the SMC server to see if we have connection" }, "version": { "function": show_version, "help": "Display the version for the LMS software", "require_admin": False }, "test_cmd": { "function": util.test_params, "help": "Debugging command", "hide": True, }, } if __name__ == "__main__": # returns (is in admins, is uac, curr_user_name) is_admin = ensure_admin() # Parse Arguments cmd = util.get_param(1).lower() if cmd not in valid_commands: # Unknown Command?? p("}}rnInvalid Command! 
- " + str(cmd) + "}}xx", log_level=1) # Only show commands if UAC active if is_admin[1]: # Remove hidden commands print_cmds = {} for k in valid_commands.keys(): item = valid_commands[k] if not 'hide' in item or not item['hide'] is True: print_cmds[k]=item commands = sorted(print_cmds.keys()) p("}}yn Valid Commands: " + str(commands) + "}}xx") p("}}ybFor help - type mgmt.exe help (command)}}xx") sys.exit(1) # Run the function associated w the command cmd_parts = valid_commands[cmd] if cmd_parts is None: p("}}rnERROR - Command not avaialable " + cmd + " - coming soon...}}xx", log_level=1) sys.exit(1) cmd_requires_admin = util.get_dict_value(cmd_parts, "require_admin", True) if cmd_requires_admin is True and is_admin[1] is not True: # Command requires elevation and this user doesn't have it! if is_admin[0] is not True: # User is NOT in the administrators group p("}}rbINVALID USER - Must be in the administrators group to use this utility!\n" + "Attempt logged for user " + is_admin[2] + ".}}xx", log_level=1) sys.exit(2) if is_admin[1] is not True: # User is NOT running with UAC enabled p("}}rbINVALID USER - Must be in UAC prompt to use this utility!\n" + "Attempt logged for user " + is_admin[2] + ".}}xx", log_level=1) sys.exit(2) sys.exit(2) # Get the function assigned to this command f = cmd_parts["function"] if f is None: p("}}rnERROR - No function assigned to command " + cmd + " - coming soon...}}xx", log_level=1) sys.exit(1) exit_code = 0 try: util.CMD_FUNCTION = cmd p("}}gnRunning " + cmd + "}}xx", log_level=4) ret = f() #p("}}ynReturn Code: " + str(ret) + "}}xx") if ret is not None and ret != True: exit_code = -1 except Exception as ex: p("}}rnERROR: " + str(ex) + "}}xx", log_level=1) exit_code = 1 # Clean exit sys.exit(exit_code)
"show_lock_screen_widget": {
random_line_split
mgmt.py
# import pythoncom # import win32serviceutil # import win32service # import win32event # import servicemanager # import socket # import time # import datetime # import sys # import os # import logging # import random # # from win32com.shell import shell, shellcon # import ntsecuritycon # import win32security # import win32gui # import win32ui # import win32con # import win32gui_struct # import win32ts # import win32process # import win32profile # import ctypes # import wmi # import traceback import win32trace import win32api import sys import os import traceback import util # Pull in logger first and set it up! from mgmt_EventLog import EventLog global LOGGER LOGGER = EventLog(os.path.join(util.LOG_FOLDER, 'ope-mgmt.log'), service_name="OPEMgmt") from color import p, set_log_level from mgmt_UserAccounts import UserAccounts from mgmt_FolderPermissions import FolderPermissions from mgmt_ScreenShot import ScreenShot from mgmt_RegistrySettings import RegistrySettings from mgmt_NetworkDevices import NetworkDevices from mgmt_CredentialProcess import CredentialProcess from mgmt_SystemTime import SystemTime from mgmt_GroupPolicy import GroupPolicy from mgmt_ProcessManagement import ProcessManagement from mgmt_Computer import Computer from mgmt_COMPorts import COMPorts from mgmt_LockScreen import LockScreen # Get the logging level value_name = "log_level" value = RegistrySettings.get_reg_value(app="OPEService", value_name=value_name, default=3, value_type="REG_DWORD") set_log_level(value) # Pre-declare - fill out later global valid_commands valid_commands = dict() def RunAsTraceCollector(): import sys try: import win32api win32api.SetConsoleTitle("Python Trace Collector") except: pass # Oh well! win32trace.InitRead() p("Collecting Python Trace Output...", log_level=4) try: while 1: # a short timeout means ctrl+c works next time we wake... 
sys.stdout.write(win32trace.blockingread(500)) except KeyboardInterrupt: p("}}ybCtrl+C - quitting...}}xx", log_level=3) def ensure_admin(): # Get the is in administrators, is uac, and username and return them return (UserAccounts.is_in_admin_group(), UserAccounts.is_uac_admin(), UserAccounts.get_current_user()) def show_version(): ver = CredentialProcess.get_mgmt_version() p("}}gbVersion: " + str(ver) + "}}xx") return True def show_help():
valid_commands = { "help": { "function": show_help, "help": "Display help information for the specified command (e.g. mgmt.exe help set_log_level)", }, ### SETTINGS ### # Add self to system path "add_mgmt_to_system_path": { "function": RegistrySettings.add_mgmt_utility_to_path, "help": "Add the path to the mgmt.exe file to the system path for easier use" }, # Set log level "set_log_level": { "function": RegistrySettings.set_log_level, "help": "Adjust how verbose we want logging to be (default 3)" }, # Set registry/folder run timer "set_default_permissions_timer": { "function": RegistrySettings.set_default_permissions_timer, "help": "How often do you want permissions reset on folder/registry (default 3600)" }, # Set Frequency for scanning nics "set_scan_nics_timer": { "function": RegistrySettings.set_scan_nics_timer, "help": "How often do you want to scan nics for approved/disapproved nics (default 60)" }, # How often should service reload settings "set_reload_settings_timer": { "function": RegistrySettings.set_reload_settings_timer, "help": "How often should the service reload settings from the registry (default 30)" }, # Set how often to snap a screenshot "set_screen_shot_timer": { "function": RegistrySettings.set_screen_shot_timer, "help": "How often should we snap screen shots (default 30-300)" }, # Show service trace log "show_trace": { "function": RunAsTraceCollector, "help": "Show console logs for the OPEService" }, # Disable hostednetwork options on the wlan devices "disable_wlan_hosted_network": { "function": NetworkDevices.disable_wlan_hosted_network, "help": "Turn off hosted network options (nework sharing with other devices)" }, "enable_wlan_hosted_network": { "function": NetworkDevices.enable_wlan_hosted_network, "help": "Turn on hosted network options (nework sharing with other devices)" }, # Add/remove a nic from the approved list "approve_nic": { "function": NetworkDevices.approve_nic, "help": "Add a nic to the approved list - params include nic name 
(OR ID) and netowrk subnet it is approved on\n" + "NOTE: Subnet should be first part of address - it is a simple match (e.g. 202.5.222 for 202.5.222.34)\n" + "mgmt.exe add_nic \"Intel(R) 82579LM Gigabit Network Connection\" 202.5.222", }, "remove_nic": { "function": NetworkDevices.remove_nic, "help": "Remove a nic from the approved list - need both nic name and network\n" + "mgmt.exe remove_nic \"Intel(R) 82579LM Gigabit Network Connection\" 202.5.222", }, "list_approved_nics": { "function": NetworkDevices.list_approved_nics, "help": "Show a list of currently approved nics", }, "list_system_nics": { "function": NetworkDevices.list_system_nics, "help": "Show a list of nics plugged into the system and their hardware status" }, "get_machine_info": { "function": Computer.get_machine_info, "help": "Return some system information such as serial number" }, ### SECURITY COMMANDS ### # Snap a screen shot of the users desktop "screen_shot": { "function": ScreenShot.take_screenshot, "help": "Take a screen shot of the currently logged in user", }, # Lock down permissions to OPE folders "set_default_ope_folder_permissions": { "function": FolderPermissions.set_default_ope_folder_permissions, "help": "Reset permissions on %programdata%\\ope folders", }, # Lock down permissions to OPE registry entries "set_default_ope_registry_permissions": { "function": RegistrySettings.set_default_ope_registry_permissions, "help": "Reset permissions on OPE registry keys", }, # Fire when a device status changes (nic plugged in?) "device_event": { "function": NetworkDevices.device_event, "help": "A device changed (plugged in?) - do the appropriate steps to keep system secure (fired as event from OPEService)" }, # If any nics aren't in the approved list, disable them "scan_nics": { "function": NetworkDevices.scan_nics, "help": "Scan for nics that aren't approved and turn them off or on" }, # Disable com ports not on the approved list (none?) 
"scan_com_ports": { "function": COMPorts.scan_com_ports, "help": "Find and disable com ports that we don't want enabled" }, # Call to kill stuff if a credential fails mid-way (e.g. disable student users, lock things out) "bad_credential": { "function": UserAccounts.disable_student_accounts, "help": "If anything bad happens, make sure all student accounts are " + \ "disabled so they can't use the system if it is returned to them by mistake" }, # Apply group policy "apply_group_policy": { "function": GroupPolicy.apply_group_policy, "help": "Apply lock down windows group policy settings" }, # Reset to win default group policy "reset_group_policy": { "function": GroupPolicy.reset_group_policy_to_default, "help": "Reset group policy to windows default (remove security)" }, # Export the current group policy to a folder "export_group_policy": { "function": GroupPolicy.export_group_policy, "help": "Export the current group policy to a folder (e.g. mgmt export_group_policy exported_gpo )" }, # Apply firewall Policy "apply_firewall_policy": { "function": GroupPolicy.apply_firewall_policy, "help": "Lock down firewall with pre-defined policy" }, # Reset firewall policy to default "reset_firewall_policy": { "function": GroupPolicy.reset_firewall_policy, "help": "Reset firewall policy back to factory defaults" }, # Student Account Functions "create_student_account": { "function": UserAccounts.create_local_student_account, "help": "Create the student account in the windows system" }, "remove_account": { "function": UserAccounts.delete_user, "help": "Remove the windows account AND profile from the system (e.g. mgmt remove_account s777777)" }, "disable_account": { "function": UserAccounts.disable_account, "help": "Disable the specified windows account (e.g. mgmt disable_account s777777)" }, "enable_account": { "function": UserAccounts.enable_account, "help": "Enable the specified windows account (e.g. 
mgmt enabl_account s777777)" }, "disable_student_accounts": { "function": UserAccounts.disable_student_accounts, "help": "Disable ALL student accounts on this machine." }, # Remove student profile folder (delete files) "remove_account_profile": { "function": UserAccounts.remove_account_profile, "help": "Remove the windows profile for this account (e.g. mgmt remove_account_profile s777777)" }, # Download the OPE CA cert and add to the trusted list "trust_ope_certs": { "function": CredentialProcess.trust_ope_certs, "help": "Download CA crt from the OPE server and add to the trusted list" }, # Lock the screen for the current user "lock_screen": { "function": UserAccounts.lock_screen_for_user, "help": "Lock the screen. If no user specified, locks the current screen.", "require_admin": False }, "log_out_user": { "function": UserAccounts.log_out_user, "help": "Log out the specified user" }, "lock_boot_settings": { "function": FolderPermissions.lock_boot_settings, "help": "Lock down boot settings so that you can't use safe mode or restore features" }, "unlock_boot_settings": { "function": FolderPermissions.unlock_boot_settings, "help": "UnLock boot settings so that you can use restore features" }, "update_uefi_boot_order": { "function": FolderPermissions.update_uefi_boot_order, "help": "Update boot order for UEFI boot settings" }, "unlock_machine": { "function": CredentialProcess.unlock_machine, "help": "Disable student accounts and turn off security/policy/firewall settings - allow admins to plug in USB drive/etc..." }, "lock_machine": { "function": CredentialProcess.lock_machine, "help": "Turn security features back on and re-enable student account." }, "show_lock_screen_widget": { "function": LockScreen.show_lock_screen_widget, "help": "Launch the lock screen widget which shoes current state of syncing/updates/etc..." 
}, "refresh_lock_screen_widget": { "function": LockScreen.refresh_lock_screen_widget, "help": "Update the lockscreen widget with the latest files and re-launch" }, #### Do credential process ### "credential_laptop": { "function": CredentialProcess.credential_laptop, "help": "Run the credential process to lock down this laptop" }, ### UPDATE/SYNC COMMANDS ### # Force a git pull "get_git_branch": { "function": RegistrySettings.get_git_branch, "help": "Get which branch to use when pulling updates from git repo" }, "set_git_branch": { "function": RegistrySettings.set_git_branch, "help": "Set which branch to use when pulling updates from git repo" }, "git_pull": { "function": ProcessManagement.git_pull_branch, "help": "Pull updates down from online or local SMC server" }, # Upgrade everything from the smc server and restart services (if online) "start_upgrade": { "function": CredentialProcess.start_upgrade_process, "help": "Start the OPE software update process - processes will be stopped/started automatically\nCan also use position arguments to specify git branch and force update (e.g. mgmt.exe start_upgrade master -f)" }, "finish_upgrade": { "function": CredentialProcess.finish_upgrade_process, "help": "Do follow-up steps after an upgrade (e.g. 
re-apply security, re-enable student account)" }, # Bounce of SMC and get the current password for this student and set it # in the local machine "sync_student_password": { "function": CredentialProcess.sync_student_password, "help": "Update the local login password from the server" }, # Send screenshots/logs/reports to SMC (if online) "sync_logs_to_smc": { "function": CredentialProcess.sync_logs_to_smc, "help": "Push log files and screen shots to SMC server", "require_admin": False }, # Sync users LMSApp Data w Canvas "sync_lms_app_data": { "function": CredentialProcess.sync_lms_app_data, "help": "Sync LMS App data in headless mode for the current student (auto sync)", "require_admin": False }, # Sync users work folder with SMC "sync_work_folder": { "function": CredentialProcess.sync_work_folder, "help": "Sync work folders for the student (e.g. sync work files to desktop)", "require_admin": False }, "sync_time": { "function": SystemTime.sync_time_w_ntp, "help": "Force time sync with the SMC server" }, "ping_smc": { "function": CredentialProcess.ping_smc, "help": "Connect to the SMC server to see if we have connection" }, "version": { "function": show_version, "help": "Display the version for the LMS software", "require_admin": False }, "test_cmd": { "function": util.test_params, "help": "Debugging command", "hide": True, }, } if __name__ == "__main__": # returns (is in admins, is uac, curr_user_name) is_admin = ensure_admin() # Parse Arguments cmd = util.get_param(1).lower() if cmd not in valid_commands: # Unknown Command?? p("}}rnInvalid Command! 
- " + str(cmd) + "}}xx", log_level=1) # Only show commands if UAC active if is_admin[1]: # Remove hidden commands print_cmds = {} for k in valid_commands.keys(): item = valid_commands[k] if not 'hide' in item or not item['hide'] is True: print_cmds[k]=item commands = sorted(print_cmds.keys()) p("}}yn Valid Commands: " + str(commands) + "}}xx") p("}}ybFor help - type mgmt.exe help (command)}}xx") sys.exit(1) # Run the function associated w the command cmd_parts = valid_commands[cmd] if cmd_parts is None: p("}}rnERROR - Command not avaialable " + cmd + " - coming soon...}}xx", log_level=1) sys.exit(1) cmd_requires_admin = util.get_dict_value(cmd_parts, "require_admin", True) if cmd_requires_admin is True and is_admin[1] is not True: # Command requires elevation and this user doesn't have it! if is_admin[0] is not True: # User is NOT in the administrators group p("}}rbINVALID USER - Must be in the administrators group to use this utility!\n" + "Attempt logged for user " + is_admin[2] + ".}}xx", log_level=1) sys.exit(2) if is_admin[1] is not True: # User is NOT running with UAC enabled p("}}rbINVALID USER - Must be in UAC prompt to use this utility!\n" + "Attempt logged for user " + is_admin[2] + ".}}xx", log_level=1) sys.exit(2) sys.exit(2) # Get the function assigned to this command f = cmd_parts["function"] if f is None: p("}}rnERROR - No function assigned to command " + cmd + " - coming soon...}}xx", log_level=1) sys.exit(1) exit_code = 0 try: util.CMD_FUNCTION = cmd p("}}gnRunning " + cmd + "}}xx", log_level=4) ret = f() #p("}}ynReturn Code: " + str(ret) + "}}xx") if ret is not None and ret != True: exit_code = -1 except Exception as ex: p("}}rnERROR: " + str(ex) + "}}xx", log_level=1) exit_code = 1 # Clean exit sys.exit(exit_code)
global LOGGER, valid_commands # Find the help key for this command cmd = util.get_param(1).lower() param1 = util.get_param(2).lower() if cmd == "" or param1 == "": # Missing required parameters! p("}}rnMissing Required Parameters! " + cmd + " - " + param1 + "}}xx", log_level=1) return False if not param1 in valid_commands: p("}}rnInvalid Command! " + param1 + "}}xx", log_level=1) commands = list(valid_commands.keys()) p("}}yn Valid Commands: " + str(commands) + "}}xx") p("}}ybFor help - type mgmt.exe help (command)}}xx") return False cmd_parts = valid_commands[param1] if cmd_parts is None: p("}}rnInvalid Command - not configured! " + param1 + "}}xx", log_level=1) return False help_msg = cmd_parts["help"] if help_msg is None: p("}}rnNo Help Provided! " + param1 + "}}xx", log_level=1) return False p("}}yb" + help_msg + "}}xx") return True
identifier_body
mgmt.py
# import pythoncom # import win32serviceutil # import win32service # import win32event # import servicemanager # import socket # import time # import datetime # import sys # import os # import logging # import random # # from win32com.shell import shell, shellcon # import ntsecuritycon # import win32security # import win32gui # import win32ui # import win32con # import win32gui_struct # import win32ts # import win32process # import win32profile # import ctypes # import wmi # import traceback import win32trace import win32api import sys import os import traceback import util # Pull in logger first and set it up! from mgmt_EventLog import EventLog global LOGGER LOGGER = EventLog(os.path.join(util.LOG_FOLDER, 'ope-mgmt.log'), service_name="OPEMgmt") from color import p, set_log_level from mgmt_UserAccounts import UserAccounts from mgmt_FolderPermissions import FolderPermissions from mgmt_ScreenShot import ScreenShot from mgmt_RegistrySettings import RegistrySettings from mgmt_NetworkDevices import NetworkDevices from mgmt_CredentialProcess import CredentialProcess from mgmt_SystemTime import SystemTime from mgmt_GroupPolicy import GroupPolicy from mgmt_ProcessManagement import ProcessManagement from mgmt_Computer import Computer from mgmt_COMPorts import COMPorts from mgmt_LockScreen import LockScreen # Get the logging level value_name = "log_level" value = RegistrySettings.get_reg_value(app="OPEService", value_name=value_name, default=3, value_type="REG_DWORD") set_log_level(value) # Pre-declare - fill out later global valid_commands valid_commands = dict() def RunAsTraceCollector(): import sys try: import win32api win32api.SetConsoleTitle("Python Trace Collector") except: pass # Oh well! win32trace.InitRead() p("Collecting Python Trace Output...", log_level=4) try: while 1: # a short timeout means ctrl+c works next time we wake... 
sys.stdout.write(win32trace.blockingread(500)) except KeyboardInterrupt: p("}}ybCtrl+C - quitting...}}xx", log_level=3) def ensure_admin(): # Get the is in administrators, is uac, and username and return them return (UserAccounts.is_in_admin_group(), UserAccounts.is_uac_admin(), UserAccounts.get_current_user()) def
(): ver = CredentialProcess.get_mgmt_version() p("}}gbVersion: " + str(ver) + "}}xx") return True def show_help(): global LOGGER, valid_commands # Find the help key for this command cmd = util.get_param(1).lower() param1 = util.get_param(2).lower() if cmd == "" or param1 == "": # Missing required parameters! p("}}rnMissing Required Parameters! " + cmd + " - " + param1 + "}}xx", log_level=1) return False if not param1 in valid_commands: p("}}rnInvalid Command! " + param1 + "}}xx", log_level=1) commands = list(valid_commands.keys()) p("}}yn Valid Commands: " + str(commands) + "}}xx") p("}}ybFor help - type mgmt.exe help (command)}}xx") return False cmd_parts = valid_commands[param1] if cmd_parts is None: p("}}rnInvalid Command - not configured! " + param1 + "}}xx", log_level=1) return False help_msg = cmd_parts["help"] if help_msg is None: p("}}rnNo Help Provided! " + param1 + "}}xx", log_level=1) return False p("}}yb" + help_msg + "}}xx") return True valid_commands = { "help": { "function": show_help, "help": "Display help information for the specified command (e.g. 
mgmt.exe help set_log_level)", }, ### SETTINGS ### # Add self to system path "add_mgmt_to_system_path": { "function": RegistrySettings.add_mgmt_utility_to_path, "help": "Add the path to the mgmt.exe file to the system path for easier use" }, # Set log level "set_log_level": { "function": RegistrySettings.set_log_level, "help": "Adjust how verbose we want logging to be (default 3)" }, # Set registry/folder run timer "set_default_permissions_timer": { "function": RegistrySettings.set_default_permissions_timer, "help": "How often do you want permissions reset on folder/registry (default 3600)" }, # Set Frequency for scanning nics "set_scan_nics_timer": { "function": RegistrySettings.set_scan_nics_timer, "help": "How often do you want to scan nics for approved/disapproved nics (default 60)" }, # How often should service reload settings "set_reload_settings_timer": { "function": RegistrySettings.set_reload_settings_timer, "help": "How often should the service reload settings from the registry (default 30)" }, # Set how often to snap a screenshot "set_screen_shot_timer": { "function": RegistrySettings.set_screen_shot_timer, "help": "How often should we snap screen shots (default 30-300)" }, # Show service trace log "show_trace": { "function": RunAsTraceCollector, "help": "Show console logs for the OPEService" }, # Disable hostednetwork options on the wlan devices "disable_wlan_hosted_network": { "function": NetworkDevices.disable_wlan_hosted_network, "help": "Turn off hosted network options (nework sharing with other devices)" }, "enable_wlan_hosted_network": { "function": NetworkDevices.enable_wlan_hosted_network, "help": "Turn on hosted network options (nework sharing with other devices)" }, # Add/remove a nic from the approved list "approve_nic": { "function": NetworkDevices.approve_nic, "help": "Add a nic to the approved list - params include nic name (OR ID) and netowrk subnet it is approved on\n" + "NOTE: Subnet should be first part of address - it is a simple 
match (e.g. 202.5.222 for 202.5.222.34)\n" + "mgmt.exe add_nic \"Intel(R) 82579LM Gigabit Network Connection\" 202.5.222", }, "remove_nic": { "function": NetworkDevices.remove_nic, "help": "Remove a nic from the approved list - need both nic name and network\n" + "mgmt.exe remove_nic \"Intel(R) 82579LM Gigabit Network Connection\" 202.5.222", }, "list_approved_nics": { "function": NetworkDevices.list_approved_nics, "help": "Show a list of currently approved nics", }, "list_system_nics": { "function": NetworkDevices.list_system_nics, "help": "Show a list of nics plugged into the system and their hardware status" }, "get_machine_info": { "function": Computer.get_machine_info, "help": "Return some system information such as serial number" }, ### SECURITY COMMANDS ### # Snap a screen shot of the users desktop "screen_shot": { "function": ScreenShot.take_screenshot, "help": "Take a screen shot of the currently logged in user", }, # Lock down permissions to OPE folders "set_default_ope_folder_permissions": { "function": FolderPermissions.set_default_ope_folder_permissions, "help": "Reset permissions on %programdata%\\ope folders", }, # Lock down permissions to OPE registry entries "set_default_ope_registry_permissions": { "function": RegistrySettings.set_default_ope_registry_permissions, "help": "Reset permissions on OPE registry keys", }, # Fire when a device status changes (nic plugged in?) "device_event": { "function": NetworkDevices.device_event, "help": "A device changed (plugged in?) - do the appropriate steps to keep system secure (fired as event from OPEService)" }, # If any nics aren't in the approved list, disable them "scan_nics": { "function": NetworkDevices.scan_nics, "help": "Scan for nics that aren't approved and turn them off or on" }, # Disable com ports not on the approved list (none?) 
"scan_com_ports": { "function": COMPorts.scan_com_ports, "help": "Find and disable com ports that we don't want enabled" }, # Call to kill stuff if a credential fails mid-way (e.g. disable student users, lock things out) "bad_credential": { "function": UserAccounts.disable_student_accounts, "help": "If anything bad happens, make sure all student accounts are " + \ "disabled so they can't use the system if it is returned to them by mistake" }, # Apply group policy "apply_group_policy": { "function": GroupPolicy.apply_group_policy, "help": "Apply lock down windows group policy settings" }, # Reset to win default group policy "reset_group_policy": { "function": GroupPolicy.reset_group_policy_to_default, "help": "Reset group policy to windows default (remove security)" }, # Export the current group policy to a folder "export_group_policy": { "function": GroupPolicy.export_group_policy, "help": "Export the current group policy to a folder (e.g. mgmt export_group_policy exported_gpo )" }, # Apply firewall Policy "apply_firewall_policy": { "function": GroupPolicy.apply_firewall_policy, "help": "Lock down firewall with pre-defined policy" }, # Reset firewall policy to default "reset_firewall_policy": { "function": GroupPolicy.reset_firewall_policy, "help": "Reset firewall policy back to factory defaults" }, # Student Account Functions "create_student_account": { "function": UserAccounts.create_local_student_account, "help": "Create the student account in the windows system" }, "remove_account": { "function": UserAccounts.delete_user, "help": "Remove the windows account AND profile from the system (e.g. mgmt remove_account s777777)" }, "disable_account": { "function": UserAccounts.disable_account, "help": "Disable the specified windows account (e.g. mgmt disable_account s777777)" }, "enable_account": { "function": UserAccounts.enable_account, "help": "Enable the specified windows account (e.g. 
mgmt enabl_account s777777)" }, "disable_student_accounts": { "function": UserAccounts.disable_student_accounts, "help": "Disable ALL student accounts on this machine." }, # Remove student profile folder (delete files) "remove_account_profile": { "function": UserAccounts.remove_account_profile, "help": "Remove the windows profile for this account (e.g. mgmt remove_account_profile s777777)" }, # Download the OPE CA cert and add to the trusted list "trust_ope_certs": { "function": CredentialProcess.trust_ope_certs, "help": "Download CA crt from the OPE server and add to the trusted list" }, # Lock the screen for the current user "lock_screen": { "function": UserAccounts.lock_screen_for_user, "help": "Lock the screen. If no user specified, locks the current screen.", "require_admin": False }, "log_out_user": { "function": UserAccounts.log_out_user, "help": "Log out the specified user" }, "lock_boot_settings": { "function": FolderPermissions.lock_boot_settings, "help": "Lock down boot settings so that you can't use safe mode or restore features" }, "unlock_boot_settings": { "function": FolderPermissions.unlock_boot_settings, "help": "UnLock boot settings so that you can use restore features" }, "update_uefi_boot_order": { "function": FolderPermissions.update_uefi_boot_order, "help": "Update boot order for UEFI boot settings" }, "unlock_machine": { "function": CredentialProcess.unlock_machine, "help": "Disable student accounts and turn off security/policy/firewall settings - allow admins to plug in USB drive/etc..." }, "lock_machine": { "function": CredentialProcess.lock_machine, "help": "Turn security features back on and re-enable student account." }, "show_lock_screen_widget": { "function": LockScreen.show_lock_screen_widget, "help": "Launch the lock screen widget which shoes current state of syncing/updates/etc..." 
}, "refresh_lock_screen_widget": { "function": LockScreen.refresh_lock_screen_widget, "help": "Update the lockscreen widget with the latest files and re-launch" }, #### Do credential process ### "credential_laptop": { "function": CredentialProcess.credential_laptop, "help": "Run the credential process to lock down this laptop" }, ### UPDATE/SYNC COMMANDS ### # Force a git pull "get_git_branch": { "function": RegistrySettings.get_git_branch, "help": "Get which branch to use when pulling updates from git repo" }, "set_git_branch": { "function": RegistrySettings.set_git_branch, "help": "Set which branch to use when pulling updates from git repo" }, "git_pull": { "function": ProcessManagement.git_pull_branch, "help": "Pull updates down from online or local SMC server" }, # Upgrade everything from the smc server and restart services (if online) "start_upgrade": { "function": CredentialProcess.start_upgrade_process, "help": "Start the OPE software update process - processes will be stopped/started automatically\nCan also use position arguments to specify git branch and force update (e.g. mgmt.exe start_upgrade master -f)" }, "finish_upgrade": { "function": CredentialProcess.finish_upgrade_process, "help": "Do follow-up steps after an upgrade (e.g. 
re-apply security, re-enable student account)" }, # Bounce of SMC and get the current password for this student and set it # in the local machine "sync_student_password": { "function": CredentialProcess.sync_student_password, "help": "Update the local login password from the server" }, # Send screenshots/logs/reports to SMC (if online) "sync_logs_to_smc": { "function": CredentialProcess.sync_logs_to_smc, "help": "Push log files and screen shots to SMC server", "require_admin": False }, # Sync users LMSApp Data w Canvas "sync_lms_app_data": { "function": CredentialProcess.sync_lms_app_data, "help": "Sync LMS App data in headless mode for the current student (auto sync)", "require_admin": False }, # Sync users work folder with SMC "sync_work_folder": { "function": CredentialProcess.sync_work_folder, "help": "Sync work folders for the student (e.g. sync work files to desktop)", "require_admin": False }, "sync_time": { "function": SystemTime.sync_time_w_ntp, "help": "Force time sync with the SMC server" }, "ping_smc": { "function": CredentialProcess.ping_smc, "help": "Connect to the SMC server to see if we have connection" }, "version": { "function": show_version, "help": "Display the version for the LMS software", "require_admin": False }, "test_cmd": { "function": util.test_params, "help": "Debugging command", "hide": True, }, } if __name__ == "__main__": # returns (is in admins, is uac, curr_user_name) is_admin = ensure_admin() # Parse Arguments cmd = util.get_param(1).lower() if cmd not in valid_commands: # Unknown Command?? p("}}rnInvalid Command! 
- " + str(cmd) + "}}xx", log_level=1) # Only show commands if UAC active if is_admin[1]: # Remove hidden commands print_cmds = {} for k in valid_commands.keys(): item = valid_commands[k] if not 'hide' in item or not item['hide'] is True: print_cmds[k]=item commands = sorted(print_cmds.keys()) p("}}yn Valid Commands: " + str(commands) + "}}xx") p("}}ybFor help - type mgmt.exe help (command)}}xx") sys.exit(1) # Run the function associated w the command cmd_parts = valid_commands[cmd] if cmd_parts is None: p("}}rnERROR - Command not avaialable " + cmd + " - coming soon...}}xx", log_level=1) sys.exit(1) cmd_requires_admin = util.get_dict_value(cmd_parts, "require_admin", True) if cmd_requires_admin is True and is_admin[1] is not True: # Command requires elevation and this user doesn't have it! if is_admin[0] is not True: # User is NOT in the administrators group p("}}rbINVALID USER - Must be in the administrators group to use this utility!\n" + "Attempt logged for user " + is_admin[2] + ".}}xx", log_level=1) sys.exit(2) if is_admin[1] is not True: # User is NOT running with UAC enabled p("}}rbINVALID USER - Must be in UAC prompt to use this utility!\n" + "Attempt logged for user " + is_admin[2] + ".}}xx", log_level=1) sys.exit(2) sys.exit(2) # Get the function assigned to this command f = cmd_parts["function"] if f is None: p("}}rnERROR - No function assigned to command " + cmd + " - coming soon...}}xx", log_level=1) sys.exit(1) exit_code = 0 try: util.CMD_FUNCTION = cmd p("}}gnRunning " + cmd + "}}xx", log_level=4) ret = f() #p("}}ynReturn Code: " + str(ret) + "}}xx") if ret is not None and ret != True: exit_code = -1 except Exception as ex: p("}}rnERROR: " + str(ex) + "}}xx", log_level=1) exit_code = 1 # Clean exit sys.exit(exit_code)
show_version
identifier_name
mgmt.py
# import pythoncom # import win32serviceutil # import win32service # import win32event # import servicemanager # import socket # import time # import datetime # import sys # import os # import logging # import random # # from win32com.shell import shell, shellcon # import ntsecuritycon # import win32security # import win32gui # import win32ui # import win32con # import win32gui_struct # import win32ts # import win32process # import win32profile # import ctypes # import wmi # import traceback import win32trace import win32api import sys import os import traceback import util # Pull in logger first and set it up! from mgmt_EventLog import EventLog global LOGGER LOGGER = EventLog(os.path.join(util.LOG_FOLDER, 'ope-mgmt.log'), service_name="OPEMgmt") from color import p, set_log_level from mgmt_UserAccounts import UserAccounts from mgmt_FolderPermissions import FolderPermissions from mgmt_ScreenShot import ScreenShot from mgmt_RegistrySettings import RegistrySettings from mgmt_NetworkDevices import NetworkDevices from mgmt_CredentialProcess import CredentialProcess from mgmt_SystemTime import SystemTime from mgmt_GroupPolicy import GroupPolicy from mgmt_ProcessManagement import ProcessManagement from mgmt_Computer import Computer from mgmt_COMPorts import COMPorts from mgmt_LockScreen import LockScreen # Get the logging level value_name = "log_level" value = RegistrySettings.get_reg_value(app="OPEService", value_name=value_name, default=3, value_type="REG_DWORD") set_log_level(value) # Pre-declare - fill out later global valid_commands valid_commands = dict() def RunAsTraceCollector(): import sys try: import win32api win32api.SetConsoleTitle("Python Trace Collector") except: pass # Oh well! win32trace.InitRead() p("Collecting Python Trace Output...", log_level=4) try: while 1: # a short timeout means ctrl+c works next time we wake... 
sys.stdout.write(win32trace.blockingread(500)) except KeyboardInterrupt: p("}}ybCtrl+C - quitting...}}xx", log_level=3) def ensure_admin(): # Get the is in administrators, is uac, and username and return them return (UserAccounts.is_in_admin_group(), UserAccounts.is_uac_admin(), UserAccounts.get_current_user()) def show_version(): ver = CredentialProcess.get_mgmt_version() p("}}gbVersion: " + str(ver) + "}}xx") return True def show_help(): global LOGGER, valid_commands # Find the help key for this command cmd = util.get_param(1).lower() param1 = util.get_param(2).lower() if cmd == "" or param1 == "": # Missing required parameters! p("}}rnMissing Required Parameters! " + cmd + " - " + param1 + "}}xx", log_level=1) return False if not param1 in valid_commands: p("}}rnInvalid Command! " + param1 + "}}xx", log_level=1) commands = list(valid_commands.keys()) p("}}yn Valid Commands: " + str(commands) + "}}xx") p("}}ybFor help - type mgmt.exe help (command)}}xx") return False cmd_parts = valid_commands[param1] if cmd_parts is None: p("}}rnInvalid Command - not configured! " + param1 + "}}xx", log_level=1) return False help_msg = cmd_parts["help"] if help_msg is None: p("}}rnNo Help Provided! " + param1 + "}}xx", log_level=1) return False p("}}yb" + help_msg + "}}xx") return True valid_commands = { "help": { "function": show_help, "help": "Display help information for the specified command (e.g. 
mgmt.exe help set_log_level)", }, ### SETTINGS ### # Add self to system path "add_mgmt_to_system_path": { "function": RegistrySettings.add_mgmt_utility_to_path, "help": "Add the path to the mgmt.exe file to the system path for easier use" }, # Set log level "set_log_level": { "function": RegistrySettings.set_log_level, "help": "Adjust how verbose we want logging to be (default 3)" }, # Set registry/folder run timer "set_default_permissions_timer": { "function": RegistrySettings.set_default_permissions_timer, "help": "How often do you want permissions reset on folder/registry (default 3600)" }, # Set Frequency for scanning nics "set_scan_nics_timer": { "function": RegistrySettings.set_scan_nics_timer, "help": "How often do you want to scan nics for approved/disapproved nics (default 60)" }, # How often should service reload settings "set_reload_settings_timer": { "function": RegistrySettings.set_reload_settings_timer, "help": "How often should the service reload settings from the registry (default 30)" }, # Set how often to snap a screenshot "set_screen_shot_timer": { "function": RegistrySettings.set_screen_shot_timer, "help": "How often should we snap screen shots (default 30-300)" }, # Show service trace log "show_trace": { "function": RunAsTraceCollector, "help": "Show console logs for the OPEService" }, # Disable hostednetwork options on the wlan devices "disable_wlan_hosted_network": { "function": NetworkDevices.disable_wlan_hosted_network, "help": "Turn off hosted network options (nework sharing with other devices)" }, "enable_wlan_hosted_network": { "function": NetworkDevices.enable_wlan_hosted_network, "help": "Turn on hosted network options (nework sharing with other devices)" }, # Add/remove a nic from the approved list "approve_nic": { "function": NetworkDevices.approve_nic, "help": "Add a nic to the approved list - params include nic name (OR ID) and netowrk subnet it is approved on\n" + "NOTE: Subnet should be first part of address - it is a simple 
match (e.g. 202.5.222 for 202.5.222.34)\n" + "mgmt.exe add_nic \"Intel(R) 82579LM Gigabit Network Connection\" 202.5.222", }, "remove_nic": { "function": NetworkDevices.remove_nic, "help": "Remove a nic from the approved list - need both nic name and network\n" + "mgmt.exe remove_nic \"Intel(R) 82579LM Gigabit Network Connection\" 202.5.222", }, "list_approved_nics": { "function": NetworkDevices.list_approved_nics, "help": "Show a list of currently approved nics", }, "list_system_nics": { "function": NetworkDevices.list_system_nics, "help": "Show a list of nics plugged into the system and their hardware status" }, "get_machine_info": { "function": Computer.get_machine_info, "help": "Return some system information such as serial number" }, ### SECURITY COMMANDS ### # Snap a screen shot of the users desktop "screen_shot": { "function": ScreenShot.take_screenshot, "help": "Take a screen shot of the currently logged in user", }, # Lock down permissions to OPE folders "set_default_ope_folder_permissions": { "function": FolderPermissions.set_default_ope_folder_permissions, "help": "Reset permissions on %programdata%\\ope folders", }, # Lock down permissions to OPE registry entries "set_default_ope_registry_permissions": { "function": RegistrySettings.set_default_ope_registry_permissions, "help": "Reset permissions on OPE registry keys", }, # Fire when a device status changes (nic plugged in?) "device_event": { "function": NetworkDevices.device_event, "help": "A device changed (plugged in?) - do the appropriate steps to keep system secure (fired as event from OPEService)" }, # If any nics aren't in the approved list, disable them "scan_nics": { "function": NetworkDevices.scan_nics, "help": "Scan for nics that aren't approved and turn them off or on" }, # Disable com ports not on the approved list (none?) 
"scan_com_ports": { "function": COMPorts.scan_com_ports, "help": "Find and disable com ports that we don't want enabled" }, # Call to kill stuff if a credential fails mid-way (e.g. disable student users, lock things out) "bad_credential": { "function": UserAccounts.disable_student_accounts, "help": "If anything bad happens, make sure all student accounts are " + \ "disabled so they can't use the system if it is returned to them by mistake" }, # Apply group policy "apply_group_policy": { "function": GroupPolicy.apply_group_policy, "help": "Apply lock down windows group policy settings" }, # Reset to win default group policy "reset_group_policy": { "function": GroupPolicy.reset_group_policy_to_default, "help": "Reset group policy to windows default (remove security)" }, # Export the current group policy to a folder "export_group_policy": { "function": GroupPolicy.export_group_policy, "help": "Export the current group policy to a folder (e.g. mgmt export_group_policy exported_gpo )" }, # Apply firewall Policy "apply_firewall_policy": { "function": GroupPolicy.apply_firewall_policy, "help": "Lock down firewall with pre-defined policy" }, # Reset firewall policy to default "reset_firewall_policy": { "function": GroupPolicy.reset_firewall_policy, "help": "Reset firewall policy back to factory defaults" }, # Student Account Functions "create_student_account": { "function": UserAccounts.create_local_student_account, "help": "Create the student account in the windows system" }, "remove_account": { "function": UserAccounts.delete_user, "help": "Remove the windows account AND profile from the system (e.g. mgmt remove_account s777777)" }, "disable_account": { "function": UserAccounts.disable_account, "help": "Disable the specified windows account (e.g. mgmt disable_account s777777)" }, "enable_account": { "function": UserAccounts.enable_account, "help": "Enable the specified windows account (e.g. 
mgmt enabl_account s777777)" }, "disable_student_accounts": { "function": UserAccounts.disable_student_accounts, "help": "Disable ALL student accounts on this machine." }, # Remove student profile folder (delete files) "remove_account_profile": { "function": UserAccounts.remove_account_profile, "help": "Remove the windows profile for this account (e.g. mgmt remove_account_profile s777777)" }, # Download the OPE CA cert and add to the trusted list "trust_ope_certs": { "function": CredentialProcess.trust_ope_certs, "help": "Download CA crt from the OPE server and add to the trusted list" }, # Lock the screen for the current user "lock_screen": { "function": UserAccounts.lock_screen_for_user, "help": "Lock the screen. If no user specified, locks the current screen.", "require_admin": False }, "log_out_user": { "function": UserAccounts.log_out_user, "help": "Log out the specified user" }, "lock_boot_settings": { "function": FolderPermissions.lock_boot_settings, "help": "Lock down boot settings so that you can't use safe mode or restore features" }, "unlock_boot_settings": { "function": FolderPermissions.unlock_boot_settings, "help": "UnLock boot settings so that you can use restore features" }, "update_uefi_boot_order": { "function": FolderPermissions.update_uefi_boot_order, "help": "Update boot order for UEFI boot settings" }, "unlock_machine": { "function": CredentialProcess.unlock_machine, "help": "Disable student accounts and turn off security/policy/firewall settings - allow admins to plug in USB drive/etc..." }, "lock_machine": { "function": CredentialProcess.lock_machine, "help": "Turn security features back on and re-enable student account." }, "show_lock_screen_widget": { "function": LockScreen.show_lock_screen_widget, "help": "Launch the lock screen widget which shoes current state of syncing/updates/etc..." 
}, "refresh_lock_screen_widget": { "function": LockScreen.refresh_lock_screen_widget, "help": "Update the lockscreen widget with the latest files and re-launch" }, #### Do credential process ### "credential_laptop": { "function": CredentialProcess.credential_laptop, "help": "Run the credential process to lock down this laptop" }, ### UPDATE/SYNC COMMANDS ### # Force a git pull "get_git_branch": { "function": RegistrySettings.get_git_branch, "help": "Get which branch to use when pulling updates from git repo" }, "set_git_branch": { "function": RegistrySettings.set_git_branch, "help": "Set which branch to use when pulling updates from git repo" }, "git_pull": { "function": ProcessManagement.git_pull_branch, "help": "Pull updates down from online or local SMC server" }, # Upgrade everything from the smc server and restart services (if online) "start_upgrade": { "function": CredentialProcess.start_upgrade_process, "help": "Start the OPE software update process - processes will be stopped/started automatically\nCan also use position arguments to specify git branch and force update (e.g. mgmt.exe start_upgrade master -f)" }, "finish_upgrade": { "function": CredentialProcess.finish_upgrade_process, "help": "Do follow-up steps after an upgrade (e.g. 
re-apply security, re-enable student account)" }, # Bounce of SMC and get the current password for this student and set it # in the local machine "sync_student_password": { "function": CredentialProcess.sync_student_password, "help": "Update the local login password from the server" }, # Send screenshots/logs/reports to SMC (if online) "sync_logs_to_smc": { "function": CredentialProcess.sync_logs_to_smc, "help": "Push log files and screen shots to SMC server", "require_admin": False }, # Sync users LMSApp Data w Canvas "sync_lms_app_data": { "function": CredentialProcess.sync_lms_app_data, "help": "Sync LMS App data in headless mode for the current student (auto sync)", "require_admin": False }, # Sync users work folder with SMC "sync_work_folder": { "function": CredentialProcess.sync_work_folder, "help": "Sync work folders for the student (e.g. sync work files to desktop)", "require_admin": False }, "sync_time": { "function": SystemTime.sync_time_w_ntp, "help": "Force time sync with the SMC server" }, "ping_smc": { "function": CredentialProcess.ping_smc, "help": "Connect to the SMC server to see if we have connection" }, "version": { "function": show_version, "help": "Display the version for the LMS software", "require_admin": False }, "test_cmd": { "function": util.test_params, "help": "Debugging command", "hide": True, }, } if __name__ == "__main__": # returns (is in admins, is uac, curr_user_name) is_admin = ensure_admin() # Parse Arguments cmd = util.get_param(1).lower() if cmd not in valid_commands: # Unknown Command?? p("}}rnInvalid Command! 
- " + str(cmd) + "}}xx", log_level=1) # Only show commands if UAC active if is_admin[1]: # Remove hidden commands print_cmds = {} for k in valid_commands.keys(): item = valid_commands[k] if not 'hide' in item or not item['hide'] is True: print_cmds[k]=item commands = sorted(print_cmds.keys()) p("}}yn Valid Commands: " + str(commands) + "}}xx") p("}}ybFor help - type mgmt.exe help (command)}}xx") sys.exit(1) # Run the function associated w the command cmd_parts = valid_commands[cmd] if cmd_parts is None:
cmd_requires_admin = util.get_dict_value(cmd_parts, "require_admin", True) if cmd_requires_admin is True and is_admin[1] is not True: # Command requires elevation and this user doesn't have it! if is_admin[0] is not True: # User is NOT in the administrators group p("}}rbINVALID USER - Must be in the administrators group to use this utility!\n" + "Attempt logged for user " + is_admin[2] + ".}}xx", log_level=1) sys.exit(2) if is_admin[1] is not True: # User is NOT running with UAC enabled p("}}rbINVALID USER - Must be in UAC prompt to use this utility!\n" + "Attempt logged for user " + is_admin[2] + ".}}xx", log_level=1) sys.exit(2) sys.exit(2) # Get the function assigned to this command f = cmd_parts["function"] if f is None: p("}}rnERROR - No function assigned to command " + cmd + " - coming soon...}}xx", log_level=1) sys.exit(1) exit_code = 0 try: util.CMD_FUNCTION = cmd p("}}gnRunning " + cmd + "}}xx", log_level=4) ret = f() #p("}}ynReturn Code: " + str(ret) + "}}xx") if ret is not None and ret != True: exit_code = -1 except Exception as ex: p("}}rnERROR: " + str(ex) + "}}xx", log_level=1) exit_code = 1 # Clean exit sys.exit(exit_code)
p("}}rnERROR - Command not avaialable " + cmd + " - coming soon...}}xx", log_level=1) sys.exit(1)
conditional_block
ClientMobileApp.js
import React, { Fragment, useRef, useEffect, useState } from 'react'; import { Link, withRouter } from 'react-router-dom'; import Tilt from 'react-tilt' import RatingStars from './RatingStars'; import { useStoreState, useStoreDispatch } from 'easy-peasy'; import { logout } from '../../redux/actions/authActions'; import { showComponent, hideComponent } from '../../redux/actions/componentActions'; import ImageLogo from '../../components/ImageLogo'; import Login from '../../components/auth/Login'; // import LoadingThreeDots from '../../components/loadingIndicators/LoadingThreeDots'; import { convertDotToComma } from '../../utils/numbers/convertDotComma'; import animateNumber from '../../utils/numbers/animateNumber'; import getPercentage from '../../utils/numbers/getPercentage'; import ReactjsPercentageCircle from '../../components/progressIndicators/ReactjsPercentageCircle/ReactjsPercentageCircle'; import getDayGreetingBr from '../../utils/getDayGreetingBr'; // import checkIfElemIsVisible from '../../utils/window/checkIfElemIsVisible'; // SpeedDial and Icons import SpeedDialButton from '../../components/buttons/SpeedDialButton'; import showVanillaToast from '../../components/vanilla-js/toastify/showVanillaToast'; import LoyaltyIcon from '@material-ui/icons/Loyalty'; import ExitToAppIcon from '@material-ui/icons/ExitToApp'; // End SpeedDial and Incons import "./ellipse.css"; const maxScore = 500; function ClientMobileApp({ history })
rt default withRouter(ClientMobileApp); /* {loading ? ( <LoadingThreeDots color="white" /> ) : ( )} */ /* <div className="my-3 container-center"> <img src="/img/official-logo.jpg" alt="logo" width={300} height="auto"/> </div> */
{ const userScoreRef = useRef(null); // const [showMoreBtn, setShowMoreBtn] = useState(false); const [showPercentage, setShowPercentage] = useState(false); let { isUserAuth, role, loyaltyScores, userName } = useStoreState(state => ({ isUserAuth: state.authReducer.cases.isUserAuthenticated, role: state.userReducer.cases.currentUser.role, userName: state.userReducer.cases.currentUser.name, loyaltyScores: state.userReducer.cases.currentUser.loyaltyScores, })) // checkIfElemIsVisible("#rules", setShowMoreBtn) const dispatch = useStoreDispatch(); const userScore = loyaltyScores && loyaltyScores.currentScore; const userLastScore = loyaltyScores && loyaltyScores.cashCurrentScore; useEffect(() => { if(isUserAuth && role === "cliente") { animateNumber( userScoreRef.current, 0, userScore, 3000, setShowPercentage ); } }, [role, isUserAuth]) const playBeep = () => { // Not working const elem = document.querySelector("#appBtn"); elem.play(); } const showLogin = () => ( <div className="my-5"> <div className="mb-3 text-white text-em-2-5 text-center text-default"> Faça seu acesso. </div> <div className="margin-auto-60"> <Login /> </div> </div> ); const showGreeting = () => ( <section className="position-relative animated slideInLeft slow"> <div className="ellipse"></div> <div style={{position: 'absolute', top: '-5px'}} className="ml-2 mb-2 text-white text-shadow text-em-1-4 text-left text-default"> {getDayGreetingBr()},<br/> <span className="text-em-1-8">{userName.cap() + "!"}</span> </div> </section> ); const showScores = () => { const displayAllScores = () => ( <Fragment> Fidelidômetro:<br/> <div className="d-flex justify-content-center"> <p ref={userScoreRef}>...</p> <p className="ml-3">Pontos</p> </div> {/*LAST SCORE*/} {userScore === 0 || !userScore || !showPercentage ? 
null : ( <section className="position-relative animated slideInLeft slow"> <div className="ellipse2"></div> <div style={{ zIndex: 10, color: 'var(--mainPink)', position: 'absolute', top: '-5px', left: '220px'}} className="text-em-0-5 text-nowrap" > Última pontuação:<br /> <span className="text-em-1"> <strong>{convertDotToComma(userLastScore)}</strong> </span> </div> </section> )} </Fragment> ); const displayGift = () => ( <Fragment> {userScore >= maxScore ? ( <div> <p>Parabéns!<br />Você ganhou um prêmio.</p> <img className="animated bounce" style={{animationIterationCount: 20}} src="/img/icons/pink-gift-box.png" alt="presente" width={100} height="auto"/> </div> ) : ( <div> <div className="position-relative mt-4"> <img style={{opacity: '.5'}} className="animated bounce" src="/img/icons/pink-gift-box.png" alt="presente" width={100} height="auto"/> <p className="text-em-2" style={{position: 'absolute', top: '10px', left: '48%'}}>?</p> </div> </div> )} </Fragment> ); const displayPercentageCircleAndGift = () => ( <Fragment> {showPercentage ? 
( <Fragment> <Tilt className="Tilt" options={{ max : 90, reverse: true }} > <div className="container-center animated zoomIn"> <ReactjsPercentageCircle percent={getPercentage(maxScore, userScore)} color="var(--mainPink)" radius={70} borderWidth={10} textStyle="text-pink text-em-1-2" /> </div> </Tilt> {displayGift()} </Fragment> ) : null} </Fragment> ); return( <div className="my-3 text-white text-em-2-5 text-center text-default"> {displayAllScores()} {displayPercentageCircleAndGift()} </div> ); }; const showRules = () => ( <Link to="/regulamento"> <div onClick={playBeep} id="rules" className="text-container font-weight-italic text-center" style={{color: "var(--mainPink)", cursor: "pointer"}} > Consulte<br />as Regras Aqui </div> </Link> ); const showMoreOptionsBtn = () => { const speedDial = { actions: [ //the order rendered is inverse from the bottom to top { icon: <ExitToAppIcon />, name: 'Desconectar', backColor: 'var(--mainPink)', onClick: () => { logout(dispatch); playBeep(); } }, { icon: <LoyaltyIcon />, name: 'Adicionar Pontos', backColor: 'var(--mainPink)', onClick: () => { hideComponent(dispatch, "login"); showComponent(dispatch, "purchaseValue"); history.push("/cliente/pontos-fidelidade"); playBeep(); }, }, ] } return( <SpeedDialButton actions={speedDial.actions} tooltipOpen={true} size="large" FabProps={{ backgroundColor: 'var(--mainPink)', size: 'medium', }} root={{ bottom: '30px', right: '40px', }} hidden={false} /> ); } return ( <div> <div className="margin-auto-90"> <ImageLogo /> </div> <section> {isUserAuth && role === "cliente" ? 
( <Fragment> <br/> <br/> {showGreeting()} {showScores()} <div className="mb-4"> <RatingStars score={userScore} /> </div> <div className="mb-4"> {showRules()} </div> {showMoreOptionsBtn()} <audio id="appBtn" src="https://ia601500.us.archive.org/29/items/confirmation-keypad-sound/confirmation-keypad-sound.wav"></audio> </Fragment> ) : showLogin()} </section> <p className="text-small text-left text-white">v4.6.8</p> </div> ); } expo
identifier_body
ClientMobileApp.js
import React, { Fragment, useRef, useEffect, useState } from 'react'; import { Link, withRouter } from 'react-router-dom'; import Tilt from 'react-tilt' import RatingStars from './RatingStars'; import { useStoreState, useStoreDispatch } from 'easy-peasy'; import { logout } from '../../redux/actions/authActions'; import { showComponent, hideComponent } from '../../redux/actions/componentActions'; import ImageLogo from '../../components/ImageLogo'; import Login from '../../components/auth/Login'; // import LoadingThreeDots from '../../components/loadingIndicators/LoadingThreeDots'; import { convertDotToComma } from '../../utils/numbers/convertDotComma'; import animateNumber from '../../utils/numbers/animateNumber'; import getPercentage from '../../utils/numbers/getPercentage'; import ReactjsPercentageCircle from '../../components/progressIndicators/ReactjsPercentageCircle/ReactjsPercentageCircle'; import getDayGreetingBr from '../../utils/getDayGreetingBr'; // import checkIfElemIsVisible from '../../utils/window/checkIfElemIsVisible'; // SpeedDial and Icons import SpeedDialButton from '../../components/buttons/SpeedDialButton'; import showVanillaToast from '../../components/vanilla-js/toastify/showVanillaToast'; import LoyaltyIcon from '@material-ui/icons/Loyalty'; import ExitToAppIcon from '@material-ui/icons/ExitToApp'; // End SpeedDial and Incons import "./ellipse.css"; const maxScore = 500; function ClientMobileApp({ history }) { const userScoreRef = useRef(null); // const [showMoreBtn, setShowMoreBtn] = useState(false); const [showPercentage, setShowPercentage] = useState(false); let { isUserAuth, role, loyaltyScores, userName } = useStoreState(state => ({ isUserAuth: state.authReducer.cases.isUserAuthenticated, role: state.userReducer.cases.currentUser.role, userName: state.userReducer.cases.currentUser.name, loyaltyScores: state.userReducer.cases.currentUser.loyaltyScores, })) // checkIfElemIsVisible("#rules", setShowMoreBtn) const dispatch = 
useStoreDispatch(); const userScore = loyaltyScores && loyaltyScores.currentScore; const userLastScore = loyaltyScores && loyaltyScores.cashCurrentScore; useEffect(() => { if(isUserAuth && role === "cliente") { animateNumber( userScoreRef.current, 0, userScore, 3000, setShowPercentage ); } }, [role, isUserAuth]) const playBeep = () => { // Not working const elem = document.querySelector("#appBtn"); elem.play(); } const showLogin = () => ( <div className="my-5"> <div className="mb-3 text-white text-em-2-5 text-center text-default"> Faça seu acesso. </div> <div className="margin-auto-60"> <Login /> </div> </div> ); const showGreeting = () => ( <section className="position-relative animated slideInLeft slow"> <div className="ellipse"></div> <div style={{position: 'absolute', top: '-5px'}} className="ml-2 mb-2 text-white text-shadow text-em-1-4 text-left text-default"> {getDayGreetingBr()},<br/> <span className="text-em-1-8">{userName.cap() + "!"}</span> </div> </section> ); const showScores = () => { const displayAllScores = () => ( <Fragment> Fidelidômetro:<br/> <div className="d-flex justify-content-center"> <p ref={userScoreRef}>...</p> <p className="ml-3">Pontos</p> </div> {/*LAST SCORE*/} {userScore === 0 || !userScore || !showPercentage ? null : ( <section className="position-relative animated slideInLeft slow"> <div className="ellipse2"></div> <div style={{ zIndex: 10, color: 'var(--mainPink)', position: 'absolute', top: '-5px', left: '220px'}} className="text-em-0-5 text-nowrap" > Última pontuação:<br /> <span className="text-em-1"> <strong>{convertDotToComma(userLastScore)}</strong> </span> </div> </section> )} </Fragment> ); const displayGift = () => ( <Fragment> {userScore >= maxScore ? 
( <div> <p>Parabéns!<br />Você ganhou um prêmio.</p> <img className="animated bounce" style={{animationIterationCount: 20}} src="/img/icons/pink-gift-box.png" alt="presente" width={100} height="auto"/> </div> ) : ( <div> <div className="position-relative mt-4"> <img style={{opacity: '.5'}} className="animated bounce" src="/img/icons/pink-gift-box.png" alt="presente" width={100} height="auto"/> <p className="text-em-2" style={{position: 'absolute', top: '10px', left: '48%'}}>?</p> </div> </div> )} </Fragment> ); const displayPercentageCircleAndGift = () => ( <Fragment> {showPercentage ? ( <Fragment> <Tilt className="Tilt" options={{ max : 90, reverse: true }} > <div className="container-center animated zoomIn"> <ReactjsPercentageCircle percent={getPercentage(maxScore, userScore)} color="var(--mainPink)" radius={70} borderWidth={10} textStyle="text-pink text-em-1-2" /> </div> </Tilt> {displayGift()} </Fragment> ) : null} </Fragment> ); return( <div className="my-3 text-white text-em-2-5 text-center text-default"> {displayAllScores()} {displayPercentageCircleAndGift()} </div> ); }; const showRules = () => ( <Link to="/regulamento"> <div onClick={playBeep} id="rules" className="text-container font-weight-italic text-center" style={{color: "var(--mainPink)", cursor: "pointer"}} > Consulte<br />as Regras Aqui </div> </Link> ); const showMoreOptionsBtn = () => { const speedDial = { actions: [ //the order rendered is inverse from the bottom to top { icon: <ExitToAppIcon />, name: 'Desconectar', backColor: 'var(--mainPink)', onClick: () => { logout(dispatch); playBeep(); } }, { icon: <LoyaltyIcon />, name: 'Adicionar Pontos', backColor: 'var(--mainPink)', onClick: () => { hideComponent(dispatch, "login"); showComponent(dispatch, "purchaseValue"); history.push("/cliente/pontos-fidelidade"); playBeep(); }, }, ] } return( <SpeedDialButton actions={speedDial.actions} tooltipOpen={true} size="large" FabProps={{ backgroundColor: 'var(--mainPink)', size: 'medium', }} root={{ 
bottom: '30px', right: '40px', }} hidden={false} /> ); } return ( <div> <div className="margin-auto-90"> <ImageLogo /> </div> <section> {isUserAuth && role === "cliente" ? ( <Fragment> <br/> <br/> {showGreeting()} {showScores()} <div className="mb-4"> <RatingStars score={userScore} /> </div> <div className="mb-4"> {showRules()} </div> {showMoreOptionsBtn()} <audio id="appBtn" src="https://ia601500.us.archive.org/29/items/confirmation-keypad-sound/confirmation-keypad-sound.wav"></audio> </Fragment> ) : showLogin()} </section> <p className="text-small text-left text-white">v4.6.8</p> </div> );
{loading ? ( <LoadingThreeDots color="white" /> ) : ( )} */ /* <div className="my-3 container-center"> <img src="/img/official-logo.jpg" alt="logo" width={300} height="auto"/> </div> */
} export default withRouter(ClientMobileApp); /*
random_line_split
ClientMobileApp.js
import React, { Fragment, useRef, useEffect, useState } from 'react'; import { Link, withRouter } from 'react-router-dom'; import Tilt from 'react-tilt' import RatingStars from './RatingStars'; import { useStoreState, useStoreDispatch } from 'easy-peasy'; import { logout } from '../../redux/actions/authActions'; import { showComponent, hideComponent } from '../../redux/actions/componentActions'; import ImageLogo from '../../components/ImageLogo'; import Login from '../../components/auth/Login'; // import LoadingThreeDots from '../../components/loadingIndicators/LoadingThreeDots'; import { convertDotToComma } from '../../utils/numbers/convertDotComma'; import animateNumber from '../../utils/numbers/animateNumber'; import getPercentage from '../../utils/numbers/getPercentage'; import ReactjsPercentageCircle from '../../components/progressIndicators/ReactjsPercentageCircle/ReactjsPercentageCircle'; import getDayGreetingBr from '../../utils/getDayGreetingBr'; // import checkIfElemIsVisible from '../../utils/window/checkIfElemIsVisible'; // SpeedDial and Icons import SpeedDialButton from '../../components/buttons/SpeedDialButton'; import showVanillaToast from '../../components/vanilla-js/toastify/showVanillaToast'; import LoyaltyIcon from '@material-ui/icons/Loyalty'; import ExitToAppIcon from '@material-ui/icons/ExitToApp'; // End SpeedDial and Incons import "./ellipse.css"; const maxScore = 500; function
({ history }) { const userScoreRef = useRef(null); // const [showMoreBtn, setShowMoreBtn] = useState(false); const [showPercentage, setShowPercentage] = useState(false); let { isUserAuth, role, loyaltyScores, userName } = useStoreState(state => ({ isUserAuth: state.authReducer.cases.isUserAuthenticated, role: state.userReducer.cases.currentUser.role, userName: state.userReducer.cases.currentUser.name, loyaltyScores: state.userReducer.cases.currentUser.loyaltyScores, })) // checkIfElemIsVisible("#rules", setShowMoreBtn) const dispatch = useStoreDispatch(); const userScore = loyaltyScores && loyaltyScores.currentScore; const userLastScore = loyaltyScores && loyaltyScores.cashCurrentScore; useEffect(() => { if(isUserAuth && role === "cliente") { animateNumber( userScoreRef.current, 0, userScore, 3000, setShowPercentage ); } }, [role, isUserAuth]) const playBeep = () => { // Not working const elem = document.querySelector("#appBtn"); elem.play(); } const showLogin = () => ( <div className="my-5"> <div className="mb-3 text-white text-em-2-5 text-center text-default"> Faça seu acesso. </div> <div className="margin-auto-60"> <Login /> </div> </div> ); const showGreeting = () => ( <section className="position-relative animated slideInLeft slow"> <div className="ellipse"></div> <div style={{position: 'absolute', top: '-5px'}} className="ml-2 mb-2 text-white text-shadow text-em-1-4 text-left text-default"> {getDayGreetingBr()},<br/> <span className="text-em-1-8">{userName.cap() + "!"}</span> </div> </section> ); const showScores = () => { const displayAllScores = () => ( <Fragment> Fidelidômetro:<br/> <div className="d-flex justify-content-center"> <p ref={userScoreRef}>...</p> <p className="ml-3">Pontos</p> </div> {/*LAST SCORE*/} {userScore === 0 || !userScore || !showPercentage ? 
null : ( <section className="position-relative animated slideInLeft slow"> <div className="ellipse2"></div> <div style={{ zIndex: 10, color: 'var(--mainPink)', position: 'absolute', top: '-5px', left: '220px'}} className="text-em-0-5 text-nowrap" > Última pontuação:<br /> <span className="text-em-1"> <strong>{convertDotToComma(userLastScore)}</strong> </span> </div> </section> )} </Fragment> ); const displayGift = () => ( <Fragment> {userScore >= maxScore ? ( <div> <p>Parabéns!<br />Você ganhou um prêmio.</p> <img className="animated bounce" style={{animationIterationCount: 20}} src="/img/icons/pink-gift-box.png" alt="presente" width={100} height="auto"/> </div> ) : ( <div> <div className="position-relative mt-4"> <img style={{opacity: '.5'}} className="animated bounce" src="/img/icons/pink-gift-box.png" alt="presente" width={100} height="auto"/> <p className="text-em-2" style={{position: 'absolute', top: '10px', left: '48%'}}>?</p> </div> </div> )} </Fragment> ); const displayPercentageCircleAndGift = () => ( <Fragment> {showPercentage ? 
( <Fragment> <Tilt className="Tilt" options={{ max : 90, reverse: true }} > <div className="container-center animated zoomIn"> <ReactjsPercentageCircle percent={getPercentage(maxScore, userScore)} color="var(--mainPink)" radius={70} borderWidth={10} textStyle="text-pink text-em-1-2" /> </div> </Tilt> {displayGift()} </Fragment> ) : null} </Fragment> ); return( <div className="my-3 text-white text-em-2-5 text-center text-default"> {displayAllScores()} {displayPercentageCircleAndGift()} </div> ); }; const showRules = () => ( <Link to="/regulamento"> <div onClick={playBeep} id="rules" className="text-container font-weight-italic text-center" style={{color: "var(--mainPink)", cursor: "pointer"}} > Consulte<br />as Regras Aqui </div> </Link> ); const showMoreOptionsBtn = () => { const speedDial = { actions: [ //the order rendered is inverse from the bottom to top { icon: <ExitToAppIcon />, name: 'Desconectar', backColor: 'var(--mainPink)', onClick: () => { logout(dispatch); playBeep(); } }, { icon: <LoyaltyIcon />, name: 'Adicionar Pontos', backColor: 'var(--mainPink)', onClick: () => { hideComponent(dispatch, "login"); showComponent(dispatch, "purchaseValue"); history.push("/cliente/pontos-fidelidade"); playBeep(); }, }, ] } return( <SpeedDialButton actions={speedDial.actions} tooltipOpen={true} size="large" FabProps={{ backgroundColor: 'var(--mainPink)', size: 'medium', }} root={{ bottom: '30px', right: '40px', }} hidden={false} /> ); } return ( <div> <div className="margin-auto-90"> <ImageLogo /> </div> <section> {isUserAuth && role === "cliente" ? 
( <Fragment> <br/> <br/> {showGreeting()} {showScores()} <div className="mb-4"> <RatingStars score={userScore} /> </div> <div className="mb-4"> {showRules()} </div> {showMoreOptionsBtn()} <audio id="appBtn" src="https://ia601500.us.archive.org/29/items/confirmation-keypad-sound/confirmation-keypad-sound.wav"></audio> </Fragment> ) : showLogin()} </section> <p className="text-small text-left text-white">v4.6.8</p> </div> ); } export default withRouter(ClientMobileApp); /* {loading ? ( <LoadingThreeDots color="white" /> ) : ( )} */ /* <div className="my-3 container-center"> <img src="/img/official-logo.jpg" alt="logo" width={300} height="auto"/> </div> */
ClientMobileApp
identifier_name
ClientMobileApp.js
import React, { Fragment, useRef, useEffect, useState } from 'react'; import { Link, withRouter } from 'react-router-dom'; import Tilt from 'react-tilt' import RatingStars from './RatingStars'; import { useStoreState, useStoreDispatch } from 'easy-peasy'; import { logout } from '../../redux/actions/authActions'; import { showComponent, hideComponent } from '../../redux/actions/componentActions'; import ImageLogo from '../../components/ImageLogo'; import Login from '../../components/auth/Login'; // import LoadingThreeDots from '../../components/loadingIndicators/LoadingThreeDots'; import { convertDotToComma } from '../../utils/numbers/convertDotComma'; import animateNumber from '../../utils/numbers/animateNumber'; import getPercentage from '../../utils/numbers/getPercentage'; import ReactjsPercentageCircle from '../../components/progressIndicators/ReactjsPercentageCircle/ReactjsPercentageCircle'; import getDayGreetingBr from '../../utils/getDayGreetingBr'; // import checkIfElemIsVisible from '../../utils/window/checkIfElemIsVisible'; // SpeedDial and Icons import SpeedDialButton from '../../components/buttons/SpeedDialButton'; import showVanillaToast from '../../components/vanilla-js/toastify/showVanillaToast'; import LoyaltyIcon from '@material-ui/icons/Loyalty'; import ExitToAppIcon from '@material-ui/icons/ExitToApp'; // End SpeedDial and Incons import "./ellipse.css"; const maxScore = 500; function ClientMobileApp({ history }) { const userScoreRef = useRef(null); // const [showMoreBtn, setShowMoreBtn] = useState(false); const [showPercentage, setShowPercentage] = useState(false); let { isUserAuth, role, loyaltyScores, userName } = useStoreState(state => ({ isUserAuth: state.authReducer.cases.isUserAuthenticated, role: state.userReducer.cases.currentUser.role, userName: state.userReducer.cases.currentUser.name, loyaltyScores: state.userReducer.cases.currentUser.loyaltyScores, })) // checkIfElemIsVisible("#rules", setShowMoreBtn) const dispatch = 
useStoreDispatch(); const userScore = loyaltyScores && loyaltyScores.currentScore; const userLastScore = loyaltyScores && loyaltyScores.cashCurrentScore; useEffect(() => { if(isUserAuth && role === "cliente")
}, [role, isUserAuth]) const playBeep = () => { // Not working const elem = document.querySelector("#appBtn"); elem.play(); } const showLogin = () => ( <div className="my-5"> <div className="mb-3 text-white text-em-2-5 text-center text-default"> Faça seu acesso. </div> <div className="margin-auto-60"> <Login /> </div> </div> ); const showGreeting = () => ( <section className="position-relative animated slideInLeft slow"> <div className="ellipse"></div> <div style={{position: 'absolute', top: '-5px'}} className="ml-2 mb-2 text-white text-shadow text-em-1-4 text-left text-default"> {getDayGreetingBr()},<br/> <span className="text-em-1-8">{userName.cap() + "!"}</span> </div> </section> ); const showScores = () => { const displayAllScores = () => ( <Fragment> Fidelidômetro:<br/> <div className="d-flex justify-content-center"> <p ref={userScoreRef}>...</p> <p className="ml-3">Pontos</p> </div> {/*LAST SCORE*/} {userScore === 0 || !userScore || !showPercentage ? null : ( <section className="position-relative animated slideInLeft slow"> <div className="ellipse2"></div> <div style={{ zIndex: 10, color: 'var(--mainPink)', position: 'absolute', top: '-5px', left: '220px'}} className="text-em-0-5 text-nowrap" > Última pontuação:<br /> <span className="text-em-1"> <strong>{convertDotToComma(userLastScore)}</strong> </span> </div> </section> )} </Fragment> ); const displayGift = () => ( <Fragment> {userScore >= maxScore ? 
( <div> <p>Parabéns!<br />Você ganhou um prêmio.</p> <img className="animated bounce" style={{animationIterationCount: 20}} src="/img/icons/pink-gift-box.png" alt="presente" width={100} height="auto"/> </div> ) : ( <div> <div className="position-relative mt-4"> <img style={{opacity: '.5'}} className="animated bounce" src="/img/icons/pink-gift-box.png" alt="presente" width={100} height="auto"/> <p className="text-em-2" style={{position: 'absolute', top: '10px', left: '48%'}}>?</p> </div> </div> )} </Fragment> ); const displayPercentageCircleAndGift = () => ( <Fragment> {showPercentage ? ( <Fragment> <Tilt className="Tilt" options={{ max : 90, reverse: true }} > <div className="container-center animated zoomIn"> <ReactjsPercentageCircle percent={getPercentage(maxScore, userScore)} color="var(--mainPink)" radius={70} borderWidth={10} textStyle="text-pink text-em-1-2" /> </div> </Tilt> {displayGift()} </Fragment> ) : null} </Fragment> ); return( <div className="my-3 text-white text-em-2-5 text-center text-default"> {displayAllScores()} {displayPercentageCircleAndGift()} </div> ); }; const showRules = () => ( <Link to="/regulamento"> <div onClick={playBeep} id="rules" className="text-container font-weight-italic text-center" style={{color: "var(--mainPink)", cursor: "pointer"}} > Consulte<br />as Regras Aqui </div> </Link> ); const showMoreOptionsBtn = () => { const speedDial = { actions: [ //the order rendered is inverse from the bottom to top { icon: <ExitToAppIcon />, name: 'Desconectar', backColor: 'var(--mainPink)', onClick: () => { logout(dispatch); playBeep(); } }, { icon: <LoyaltyIcon />, name: 'Adicionar Pontos', backColor: 'var(--mainPink)', onClick: () => { hideComponent(dispatch, "login"); showComponent(dispatch, "purchaseValue"); history.push("/cliente/pontos-fidelidade"); playBeep(); }, }, ] } return( <SpeedDialButton actions={speedDial.actions} tooltipOpen={true} size="large" FabProps={{ backgroundColor: 'var(--mainPink)', size: 'medium', }} root={{ 
bottom: '30px', right: '40px', }} hidden={false} /> ); } return ( <div> <div className="margin-auto-90"> <ImageLogo /> </div> <section> {isUserAuth && role === "cliente" ? ( <Fragment> <br/> <br/> {showGreeting()} {showScores()} <div className="mb-4"> <RatingStars score={userScore} /> </div> <div className="mb-4"> {showRules()} </div> {showMoreOptionsBtn()} <audio id="appBtn" src="https://ia601500.us.archive.org/29/items/confirmation-keypad-sound/confirmation-keypad-sound.wav"></audio> </Fragment> ) : showLogin()} </section> <p className="text-small text-left text-white">v4.6.8</p> </div> ); } export default withRouter(ClientMobileApp); /* {loading ? ( <LoadingThreeDots color="white" /> ) : ( )} */ /* <div className="my-3 container-center"> <img src="/img/official-logo.jpg" alt="logo" width={300} height="auto"/> </div> */
{ animateNumber( userScoreRef.current, 0, userScore, 3000, setShowPercentage ); }
conditional_block
DanXuanCiHui.js
export default [ { question: `I have been looking forward to from my parents. `, options: [`hear `, `being heared `, `be heared `, `hearing`], answer: "D" }, { question: `The manager will not us to use his car. `, options: [`have `, `let `, `agree `, `allow`], answer: "D" }, { question: ` Her and then try to copy what she does.`, options: [`Mind `, `See `, `Stare at `, `Watch`], answer: "D" }, { question: `Will you me a favor, please? `, options: [`do `, `make `, `bring `, `give`], answer: "A" }, { question: `It's bad for you to smoke in the public places where smoking is not allowed.`, options: [`behavior `, `action `, `manner `, `movement`], answer: "A" }, { question: `-it's a good idea. But who's going to the plan? - I think John and Peter will. `, options: [`carry out `, `get through `, `take in `, `set aside`], answer: "A" }, { question: `The computer system suddenly while he was searching for information on the Internet. `, options: [`broke down `, `broke out `, `broke up `, `broke in`], answer: "A" }, { question: `If she wants to stay thin, she must make a in her diet. `, options: [`change `, `turn `, `run `, `go`], answer: "A" }, { question: ` theWar of Independence, the United States was an England colony. `, options: [ `Before `, `At `, `In `, `Between` ], answer: "A" }, { question: `A police officer claimed that he had attempted to paying his fare. `, options: [`avoid `, `reject `, `refuse `, `neglect`], answer: "A" }, { question: `Mike is better than Peter swimming. `, options: [`for `, `at `, `on `, `in`], answer: "B" }, { question: `The young lady coming over to us our English teacher; the way she walks tell us that! 
`, options: [`must be `, `can be `, `would be `, `could be`], answer: "A" }, { question: `Eggs, though rich in nourishments, have of fat.`, options: [ `a large number of `, `the large number `, ` a large amount `, `the large amount` ], answer: "C" }, { question: `Neither John his father was able to wake up early enough to catch the morning train. `, options: [`nor `, `or `, `but `, `and`], answer: "A" }, { question: `Jane's dress is similar in design her sister's.`, options: [`like `, `with `, `to `, `as`], answer: "C" }, { question: `His salary as a driver is much higher than . `, options: [`a porter `, `is a porter `, `as a porter `, `that of porter`], answer: "D" }, { question: `-Write to me when you get home. -OK,I . `, options: [`must `, ` should `, `will `, `can`], answer: "C" }, { question: `Tom is talkative. I'm sure you'll soon get tired him. `, options: [`of `, `with `, `at `, `on`], answer: "A" }, { question: `I don't know to deal with such matter. `, options: [`what `, `how `, `which `, `/`], answer: "B" }, { question: `-is your girl friend like? -She is very kind and good-looking. `, options: [`How `, `What `, `Which `, `Who`], answer: "B" }, { question: `He driving me home, even though I told him I lived nearby. `, options: [`insisted on `, `insisted at `, `insisted that `, `insisted in`], answer: "A" }, { question: `We came finally the conclusion that she has been telling lies all the time.`, options: [`of `, `into `, `to `, `at`], answer: "C" }, { question: `I won't make the mistake next time. `, options: [`like `, `same `, `near `, `similar`], answer: "B" }, { question: `He lives in the house where he was born. `, options: [`already `, `yet `, `still `, `ever`], answer: "C" }, { question: `I am not used to speaking public.`, options: [`in `, `at `, `on `, `to`], answer: "A" }, { question: `I didn't know what to do, but then an idea suddenly to me. 
`, options: [`appeared `, `happened `, `occurred `, `emerged`], answer: "C" }, { question: `A pair of spectacles what I need at the moment.`, options: [`is `, `are `, `has `, `have`], answer: "A" }, { question: `You'd better a doctor as soon as possible.`, options: [`seeing `, `saw `, `see `, `seen`], answer: "C" }, { question: ` These honors he received a sun of money.`, options: [`Except `, `But `, `Besides `, `Outside`], answer: "C" }, { question: `Would you let to the park with my classmate, Mum? `, options: [`me go `, `me going `, `I go `, `I going`], answer: "A" }, { question: `Neither Bill or his parents at home. `, options: [`is `, `has `, `are `, `was`], answer: "C" }, { question: `If you don't want to get wet, you had better this umbrella with you. `, options: [`take `, `to take `, `taken `, `for taking`], answer: "A" }, { question: `Measles a long time to get over. `, options: [`spend `, `spends `, `take `, `takes`], answer: "D" }, { question: `-Do you want to wait? 您愿意等吗? -Five days too long for me to wait. `, options: [` was `, `were `, `is `, `are`], answer: "C" }, { question: `There a book and some magazines on the desk. `, options: [`is `, `are `, `have `, `has `], answer: "A" }, { question: `She is not only my classmate also my good friend. `, options: [`or `, `but `, `and `, `too`], answer: "B" }, { question: `He asked the waiter the bill. `, options: [`on `, `of `, `for `, `after`], answer: "C" }, { question: `When Lily came home at 5 p.m. yesterday, her mother dinner in the kichen.`, options: [`cooked `, `was cooking `, `cooks `, `has cooked`], answer: "B" }, { question: `Di you noticed the guy head looked like a big potato? `, options: [`who `, `which `, `whose `, `whom`], answer: "C" }, { question: `I don't know the park, but it's to be quite beautiful. `, options: [ `said `, `old `, `spoken `, `talked ` ], answer: "A" }, { question: `While I was in the university, I learned taking a photo, is very useful now for me. 
`, options: [`it `, `which `, `that `, ` what`], answer: "B" }, { question: `On average, a successful lawyer has to talk to several a day. `, options: [`customers `, `supporters `, `guests `, `clients`], answer: "D" }, { question: `What is the train to Birmingham? `, options: [`fee `, `tip `, `fare `, `cost`], answer: "C" }, { question: ` You shouldn't your time like that, Bob. You have to finish your school work tonight. `, options: [`cut `, `do `, `kill `, `kick`], answer: "C" }, { question: `Both the kids and their parents English, I think. I know it from their accent. `, options: [`is `, `been `, `are `, `Was `], answer: "C" }, { question: `I tried to put a telephone call to him, but his line was always bus.`, options: [`over `, `into `, `away `, ` through`], answer: "D" }, { question: `I hadn't seen him for years, but I his voice on the telephone. `, options: [`realized `, `recognized `, `discovered `, `heard`], answer: "B" }, { question: `She wonders will happen to her private life in the future. `, options: [`that `, `it `, `this `, `what`], answer: "D" }, { question: ` The higher the temperature, the liquid evaporates. `, options: [`the faster `, `the most fast `, `the slower `, `the more slower`], answer: "A" }, { question: `Australia is one of the few countries people drive on the left of the road.`, options: [`which `, `that `, `where `, `on which`], answer: "C" }, { question: `Sunday is the day people usually don't go to work. `, options: [`when `, `which `, `in which `, ` on which`], answer: "A" }, { question: ` you know,David has been well lately. `, options: [`Which `, `As `, `What `, `When`], answer: "B" }, { question: `The harder you study, you will learn. `, options: [`That `, `It `, `the more `, `what`], answer: "C" }, { question: ` They got here an hour than the others. 
`, options: [`early `, `much early `, `more early `, `earlier`], answer: "D" }, { question: `The grey building is the place where the workers live, and the white building is the place where the spare parts .`, options: [`are producing `, `are produced `, `produced `, `being produced`], answer: "B" }, { question: `Professor smith promised to look my paper, that is, to read it carefully before the defence. `, options: [`after `, `over `, `on `, `into`], answer: "B" }, { question: `Our house is about a mile from the railway station and there are not many houses . `, options: [`in between `, `far apart `, `among them `, `from each other`], answer: "A" }, { question: `As the bus came around the corner, it ran a big tree by the roadside. `, options: [`into `, `on `, `over `, `up`], answer: "A" }, { question: `Had you come five minutes earlier, you the train to Birmingham. But now you missed it. `, options: [ ` would catch `, `would have caught `, `could catch `, `should catch` ], answer: "B" }, { question: `Never before see such a terrible car accident on the road.`, options: [`I have `, `have I `, `I did `, `did I`], answer: "D" }, { question: `This kind of material expands the temperature increasing. `, options: [`to `, `for `, `with `, `at`], answer: "C" }, { question: `People at the party worried about him because no one was aware he had gone. `, options: [ `of where `, `of the place where `, `where `, `the place` ], answer: "A" }, { question: `A sudden noise of a fire-engine made him to the door. `, options: [`hurrying `, `hurried `, `hurry `, `to hurry`], answer: "C" }, { question: `No matter , the little sisters managed to round the sheep up and drive them back home safely. `, options: [ `it was snowing hard `, `hard it was snowing `, `how it was snowing hard `, `how hard it was snowing` ], answer: "D" }, { question: `There's lots of fruit the tree. Our little cat is also in the tree. 
`, options: [ `in `, `at `, `under `, `On ` ], answer: "D" }, { question: `How can be if he is not ? `, options: [ `listen; hearing `, `hear; listening `, `be listening; heard `, `be hearing; listened to` ], answer: "B" }, { question: `The students were all entertained in a Mexican restaurant, at Professor Brian's . `, options: [`money `, `pay `, `expense `, `loss`], answer: "C" }, { question: `Tom what did you do with my documents? I have never seen such a and disorder. `, options: [`mass `, `mess `, `guess `, `bus`], answer: "B" }, { question: `The atmosphere certain gases mixed together in definite proportions.`, options: [` composes of `, `is made up `, `consists of `, `makes up of`], answer: "C" }, { question: `The girl is of a film star. `, options: [`somebody `, `something `, `anybody `, `anyone`], answer: "B" }, { question: `It's time we the lecture because everybody has arrived. `, options: [`will start `, `shall start `, `start `, `started`], answer: "D" }, { question: `Therefore, other things equal, the member of workers that employers want decreases. `, options: [`is `, `are `, `being `, `having`], answer: "C" }, { question: `Two days is not enough for him to finish the work. He needs day. `, options: [`other `, `the other `, `the third `, `a third`], answer: "D" }, { question: `The red flower goes from one to in the class. `, options: [`the other `, `others `, `another `, `other`], answer: "C" }, { question: `Once environmental damage , it takes many years for the system to recover. `, options: [ `is to do `, `does `, `had done `, `is done ` ], answer: "D" }, { question: `We worked hard and completed the task . `, options: [ `in the time `, `on the time `, `ahead of time `, `before time.` ], answer: "C" }, { question: ` I didn't expect you to turn at the meeting yesterday. `, options: [ `up `, `to `, `out `, `over ` ], answer: "A" }, { question: `I'd like the teacher classes are very interesting and creative. `, options: [ `which `,
answer: "C" }, { question: `-He is not seriously ill, only a headache. `, options: [ `obvious `, `delicate `, `slight `, `temporary` ], answer: "C" }, { question: `The boy is not happy at the new school. He has friends there. `, options: [ `few `, `a few `, `little `, `a littl` ], answer: "A" }, { question: `I fell and hurt myself while I basketball yesterday.`, options: [ `was playing `, `am playing `, `play `, `played ` ], answer: "A" }, { question: `Tom more than twenty pounds on the novel. `, options: [ `spent `, `paid `, `cost `, `took ` ], answer: "A" }, { question: `Don't forget the window before leaving the room. `, options: [ `to have closed `, `to close `, `having closed `, `closing ` ], answer: "B" }, { question: `Twenty people were wounded in the air crash. `, options: [ `quickly `, `wrongly `, `bitterly `, ` seriously ` ], answer: "D" }, { question: `The top of the Great Wall is for five horses to go side by side. `, options: [ `wide `, `so wide `, `wide enough `, `enough wide` ], answer: "C" }, { question: `We have missed the last bus, I'm afraid we have no but to take taxi. `, options: [ `way `, `choice `, `possibility `, `selection ` ], answer: "B" }, { question: `Nancy is considered to be the other students in her class. `, options: [ `less intelligent `, `the most intelligent `, `intelligent as well `, `as intelligent as ` ], answer: "D" }, { question: `It's that he was wrong. `, options: [ `clearly `, `clarity `, `clear `, `clearing` ], answer: "C" }, { question: ` you are leaving tomorrow, we can have dinner together tonight.`, options: [ `Since `, `While `, `For `, `Before` ], answer: "A" }, { question: `I would like to do the job you don't force me to study. `, options: [ `in case `, `although `, `though `, `as long as` ], answer: "D" }, { question: `The reason I did not go abroad was a job in my home town. 
`, options: [ `because `, `due to `, `that I got `, `because of getting` ], answer: "C" }, { question: ` She survived the accident is miracle. `, options: [ `What `, `That `, `As `, `Which` ], answer: "B" }, { question: `I often see the road on his way home. `, options: [ `he cross `, `him cross `, `him crossed `, `he crossing` ], answer: "B" }, { question: `His mother alone since his father died. `, options: [ `lived `, `lives `, `has lived `, `is living` ], answer: "C" }, { question: `The workers are busy models for the exhibition. `, options: [ `to make `, `with making `, `being making `, `making` ], answer: "D" }, { question: `It was well known that Thomas Edison the electric lamp. `, options: [ `discovered `, `invented `, `found `, `developed` ], answer: "B" } ];
`who `, `whose `, `what` ],
random_line_split
trans_dist_mmei_v1_batch.py
py_name = 'trans_dist_mmei_v1_batch.py' # Leo Vo, Feb 2019 # For measuring distance between end of protospacer to TN7 flanks for NGS reads from an MmeI NGS library # Reads in FastQ files outputed from GENEIOUS Prime Software, which have transposon flank sequences removed # and are reverse complemented from the original Illumina fastq output. # Uses a master csv to run multiple files in a batch # Outputs: The script can output multiple things depending on the output options in the code below: # - An Excel file containing all relevant information # - Plots of transposition distances; either RL and LR plotted separately, or overlapped together import os import fnmatch from Bio import SeqIO import xlsxwriter from pathlib import Path import datetime as dt import csv import heapq import matplotlib.pyplot as plt plt.rcParams['svg.fonttype'] = 'none' # important so that text stays as characters in the svg output # change directory based on where input files are os.chdir('C:\\Users\\Leo Vo\\Desktop\\SEK_NGS\\190217_Integration_site_analysis_SEK') # change if necessary exp_date = "190202" user = "Leo Vo" # output options excel = False # outputs an excel spreadsheet summary excel_name = '{}_transposition_distance_mmei_duplicates.xlsx'.format(exp_date) ## Change output excel file name here plot = True # outputs plots of integration around primary site plot_overlap = True # overlaps the distributions of the opposite orientations **Will not plot if plot = False above #reads in BL21DE3 RefSeq for record in SeqIO.parse("genome.fasta", "fasta"): genome = record.seq.upper() # remember to convert to upper case genome_rc = genome.reverse_complement() if excel: # initializes excel output file, define formats used below excel_out_path = Path(excel_name) if not excel_out_path.is_file(): log = xlsxwriter.Workbook(excel_name) bold = log.add_format({'bold': True}) upsizebold = log.add_format() upsizebold.set_font_size(16) upsizebold.set_bold() percentage_format = log.add_format() 
percentage_format.set_num_format('0.00%') deci3_format = log.add_format() deci3_format.set_num_format('0.000') red_font = log.add_format({'font_color': 'red'}) blue_font = log.add_format({'font_color': 'blue'}) red_percent = log.add_format() red_percent.set_num_format('0.00%') red_percent.set_font_color('red') blue_percent = log.add_format() blue_percent.set_num_format('0.00%') blue_percent.set_font_color('blue') red_deci3 = log.add_format() red_deci3.set_num_format('0.000') red_deci3.set_font_color('red') blue_deci3 = log.add_format() blue_deci3.set_num_format('0.000') blue_deci3.set_font_color('blue') # make landing info sheet containing all information shared by sheets in the excel file infosheet = log.add_worksheet("General Info") infosheet.write(0, 0, "NGS MmeI Analysis of Transposition Distance ", upsizebold) infosheet.set_column(0, 0, 35) infosheet.write(2, 0, "NextSeq/NGS Data Collection Date", bold) infosheet.write(2, 1, exp_date) infosheet.write(3, 0, "Python Data Analysis Date", bold) infosheet.write(3, 1, str(dt.datetime.now())[0:10]) infosheet.write(4, 0, "Username/Initials", bold) infosheet.write(4, 1, user) infosheet.write(5, 0, "Python Code Used", bold) infosheet.write(5, 1, py_name) infosheet.write(6, 0, "Notes", bold) infosheet.write(7, 0, '"Query Window" - larger window for query distances (not the same as on-target window' ', same strand as the BL21 RefSeq') infosheet.write(8, 0, '"Genomic Base" - Base directly 5\' of integration site, on the SAME strand as protospacer') infosheet.write(9, 0, '"Example Reads" - Reads here are trimmed to 17bp and the reverse complement of reads in' 'the Geneious fastq output', ) # read in csv file containing sample codes, info and spacers. # locate fastq file based on code # for each file, determine spacer orientation then switch genome to revcom if needed (refseq) # match spacer with refseq # That + 32 = spacer_end. 
From refseq take window 400 before and 400 after spacer_end as refseq_query # for each fastq take reads, reverse and trim them down to the last 17 bp. Then map to refseq_query or revcom of that # if it maps to location on query, then it's RL, and trans_dist is location + 17 - spacer_end # if it maps to location on query revcom, it's LR, then trans_dist is len refseq - location -17 + 5 - spacer_end # add these trans_dist into separate out_tally lists for RL and LR, and also into a common out_tally # determine most freq trans_dist from common out_tally - main_site # set on_target as the window -50 and +50 from trans_site. # go through the separate out tally lists and remove any trans_dist not within on_target # sum of number of items from these two lists versus total of reads in the fastq is on-target freq # ratio of the 2 lists is the orientation bias # output the separate trans dists into excel output in different colors, and also a common column # use matplotlib to plot and save separately graphs for the 2 orientations def
(code, psl, description, filename, direction, refseq, spacer, date, excel, plot, plot_overlap): # main analysis function # map spacer to refseq and determine query window query_length = 500 if refseq.find(spacer) >= 0: spacer_end = refseq.find(spacer) + 32 query = refseq[spacer_end-90:spacer_end+query_length] # '-99' accounts for the -50bp of the on-target later query_rc = query.reverse_complement() spacer_end = 90 # resets spacer end index to middle of query window (no longer using full refseq) else: print("ERROR - Spacer not found within RefSeq") return total = 0 # counts total reads in fastq file out_list_all = [] # list holding common trans_dist out_list_rl = [] # list holding indv RL trans_dist values out_list_lr = [] # list holding indv LR trans_dist values # these lists are longer than query_length to hold negative values of trans_dist # the output excel cuts of list using query_length so those values will not show out_tally_rl = [0] * (query_length+spacer_end+20) # list tallying freq of tran_dist for RL out_tally_lr = [0] * (query_length+spacer_end+20) # list tallying freq of tran_dist for LR example_reads_rl = ['X'] * (query_length+spacer_end+20) # to hold example reads mapping to each trans_dist for RL example_reads_lr = ['X'] * (query_length+spacer_end+20) # to hold example reads mapping to each trans_dist for LR for record in SeqIO.parse(filename, "fastq"): # loop through all reads in fastq file total += 1 rev_seq = record.seq.reverse_complement() new_seq = rev_seq[len(rev_seq)-17:] # trim to last 17 base pair if query.find(new_seq) >= 0: # corresponds to RL trans_dist = query.find(new_seq) + 17 - spacer_end # distance in bp from end of protospacer out_list_all.append(trans_dist) # append to holding lists for processing later out_list_rl.append(trans_dist) if out_tally_rl[trans_dist] == 0: # add read to example list if this is the first occurrence example_reads_rl[trans_dist] = new_seq out_tally_rl[trans_dist] += 1 # count into tally list elif 
query_rc.find(new_seq) >= 0: # corresponds to LR trans_dist = len(query) - query_rc.find(new_seq) - 17 + 5 - spacer_end # dist in bp from end of protospacer out_list_all.append(trans_dist) # append to tally lists for processing later out_list_lr.append(trans_dist) if out_tally_lr[trans_dist] == 0: # add read to example list if this is the first occurrence example_reads_lr[trans_dist] = new_seq out_tally_lr[trans_dist] += 1 # count into tally list # determine most frequent trans_dist out_tally_all = [0] * query_length for i in range(0, len(out_tally_all)): out_tally_all[i] = out_tally_rl[i] + out_tally_lr[i] for x, y in enumerate(out_tally_all): if y == max(out_tally_all): main_site = x + spacer_end # remember to convert dist to site of integration # define on target window on_target_lower = main_site - 50 on_target_upper = main_site + 50 # move any trans_dist within this window into a final holding list and clears old holding list final_list_rl = [] for dist in out_list_rl: if on_target_lower <= (dist + spacer_end) <= on_target_upper: # convert dist to site of integration final_list_rl.append(dist) final_list_lr = [] for dist in out_list_lr: if on_target_lower <= (dist + spacer_end) <= on_target_upper: # convert dist to site of integration final_list_lr.append(dist) # determine on target frequency on_target_total = len(final_list_rl) + len(final_list_lr) off_target = total - on_target_total # determine top 3 most common trans_dist for highlight box # for combined RL and LR indices = [] # for zipping with out_tally lists for i in range(0, query_length): indices.append(i) top_3 = heapq.nlargest(3, zip(out_tally_all, indices)) # exists as a list of smaller 2-item lists if excel: #set up excel output sheet logsheet = log.add_worksheet(code) logsheet.set_column(0, 0, 24) logsheet.set_column(1, 1, 20) logsheet.set_column(2, 6, 17) logsheet.set_column(7, 12, 19) logsheet.write(3, 3, " ") # for clearer aesthetic logsheet.write(0, 0, "Sample ID", bold) logsheet.write(0, 1, 
code) logsheet.write(1, 0, "Description", bold) logsheet.write(1, 1, description) logsheet.write(2, 0, "Target Location", bold) if direction == 'fw': logsheet.write(2, 1, "5' of Integration Site") else: logsheet.write(2, 1, "3' of Integration Site (RevCom)") logsheet.write(3, 0, "Query Window", bold) if direction == 'fw': logsheet.write(3, 1, str(query)) if direction == 'rv': logsheet.write(3, 1, str(query_rc)) logsheet.write(4, 0, "Plasmid encoding gRNA", bold) logsheet.write(4, 1, psl) logsheet.write(5, 0, "Protospacer", bold) logsheet.write(5, 1, spacer) logsheet.write(6, 0, "Total Reads", bold) logsheet.write(6, 1, total) logsheet.write(7, 0, "On Target Reads", bold) logsheet.write(7, 1, on_target_total) logsheet.write(7, 2, on_target_total / total, percentage_format) logsheet.write(8, 0, "Off Target Reads", bold) logsheet.write(8, 1, off_target) logsheet.write(8, 2, off_target / total, percentage_format) logsheet.write(9, 0, "On Target Reads in RL Orientation", bold) logsheet.write(9, 1, len(final_list_rl)) logsheet.write(9, 2, len(final_list_rl) / total, percentage_format) logsheet.write(10, 0, "On Target Reads in LR Orientation", bold) logsheet.write(10, 1, len(final_list_lr)) logsheet.write(10, 2, len(final_list_lr) / total, percentage_format) logsheet.write(11, 1, "Protospacer-Transposon Distance", bold) for i in range(0, query_length): logsheet.write(i + 12, 1, i) logsheet.write(11, 0, "Genomic Base", bold) for i in range(-1, query_length - 1): logsheet.write(i + 13, 0, query[i+spacer_end]) # shift back 1 to get the base right before transposition logsheet.write(11, 2, "Number of Reads (RL)", bold) for i in range(0, query_length): logsheet.write(i + 12, 2, out_tally_rl[i], red_font) logsheet.write(11, 3, "% of Total Reads (RL)", bold) for i in range(0, query_length): logsheet.write(i + 12, 3, out_tally_rl[i]/total, red_percent) logsheet.write(11, 4, "Normalized Read Count (RL)", bold) if max(out_tally_rl) > 0: for i in range(0, query_length): 
logsheet.write(i + 12, 4, out_tally_rl[i] / max(out_tally_rl), red_deci3) logsheet.write(11, 5, "Number of Reads (LR)", bold) for i in range(0, query_length): logsheet.write(i + 12, 5, out_tally_lr[i], blue_font) logsheet.write(11, 6, "% of Total Reads (LR)", bold) for i in range(0, query_length): logsheet.write(i + 12, 6, out_tally_lr[i] / total, blue_percent) logsheet.write(11, 7, "Normalized Read Count (LR)", bold) if max(out_tally_lr) > 0: for i in range(0, query_length): logsheet.write(i + 12, 7, out_tally_lr[i] / max(out_tally_lr), blue_deci3) logsheet.write(11, 8, "Number of Reads (Combined)", bold) for i in range(0, query_length): logsheet.write(i + 12, 8, out_tally_all[i]) logsheet.write(11, 9, "% of Total Reads (Combined)", bold) for i in range(0, query_length): logsheet.write(i + 12, 9, out_tally_all[i] / total, percentage_format) logsheet.write(11, 10, "Normalized Read Count (Combined)", bold) if max(out_tally_all) > 0: for i in range(0, query_length): logsheet.write(i + 12, 10, out_tally_all[i] / max(out_tally_all), deci3_format) logsheet.write(11, 11, "Example Reads RL", bold) for i in range(0, query_length): logsheet.write(i + 12, 11, str(example_reads_rl[i]), red_font) logsheet.write(11, 12, "Example Reads LR", bold) for i in range(0, query_length): logsheet.write(i + 12, 12, str(example_reads_lr[i]), blue_font) # 'highlight box', take from top_3 list determined above logsheet.write(0, 4, 'Most Frequent Transposition Distances (bp)', bold) logsheet.write(1, 4, top_3[0][1]) logsheet.write(1, 5, top_3[0][0] / total, percentage_format) logsheet.write(1, 6, out_tally_rl[top_3[0][1]]/total, red_percent) logsheet.write(1, 7, out_tally_lr[top_3[0][1]] / total, blue_percent) logsheet.write(2, 4, top_3[1][1]) logsheet.write(2, 5, top_3[1][0] / total, percentage_format) logsheet.write(2, 6, out_tally_rl[top_3[1][1]] / total, red_percent) logsheet.write(2, 7, out_tally_lr[top_3[1][1]] / total, blue_percent) logsheet.write(3, 4, top_3[2][1]) logsheet.write(3, 
5, top_3[2][0] / total, percentage_format) logsheet.write(3, 6, out_tally_rl[top_3[2][1]] / total, red_percent) logsheet.write(3, 7, out_tally_lr[top_3[2][1]] / total, blue_percent) logsheet.write(4, 4, 'On Target Frequency', bold) logsheet.write(4, 5, on_target_total / total, percentage_format) logsheet.write(5, 4, 'Orientation Bias (R->L:L->R)', bold) logsheet.write(5, 5, '{} : 1'.format(round(len(final_list_rl)/(len(final_list_lr)+0.00000001), 2))) # in case LR is 0 # plot and save graphs if plot setting = True # Only plots a certain window (e.g. from 40bp to 60bp) - change that in the xlim options below if plot: x_axis = [] # artificial x-axis for i in range(20, 61): x_axis.append(i) y_rl = out_tally_rl[20:61] y_lr = out_tally_lr[20:61] max_y = max(max(y_rl), max(y_lr)) # for scaling y axis if not plot_overlap: fig, axs = plt.subplots(1, 2) fig. tight_layout(rect=[0.15, 0.1, 1, 0.9]) title = fig.suptitle("{} - {}\nOn-target frequency: {}%\nOrientation bias (R->L:L->R): {}:1" .format(code, description, round(100*on_target_total/total, 1), round(len(final_list_rl)/(len(final_list_lr)+0.00000001), 2))) title.set_y(0.88) # first graph on the left in red axs[0].bar(x_axis, y_rl, color='tab:orange', width=1.0) axs[0].set_title("R->L Integration Events") # second graph on the right in blue axs[1].bar(x_axis, y_lr, color='tab:blue', width=1.0) axs[1].set_title("L->R Integration Events") fig.subplots_adjust(wspace=0.7) for axs in axs.flat: axs.spines['top'].set_visible(False) axs.spines['right'].set_visible(False) # ax.spines['bottom'].set_visible(False) # axs.spines['left'].set_visible(False) axs.spines['bottom'].set_position('zero') axs.spines['left'].set_bounds(0, max_y) axs.set_xticks([40,45,50,55,60]) axs.set_xticklabels([40,45,50,55,60]) axs.set_yticks([0, max_y]) axs.set_yticklabels([0, max_y]) axs.set_xlim(left=39, right=61) ## Change window here axs.set_ylim(bottom=0, top=1.05*(max_y)) axs.set(xlabel="Distance from target site (bp)", ylabel="Read count") 
axs.yaxis.set_label_coords(-0.1,0.5) fig.set_size_inches(6, 4.2) fig.subplots_adjust(top=0.65) #plt.xlabel("Distance from target site (bp)") #plt.ylabel("Read count") #plt.gca().set_xlim(left=35, right=60) #plt.gca().set_ylim(bottom=0, top=(1.3*max_y)) # plt.savefig('test.svg', dpi=250) plt.savefig('Dist_Output\\{}_{}_{}.png'.format(date, code, description), dpi=300) plt.close() if plot_overlap: fig, axs = plt.subplots(1, 1, tight_layout=True) title = fig.suptitle("{} - {} / On-target = {}% / Bias = {} :1".format( code, description, round(100*on_target_total/total, 1), round(len(final_list_rl)/(len(final_list_lr)+0.00000001), 2))) title.set_y(0.9) # LR graph is colorless with a border axs.bar(x_axis, y_lr, color='none', edgecolor='#153C6B', linewidth=1.0, width=1.01, zorder=1) # RL graph is blue with no border (behind bordered RL) axs.bar(x_axis, y_rl, color='#83B0DD', edgecolor='#83B0DD', linewidth=1.0, width=1.01, zorder=0) axs.spines['top'].set_visible(False) axs.spines['right'].set_visible(False) # ax.spines['bottom'].set_visible(False) # axs.spines['left'].set_visible(False) axs.spines['bottom'].set_position('zero') axs.spines['left'].set_bounds(0, max_y) axs.set_xticks([40, 42, 45, 50, 55, 60]) axs.set_xticklabels([40, 0, 45, 50, 55, 60]) axs.set_yticks([0, max_y]) axs.set_yticklabels([0, max_y]) axs.set_xlim(left=42, right=58) ## Change window here axs.set_ylim(bottom=0, top=1.25 * (max_y)) axs.set(xlabel="Distance from target site (bp)", ylabel="Read count") axs.yaxis.set_label_coords(-0.05, 0.4) fig.set_size_inches(5, 4.2) # plt.xlabel("Distance from target site (bp)") # plt.ylabel("Read count") # plt.gca().set_xlim(left=35, right=60) # plt.gca().set_ylim(bottom=0, top=(1.3*max_y)) # plt.savefig('test.svg', dpi=250) plt.savefig('Dist_Output_Overlap\\{}_{}_overlapped_{}.svg'.format(date, code, description), dpi=500) plt.close() # analyze all input files back-to-back using a master input .csv file with open('input_spacers.csv', newline='') as csvfile: reader 
= csv.reader(csvfile, delimiter=',') next(reader, None) for row in reader: code = row[0] filename = 'none' for i in os.listdir('.'): if fnmatch.fnmatch(i, "*{}*.fastq".format(code)): filename = i break if filename != 'none': description = row[6] psl = row[7] direction = str(row[4]).lower() ## spacer direction ## set refeq based on direction if direction == 'fw': refseq = genome elif direction == 'rv': refseq = genome_rc else: print("Direction Error = {}".format(code)) break spacer = str(row[10]).upper() #spacer sequence, convert to uppercase # run main dist_find function with the relevant input variables dist_find(code, psl, description, filename, direction, refseq, spacer, exp_date, excel, plot, plot_overlap) else: print("WARNING - File Not Found For {}".format(code)) if excel: log.close()
dist_find
identifier_name
trans_dist_mmei_v1_batch.py
py_name = 'trans_dist_mmei_v1_batch.py' # Leo Vo, Feb 2019 # For measuring distance between end of protospacer to TN7 flanks for NGS reads from an MmeI NGS library # Reads in FastQ files outputed from GENEIOUS Prime Software, which have transposon flank sequences removed # and are reverse complemented from the original Illumina fastq output. # Uses a master csv to run multiple files in a batch # Outputs: The script can output multiple things depending on the output options in the code below: # - An Excel file containing all relevant information # - Plots of transposition distances; either RL and LR plotted separately, or overlapped together import os import fnmatch from Bio import SeqIO import xlsxwriter from pathlib import Path import datetime as dt import csv import heapq import matplotlib.pyplot as plt plt.rcParams['svg.fonttype'] = 'none' # important so that text stays as characters in the svg output # change directory based on where input files are os.chdir('C:\\Users\\Leo Vo\\Desktop\\SEK_NGS\\190217_Integration_site_analysis_SEK') # change if necessary exp_date = "190202" user = "Leo Vo" # output options excel = False # outputs an excel spreadsheet summary excel_name = '{}_transposition_distance_mmei_duplicates.xlsx'.format(exp_date) ## Change output excel file name here plot = True # outputs plots of integration around primary site plot_overlap = True # overlaps the distributions of the opposite orientations **Will not plot if plot = False above #reads in BL21DE3 RefSeq for record in SeqIO.parse("genome.fasta", "fasta"): genome = record.seq.upper() # remember to convert to upper case genome_rc = genome.reverse_complement() if excel: # initializes excel output file, define formats used below excel_out_path = Path(excel_name) if not excel_out_path.is_file(): log = xlsxwriter.Workbook(excel_name) bold = log.add_format({'bold': True}) upsizebold = log.add_format() upsizebold.set_font_size(16) upsizebold.set_bold() percentage_format = log.add_format() 
percentage_format.set_num_format('0.00%') deci3_format = log.add_format() deci3_format.set_num_format('0.000') red_font = log.add_format({'font_color': 'red'}) blue_font = log.add_format({'font_color': 'blue'}) red_percent = log.add_format() red_percent.set_num_format('0.00%') red_percent.set_font_color('red') blue_percent = log.add_format() blue_percent.set_num_format('0.00%') blue_percent.set_font_color('blue') red_deci3 = log.add_format() red_deci3.set_num_format('0.000') red_deci3.set_font_color('red') blue_deci3 = log.add_format() blue_deci3.set_num_format('0.000') blue_deci3.set_font_color('blue') # make landing info sheet containing all information shared by sheets in the excel file infosheet = log.add_worksheet("General Info") infosheet.write(0, 0, "NGS MmeI Analysis of Transposition Distance ", upsizebold) infosheet.set_column(0, 0, 35) infosheet.write(2, 0, "NextSeq/NGS Data Collection Date", bold) infosheet.write(2, 1, exp_date) infosheet.write(3, 0, "Python Data Analysis Date", bold) infosheet.write(3, 1, str(dt.datetime.now())[0:10]) infosheet.write(4, 0, "Username/Initials", bold) infosheet.write(4, 1, user) infosheet.write(5, 0, "Python Code Used", bold) infosheet.write(5, 1, py_name) infosheet.write(6, 0, "Notes", bold) infosheet.write(7, 0, '"Query Window" - larger window for query distances (not the same as on-target window' ', same strand as the BL21 RefSeq') infosheet.write(8, 0, '"Genomic Base" - Base directly 5\' of integration site, on the SAME strand as protospacer') infosheet.write(9, 0, '"Example Reads" - Reads here are trimmed to 17bp and the reverse complement of reads in' 'the Geneious fastq output', ) # read in csv file containing sample codes, info and spacers. # locate fastq file based on code # for each file, determine spacer orientation then switch genome to revcom if needed (refseq) # match spacer with refseq # That + 32 = spacer_end. 
From refseq take window 400 before and 400 after spacer_end as refseq_query # for each fastq take reads, reverse and trim them down to the last 17 bp. Then map to refseq_query or revcom of that # if it maps to location on query, then it's RL, and trans_dist is location + 17 - spacer_end # if it maps to location on query revcom, it's LR, then trans_dist is len refseq - location -17 + 5 - spacer_end # add these trans_dist into separate out_tally lists for RL and LR, and also into a common out_tally # determine most freq trans_dist from common out_tally - main_site # set on_target as the window -50 and +50 from trans_site. # go through the separate out tally lists and remove any trans_dist not within on_target # sum of number of items from these two lists versus total of reads in the fastq is on-target freq # ratio of the 2 lists is the orientation bias # output the separate trans dists into excel output in different colors, and also a common column # use matplotlib to plot and save separately graphs for the 2 orientations def dist_find(code, psl, description, filename, direction, refseq, spacer, date, excel, plot, plot_overlap): # main analysis function # map spacer to refseq and determine query window query_length = 500 if refseq.find(spacer) >= 0: spacer_end = refseq.find(spacer) + 32 query = refseq[spacer_end-90:spacer_end+query_length] # '-99' accounts for the -50bp of the on-target later query_rc = query.reverse_complement() spacer_end = 90 # resets spacer end index to middle of query window (no longer using full refseq) else: print("ERROR - Spacer not found within RefSeq") return total = 0 # counts total reads in fastq file out_list_all = [] # list holding common trans_dist out_list_rl = [] # list holding indv RL trans_dist values out_list_lr = [] # list holding indv LR trans_dist values # these lists are longer than query_length to hold negative values of trans_dist # the output excel cuts of list using query_length so those values will not show out_tally_rl = 
[0] * (query_length+spacer_end+20) # list tallying freq of tran_dist for RL out_tally_lr = [0] * (query_length+spacer_end+20) # list tallying freq of tran_dist for LR example_reads_rl = ['X'] * (query_length+spacer_end+20) # to hold example reads mapping to each trans_dist for RL example_reads_lr = ['X'] * (query_length+spacer_end+20) # to hold example reads mapping to each trans_dist for LR for record in SeqIO.parse(filename, "fastq"): # loop through all reads in fastq file total += 1 rev_seq = record.seq.reverse_complement() new_seq = rev_seq[len(rev_seq)-17:] # trim to last 17 base pair if query.find(new_seq) >= 0: # corresponds to RL trans_dist = query.find(new_seq) + 17 - spacer_end # distance in bp from end of protospacer out_list_all.append(trans_dist) # append to holding lists for processing later out_list_rl.append(trans_dist) if out_tally_rl[trans_dist] == 0: # add read to example list if this is the first occurrence example_reads_rl[trans_dist] = new_seq out_tally_rl[trans_dist] += 1 # count into tally list elif query_rc.find(new_seq) >= 0: # corresponds to LR trans_dist = len(query) - query_rc.find(new_seq) - 17 + 5 - spacer_end # dist in bp from end of protospacer out_list_all.append(trans_dist) # append to tally lists for processing later out_list_lr.append(trans_dist) if out_tally_lr[trans_dist] == 0: # add read to example list if this is the first occurrence example_reads_lr[trans_dist] = new_seq out_tally_lr[trans_dist] += 1 # count into tally list # determine most frequent trans_dist out_tally_all = [0] * query_length for i in range(0, len(out_tally_all)): out_tally_all[i] = out_tally_rl[i] + out_tally_lr[i] for x, y in enumerate(out_tally_all): if y == max(out_tally_all): main_site = x + spacer_end # remember to convert dist to site of integration # define on target window on_target_lower = main_site - 50 on_target_upper = main_site + 50 # move any trans_dist within this window into a final holding list and clears old holding list final_list_rl = 
[] for dist in out_list_rl: if on_target_lower <= (dist + spacer_end) <= on_target_upper: # convert dist to site of integration final_list_rl.append(dist) final_list_lr = [] for dist in out_list_lr: if on_target_lower <= (dist + spacer_end) <= on_target_upper: # convert dist to site of integration final_list_lr.append(dist) # determine on target frequency on_target_total = len(final_list_rl) + len(final_list_lr) off_target = total - on_target_total # determine top 3 most common trans_dist for highlight box # for combined RL and LR indices = [] # for zipping with out_tally lists for i in range(0, query_length): indices.append(i) top_3 = heapq.nlargest(3, zip(out_tally_all, indices)) # exists as a list of smaller 2-item lists if excel: #set up excel output sheet logsheet = log.add_worksheet(code) logsheet.set_column(0, 0, 24) logsheet.set_column(1, 1, 20) logsheet.set_column(2, 6, 17) logsheet.set_column(7, 12, 19) logsheet.write(3, 3, " ") # for clearer aesthetic logsheet.write(0, 0, "Sample ID", bold) logsheet.write(0, 1, code) logsheet.write(1, 0, "Description", bold) logsheet.write(1, 1, description) logsheet.write(2, 0, "Target Location", bold) if direction == 'fw': logsheet.write(2, 1, "5' of Integration Site") else: logsheet.write(2, 1, "3' of Integration Site (RevCom)") logsheet.write(3, 0, "Query Window", bold) if direction == 'fw': logsheet.write(3, 1, str(query)) if direction == 'rv': logsheet.write(3, 1, str(query_rc)) logsheet.write(4, 0, "Plasmid encoding gRNA", bold) logsheet.write(4, 1, psl) logsheet.write(5, 0, "Protospacer", bold) logsheet.write(5, 1, spacer) logsheet.write(6, 0, "Total Reads", bold) logsheet.write(6, 1, total) logsheet.write(7, 0, "On Target Reads", bold) logsheet.write(7, 1, on_target_total) logsheet.write(7, 2, on_target_total / total, percentage_format) logsheet.write(8, 0, "Off Target Reads", bold) logsheet.write(8, 1, off_target) logsheet.write(8, 2, off_target / total, percentage_format) logsheet.write(9, 0, "On Target Reads 
in RL Orientation", bold) logsheet.write(9, 1, len(final_list_rl)) logsheet.write(9, 2, len(final_list_rl) / total, percentage_format) logsheet.write(10, 0, "On Target Reads in LR Orientation", bold) logsheet.write(10, 1, len(final_list_lr)) logsheet.write(10, 2, len(final_list_lr) / total, percentage_format) logsheet.write(11, 1, "Protospacer-Transposon Distance", bold) for i in range(0, query_length): logsheet.write(i + 12, 1, i) logsheet.write(11, 0, "Genomic Base", bold) for i in range(-1, query_length - 1): logsheet.write(i + 13, 0, query[i+spacer_end]) # shift back 1 to get the base right before transposition logsheet.write(11, 2, "Number of Reads (RL)", bold) for i in range(0, query_length): logsheet.write(i + 12, 2, out_tally_rl[i], red_font) logsheet.write(11, 3, "% of Total Reads (RL)", bold) for i in range(0, query_length): logsheet.write(i + 12, 3, out_tally_rl[i]/total, red_percent) logsheet.write(11, 4, "Normalized Read Count (RL)", bold) if max(out_tally_rl) > 0: for i in range(0, query_length): logsheet.write(i + 12, 4, out_tally_rl[i] / max(out_tally_rl), red_deci3) logsheet.write(11, 5, "Number of Reads (LR)", bold) for i in range(0, query_length): logsheet.write(i + 12, 5, out_tally_lr[i], blue_font) logsheet.write(11, 6, "% of Total Reads (LR)", bold) for i in range(0, query_length): logsheet.write(i + 12, 6, out_tally_lr[i] / total, blue_percent) logsheet.write(11, 7, "Normalized Read Count (LR)", bold) if max(out_tally_lr) > 0: for i in range(0, query_length): logsheet.write(i + 12, 7, out_tally_lr[i] / max(out_tally_lr), blue_deci3) logsheet.write(11, 8, "Number of Reads (Combined)", bold) for i in range(0, query_length): logsheet.write(i + 12, 8, out_tally_all[i]) logsheet.write(11, 9, "% of Total Reads (Combined)", bold) for i in range(0, query_length): logsheet.write(i + 12, 9, out_tally_all[i] / total, percentage_format) logsheet.write(11, 10, "Normalized Read Count (Combined)", bold) if max(out_tally_all) > 0: for i in range(0, 
query_length): logsheet.write(i + 12, 10, out_tally_all[i] / max(out_tally_all), deci3_format) logsheet.write(11, 11, "Example Reads RL", bold) for i in range(0, query_length): logsheet.write(i + 12, 11, str(example_reads_rl[i]), red_font) logsheet.write(11, 12, "Example Reads LR", bold) for i in range(0, query_length): logsheet.write(i + 12, 12, str(example_reads_lr[i]), blue_font) # 'highlight box', take from top_3 list determined above logsheet.write(0, 4, 'Most Frequent Transposition Distances (bp)', bold) logsheet.write(1, 4, top_3[0][1]) logsheet.write(1, 5, top_3[0][0] / total, percentage_format) logsheet.write(1, 6, out_tally_rl[top_3[0][1]]/total, red_percent) logsheet.write(1, 7, out_tally_lr[top_3[0][1]] / total, blue_percent) logsheet.write(2, 4, top_3[1][1]) logsheet.write(2, 5, top_3[1][0] / total, percentage_format) logsheet.write(2, 6, out_tally_rl[top_3[1][1]] / total, red_percent) logsheet.write(2, 7, out_tally_lr[top_3[1][1]] / total, blue_percent) logsheet.write(3, 4, top_3[2][1]) logsheet.write(3, 5, top_3[2][0] / total, percentage_format) logsheet.write(3, 6, out_tally_rl[top_3[2][1]] / total, red_percent) logsheet.write(3, 7, out_tally_lr[top_3[2][1]] / total, blue_percent) logsheet.write(4, 4, 'On Target Frequency', bold) logsheet.write(4, 5, on_target_total / total, percentage_format) logsheet.write(5, 4, 'Orientation Bias (R->L:L->R)', bold) logsheet.write(5, 5, '{} : 1'.format(round(len(final_list_rl)/(len(final_list_lr)+0.00000001), 2))) # in case LR is 0 # plot and save graphs if plot setting = True # Only plots a certain window (e.g. from 40bp to 60bp) - change that in the xlim options below if plot: x_axis = [] # artificial x-axis for i in range(20, 61): x_axis.append(i) y_rl = out_tally_rl[20:61] y_lr = out_tally_lr[20:61] max_y = max(max(y_rl), max(y_lr)) # for scaling y axis if not plot_overlap: fig, axs = plt.subplots(1, 2) fig. 
tight_layout(rect=[0.15, 0.1, 1, 0.9]) title = fig.suptitle("{} - {}\nOn-target frequency: {}%\nOrientation bias (R->L:L->R): {}:1" .format(code, description, round(100*on_target_total/total, 1), round(len(final_list_rl)/(len(final_list_lr)+0.00000001), 2))) title.set_y(0.88) # first graph on the left in red axs[0].bar(x_axis, y_rl, color='tab:orange', width=1.0) axs[0].set_title("R->L Integration Events") # second graph on the right in blue axs[1].bar(x_axis, y_lr, color='tab:blue', width=1.0) axs[1].set_title("L->R Integration Events") fig.subplots_adjust(wspace=0.7) for axs in axs.flat: axs.spines['top'].set_visible(False) axs.spines['right'].set_visible(False) # ax.spines['bottom'].set_visible(False) # axs.spines['left'].set_visible(False) axs.spines['bottom'].set_position('zero') axs.spines['left'].set_bounds(0, max_y) axs.set_xticks([40,45,50,55,60]) axs.set_xticklabels([40,45,50,55,60]) axs.set_yticks([0, max_y]) axs.set_yticklabels([0, max_y]) axs.set_xlim(left=39, right=61) ## Change window here axs.set_ylim(bottom=0, top=1.05*(max_y)) axs.set(xlabel="Distance from target site (bp)", ylabel="Read count") axs.yaxis.set_label_coords(-0.1,0.5) fig.set_size_inches(6, 4.2) fig.subplots_adjust(top=0.65) #plt.xlabel("Distance from target site (bp)") #plt.ylabel("Read count") #plt.gca().set_xlim(left=35, right=60) #plt.gca().set_ylim(bottom=0, top=(1.3*max_y)) # plt.savefig('test.svg', dpi=250) plt.savefig('Dist_Output\\{}_{}_{}.png'.format(date, code, description), dpi=300) plt.close() if plot_overlap: fig, axs = plt.subplots(1, 1, tight_layout=True) title = fig.suptitle("{} - {} / On-target = {}% / Bias = {} :1".format( code, description, round(100*on_target_total/total, 1), round(len(final_list_rl)/(len(final_list_lr)+0.00000001), 2))) title.set_y(0.9) # LR graph is colorless with a border axs.bar(x_axis, y_lr, color='none', edgecolor='#153C6B', linewidth=1.0, width=1.01, zorder=1) # RL graph is blue with no border (behind bordered RL) axs.bar(x_axis, y_rl, 
color='#83B0DD', edgecolor='#83B0DD', linewidth=1.0, width=1.01, zorder=0) axs.spines['top'].set_visible(False) axs.spines['right'].set_visible(False) # ax.spines['bottom'].set_visible(False) # axs.spines['left'].set_visible(False) axs.spines['bottom'].set_position('zero') axs.spines['left'].set_bounds(0, max_y) axs.set_xticks([40, 42, 45, 50, 55, 60]) axs.set_xticklabels([40, 0, 45, 50, 55, 60]) axs.set_yticks([0, max_y]) axs.set_yticklabels([0, max_y]) axs.set_xlim(left=42, right=58) ## Change window here axs.set_ylim(bottom=0, top=1.25 * (max_y)) axs.set(xlabel="Distance from target site (bp)", ylabel="Read count") axs.yaxis.set_label_coords(-0.05, 0.4) fig.set_size_inches(5, 4.2) # plt.xlabel("Distance from target site (bp)") # plt.ylabel("Read count") # plt.gca().set_xlim(left=35, right=60) # plt.gca().set_ylim(bottom=0, top=(1.3*max_y)) # plt.savefig('test.svg', dpi=250) plt.savefig('Dist_Output_Overlap\\{}_{}_overlapped_{}.svg'.format(date, code, description), dpi=500) plt.close() # analyze all input files back-to-back using a master input .csv file with open('input_spacers.csv', newline='') as csvfile: reader = csv.reader(csvfile, delimiter=',') next(reader, None) for row in reader: code = row[0] filename = 'none' for i in os.listdir('.'): if fnmatch.fnmatch(i, "*{}*.fastq".format(code)): filename = i break if filename != 'none': description = row[6] psl = row[7] direction = str(row[4]).lower() ## spacer direction ## set refeq based on direction if direction == 'fw': refseq = genome elif direction == 'rv': refseq = genome_rc else: print("Direction Error = {}".format(code)) break spacer = str(row[10]).upper() #spacer sequence, convert to uppercase # run main dist_find function with the relevant input variables dist_find(code, psl, description, filename, direction, refseq, spacer, exp_date, excel, plot, plot_overlap) else:
if excel: log.close()
print("WARNING - File Not Found For {}".format(code))
conditional_block
trans_dist_mmei_v1_batch.py
py_name = 'trans_dist_mmei_v1_batch.py' # Leo Vo, Feb 2019 # For measuring distance between end of protospacer to TN7 flanks for NGS reads from an MmeI NGS library # Reads in FastQ files outputed from GENEIOUS Prime Software, which have transposon flank sequences removed # and are reverse complemented from the original Illumina fastq output. # Uses a master csv to run multiple files in a batch # Outputs: The script can output multiple things depending on the output options in the code below: # - An Excel file containing all relevant information # - Plots of transposition distances; either RL and LR plotted separately, or overlapped together import os import fnmatch from Bio import SeqIO import xlsxwriter from pathlib import Path import datetime as dt import csv import heapq import matplotlib.pyplot as plt plt.rcParams['svg.fonttype'] = 'none' # important so that text stays as characters in the svg output # change directory based on where input files are os.chdir('C:\\Users\\Leo Vo\\Desktop\\SEK_NGS\\190217_Integration_site_analysis_SEK') # change if necessary exp_date = "190202" user = "Leo Vo" # output options excel = False # outputs an excel spreadsheet summary excel_name = '{}_transposition_distance_mmei_duplicates.xlsx'.format(exp_date) ## Change output excel file name here plot = True # outputs plots of integration around primary site plot_overlap = True # overlaps the distributions of the opposite orientations **Will not plot if plot = False above #reads in BL21DE3 RefSeq for record in SeqIO.parse("genome.fasta", "fasta"): genome = record.seq.upper() # remember to convert to upper case genome_rc = genome.reverse_complement() if excel: # initializes excel output file, define formats used below excel_out_path = Path(excel_name) if not excel_out_path.is_file(): log = xlsxwriter.Workbook(excel_name) bold = log.add_format({'bold': True}) upsizebold = log.add_format() upsizebold.set_font_size(16) upsizebold.set_bold() percentage_format = log.add_format() 
percentage_format.set_num_format('0.00%') deci3_format = log.add_format() deci3_format.set_num_format('0.000') red_font = log.add_format({'font_color': 'red'}) blue_font = log.add_format({'font_color': 'blue'}) red_percent = log.add_format() red_percent.set_num_format('0.00%') red_percent.set_font_color('red') blue_percent = log.add_format() blue_percent.set_num_format('0.00%') blue_percent.set_font_color('blue') red_deci3 = log.add_format() red_deci3.set_num_format('0.000') red_deci3.set_font_color('red') blue_deci3 = log.add_format() blue_deci3.set_num_format('0.000') blue_deci3.set_font_color('blue') # make landing info sheet containing all information shared by sheets in the excel file infosheet = log.add_worksheet("General Info") infosheet.write(0, 0, "NGS MmeI Analysis of Transposition Distance ", upsizebold) infosheet.set_column(0, 0, 35) infosheet.write(2, 0, "NextSeq/NGS Data Collection Date", bold) infosheet.write(2, 1, exp_date) infosheet.write(3, 0, "Python Data Analysis Date", bold) infosheet.write(3, 1, str(dt.datetime.now())[0:10]) infosheet.write(4, 0, "Username/Initials", bold) infosheet.write(4, 1, user) infosheet.write(5, 0, "Python Code Used", bold) infosheet.write(5, 1, py_name) infosheet.write(6, 0, "Notes", bold) infosheet.write(7, 0, '"Query Window" - larger window for query distances (not the same as on-target window' ', same strand as the BL21 RefSeq') infosheet.write(8, 0, '"Genomic Base" - Base directly 5\' of integration site, on the SAME strand as protospacer') infosheet.write(9, 0, '"Example Reads" - Reads here are trimmed to 17bp and the reverse complement of reads in' 'the Geneious fastq output', ) # read in csv file containing sample codes, info and spacers. # locate fastq file based on code # for each file, determine spacer orientation then switch genome to revcom if needed (refseq) # match spacer with refseq # That + 32 = spacer_end. 
From refseq take window 400 before and 400 after spacer_end as refseq_query # for each fastq take reads, reverse and trim them down to the last 17 bp. Then map to refseq_query or revcom of that # if it maps to location on query, then it's RL, and trans_dist is location + 17 - spacer_end # if it maps to location on query revcom, it's LR, then trans_dist is len refseq - location -17 + 5 - spacer_end # add these trans_dist into separate out_tally lists for RL and LR, and also into a common out_tally # determine most freq trans_dist from common out_tally - main_site # set on_target as the window -50 and +50 from trans_site. # go through the separate out tally lists and remove any trans_dist not within on_target # sum of number of items from these two lists versus total of reads in the fastq is on-target freq # ratio of the 2 lists is the orientation bias # output the separate trans dists into excel output in different colors, and also a common column # use matplotlib to plot and save separately graphs for the 2 orientations def dist_find(code, psl, description, filename, direction, refseq, spacer, date, excel, plot, plot_overlap): # main analysis function # map spacer to refseq and determine query window
# analyze all input files back-to-back using a master input .csv file with open('input_spacers.csv', newline='') as csvfile: reader = csv.reader(csvfile, delimiter=',') next(reader, None) for row in reader: code = row[0] filename = 'none' for i in os.listdir('.'): if fnmatch.fnmatch(i, "*{}*.fastq".format(code)): filename = i break if filename != 'none': description = row[6] psl = row[7] direction = str(row[4]).lower() ## spacer direction ## set refeq based on direction if direction == 'fw': refseq = genome elif direction == 'rv': refseq = genome_rc else: print("Direction Error = {}".format(code)) break spacer = str(row[10]).upper() #spacer sequence, convert to uppercase # run main dist_find function with the relevant input variables dist_find(code, psl, description, filename, direction, refseq, spacer, exp_date, excel, plot, plot_overlap) else: print("WARNING - File Not Found For {}".format(code)) if excel: log.close()
query_length = 500 if refseq.find(spacer) >= 0: spacer_end = refseq.find(spacer) + 32 query = refseq[spacer_end-90:spacer_end+query_length] # '-99' accounts for the -50bp of the on-target later query_rc = query.reverse_complement() spacer_end = 90 # resets spacer end index to middle of query window (no longer using full refseq) else: print("ERROR - Spacer not found within RefSeq") return total = 0 # counts total reads in fastq file out_list_all = [] # list holding common trans_dist out_list_rl = [] # list holding indv RL trans_dist values out_list_lr = [] # list holding indv LR trans_dist values # these lists are longer than query_length to hold negative values of trans_dist # the output excel cuts of list using query_length so those values will not show out_tally_rl = [0] * (query_length+spacer_end+20) # list tallying freq of tran_dist for RL out_tally_lr = [0] * (query_length+spacer_end+20) # list tallying freq of tran_dist for LR example_reads_rl = ['X'] * (query_length+spacer_end+20) # to hold example reads mapping to each trans_dist for RL example_reads_lr = ['X'] * (query_length+spacer_end+20) # to hold example reads mapping to each trans_dist for LR for record in SeqIO.parse(filename, "fastq"): # loop through all reads in fastq file total += 1 rev_seq = record.seq.reverse_complement() new_seq = rev_seq[len(rev_seq)-17:] # trim to last 17 base pair if query.find(new_seq) >= 0: # corresponds to RL trans_dist = query.find(new_seq) + 17 - spacer_end # distance in bp from end of protospacer out_list_all.append(trans_dist) # append to holding lists for processing later out_list_rl.append(trans_dist) if out_tally_rl[trans_dist] == 0: # add read to example list if this is the first occurrence example_reads_rl[trans_dist] = new_seq out_tally_rl[trans_dist] += 1 # count into tally list elif query_rc.find(new_seq) >= 0: # corresponds to LR trans_dist = len(query) - query_rc.find(new_seq) - 17 + 5 - spacer_end # dist in bp from end of protospacer 
out_list_all.append(trans_dist) # append to tally lists for processing later out_list_lr.append(trans_dist) if out_tally_lr[trans_dist] == 0: # add read to example list if this is the first occurrence example_reads_lr[trans_dist] = new_seq out_tally_lr[trans_dist] += 1 # count into tally list # determine most frequent trans_dist out_tally_all = [0] * query_length for i in range(0, len(out_tally_all)): out_tally_all[i] = out_tally_rl[i] + out_tally_lr[i] for x, y in enumerate(out_tally_all): if y == max(out_tally_all): main_site = x + spacer_end # remember to convert dist to site of integration # define on target window on_target_lower = main_site - 50 on_target_upper = main_site + 50 # move any trans_dist within this window into a final holding list and clears old holding list final_list_rl = [] for dist in out_list_rl: if on_target_lower <= (dist + spacer_end) <= on_target_upper: # convert dist to site of integration final_list_rl.append(dist) final_list_lr = [] for dist in out_list_lr: if on_target_lower <= (dist + spacer_end) <= on_target_upper: # convert dist to site of integration final_list_lr.append(dist) # determine on target frequency on_target_total = len(final_list_rl) + len(final_list_lr) off_target = total - on_target_total # determine top 3 most common trans_dist for highlight box # for combined RL and LR indices = [] # for zipping with out_tally lists for i in range(0, query_length): indices.append(i) top_3 = heapq.nlargest(3, zip(out_tally_all, indices)) # exists as a list of smaller 2-item lists if excel: #set up excel output sheet logsheet = log.add_worksheet(code) logsheet.set_column(0, 0, 24) logsheet.set_column(1, 1, 20) logsheet.set_column(2, 6, 17) logsheet.set_column(7, 12, 19) logsheet.write(3, 3, " ") # for clearer aesthetic logsheet.write(0, 0, "Sample ID", bold) logsheet.write(0, 1, code) logsheet.write(1, 0, "Description", bold) logsheet.write(1, 1, description) logsheet.write(2, 0, "Target Location", bold) if direction == 'fw': 
logsheet.write(2, 1, "5' of Integration Site") else: logsheet.write(2, 1, "3' of Integration Site (RevCom)") logsheet.write(3, 0, "Query Window", bold) if direction == 'fw': logsheet.write(3, 1, str(query)) if direction == 'rv': logsheet.write(3, 1, str(query_rc)) logsheet.write(4, 0, "Plasmid encoding gRNA", bold) logsheet.write(4, 1, psl) logsheet.write(5, 0, "Protospacer", bold) logsheet.write(5, 1, spacer) logsheet.write(6, 0, "Total Reads", bold) logsheet.write(6, 1, total) logsheet.write(7, 0, "On Target Reads", bold) logsheet.write(7, 1, on_target_total) logsheet.write(7, 2, on_target_total / total, percentage_format) logsheet.write(8, 0, "Off Target Reads", bold) logsheet.write(8, 1, off_target) logsheet.write(8, 2, off_target / total, percentage_format) logsheet.write(9, 0, "On Target Reads in RL Orientation", bold) logsheet.write(9, 1, len(final_list_rl)) logsheet.write(9, 2, len(final_list_rl) / total, percentage_format) logsheet.write(10, 0, "On Target Reads in LR Orientation", bold) logsheet.write(10, 1, len(final_list_lr)) logsheet.write(10, 2, len(final_list_lr) / total, percentage_format) logsheet.write(11, 1, "Protospacer-Transposon Distance", bold) for i in range(0, query_length): logsheet.write(i + 12, 1, i) logsheet.write(11, 0, "Genomic Base", bold) for i in range(-1, query_length - 1): logsheet.write(i + 13, 0, query[i+spacer_end]) # shift back 1 to get the base right before transposition logsheet.write(11, 2, "Number of Reads (RL)", bold) for i in range(0, query_length): logsheet.write(i + 12, 2, out_tally_rl[i], red_font) logsheet.write(11, 3, "% of Total Reads (RL)", bold) for i in range(0, query_length): logsheet.write(i + 12, 3, out_tally_rl[i]/total, red_percent) logsheet.write(11, 4, "Normalized Read Count (RL)", bold) if max(out_tally_rl) > 0: for i in range(0, query_length): logsheet.write(i + 12, 4, out_tally_rl[i] / max(out_tally_rl), red_deci3) logsheet.write(11, 5, "Number of Reads (LR)", bold) for i in range(0, query_length): 
logsheet.write(i + 12, 5, out_tally_lr[i], blue_font) logsheet.write(11, 6, "% of Total Reads (LR)", bold) for i in range(0, query_length): logsheet.write(i + 12, 6, out_tally_lr[i] / total, blue_percent) logsheet.write(11, 7, "Normalized Read Count (LR)", bold) if max(out_tally_lr) > 0: for i in range(0, query_length): logsheet.write(i + 12, 7, out_tally_lr[i] / max(out_tally_lr), blue_deci3) logsheet.write(11, 8, "Number of Reads (Combined)", bold) for i in range(0, query_length): logsheet.write(i + 12, 8, out_tally_all[i]) logsheet.write(11, 9, "% of Total Reads (Combined)", bold) for i in range(0, query_length): logsheet.write(i + 12, 9, out_tally_all[i] / total, percentage_format) logsheet.write(11, 10, "Normalized Read Count (Combined)", bold) if max(out_tally_all) > 0: for i in range(0, query_length): logsheet.write(i + 12, 10, out_tally_all[i] / max(out_tally_all), deci3_format) logsheet.write(11, 11, "Example Reads RL", bold) for i in range(0, query_length): logsheet.write(i + 12, 11, str(example_reads_rl[i]), red_font) logsheet.write(11, 12, "Example Reads LR", bold) for i in range(0, query_length): logsheet.write(i + 12, 12, str(example_reads_lr[i]), blue_font) # 'highlight box', take from top_3 list determined above logsheet.write(0, 4, 'Most Frequent Transposition Distances (bp)', bold) logsheet.write(1, 4, top_3[0][1]) logsheet.write(1, 5, top_3[0][0] / total, percentage_format) logsheet.write(1, 6, out_tally_rl[top_3[0][1]]/total, red_percent) logsheet.write(1, 7, out_tally_lr[top_3[0][1]] / total, blue_percent) logsheet.write(2, 4, top_3[1][1]) logsheet.write(2, 5, top_3[1][0] / total, percentage_format) logsheet.write(2, 6, out_tally_rl[top_3[1][1]] / total, red_percent) logsheet.write(2, 7, out_tally_lr[top_3[1][1]] / total, blue_percent) logsheet.write(3, 4, top_3[2][1]) logsheet.write(3, 5, top_3[2][0] / total, percentage_format) logsheet.write(3, 6, out_tally_rl[top_3[2][1]] / total, red_percent) logsheet.write(3, 7, out_tally_lr[top_3[2][1]] / 
total, blue_percent) logsheet.write(4, 4, 'On Target Frequency', bold) logsheet.write(4, 5, on_target_total / total, percentage_format) logsheet.write(5, 4, 'Orientation Bias (R->L:L->R)', bold) logsheet.write(5, 5, '{} : 1'.format(round(len(final_list_rl)/(len(final_list_lr)+0.00000001), 2))) # in case LR is 0 # plot and save graphs if plot setting = True # Only plots a certain window (e.g. from 40bp to 60bp) - change that in the xlim options below if plot: x_axis = [] # artificial x-axis for i in range(20, 61): x_axis.append(i) y_rl = out_tally_rl[20:61] y_lr = out_tally_lr[20:61] max_y = max(max(y_rl), max(y_lr)) # for scaling y axis if not plot_overlap: fig, axs = plt.subplots(1, 2) fig. tight_layout(rect=[0.15, 0.1, 1, 0.9]) title = fig.suptitle("{} - {}\nOn-target frequency: {}%\nOrientation bias (R->L:L->R): {}:1" .format(code, description, round(100*on_target_total/total, 1), round(len(final_list_rl)/(len(final_list_lr)+0.00000001), 2))) title.set_y(0.88) # first graph on the left in red axs[0].bar(x_axis, y_rl, color='tab:orange', width=1.0) axs[0].set_title("R->L Integration Events") # second graph on the right in blue axs[1].bar(x_axis, y_lr, color='tab:blue', width=1.0) axs[1].set_title("L->R Integration Events") fig.subplots_adjust(wspace=0.7) for axs in axs.flat: axs.spines['top'].set_visible(False) axs.spines['right'].set_visible(False) # ax.spines['bottom'].set_visible(False) # axs.spines['left'].set_visible(False) axs.spines['bottom'].set_position('zero') axs.spines['left'].set_bounds(0, max_y) axs.set_xticks([40,45,50,55,60]) axs.set_xticklabels([40,45,50,55,60]) axs.set_yticks([0, max_y]) axs.set_yticklabels([0, max_y]) axs.set_xlim(left=39, right=61) ## Change window here axs.set_ylim(bottom=0, top=1.05*(max_y)) axs.set(xlabel="Distance from target site (bp)", ylabel="Read count") axs.yaxis.set_label_coords(-0.1,0.5) fig.set_size_inches(6, 4.2) fig.subplots_adjust(top=0.65) #plt.xlabel("Distance from target site (bp)") #plt.ylabel("Read count") 
#plt.gca().set_xlim(left=35, right=60) #plt.gca().set_ylim(bottom=0, top=(1.3*max_y)) # plt.savefig('test.svg', dpi=250) plt.savefig('Dist_Output\\{}_{}_{}.png'.format(date, code, description), dpi=300) plt.close() if plot_overlap: fig, axs = plt.subplots(1, 1, tight_layout=True) title = fig.suptitle("{} - {} / On-target = {}% / Bias = {} :1".format( code, description, round(100*on_target_total/total, 1), round(len(final_list_rl)/(len(final_list_lr)+0.00000001), 2))) title.set_y(0.9) # LR graph is colorless with a border axs.bar(x_axis, y_lr, color='none', edgecolor='#153C6B', linewidth=1.0, width=1.01, zorder=1) # RL graph is blue with no border (behind bordered RL) axs.bar(x_axis, y_rl, color='#83B0DD', edgecolor='#83B0DD', linewidth=1.0, width=1.01, zorder=0) axs.spines['top'].set_visible(False) axs.spines['right'].set_visible(False) # ax.spines['bottom'].set_visible(False) # axs.spines['left'].set_visible(False) axs.spines['bottom'].set_position('zero') axs.spines['left'].set_bounds(0, max_y) axs.set_xticks([40, 42, 45, 50, 55, 60]) axs.set_xticklabels([40, 0, 45, 50, 55, 60]) axs.set_yticks([0, max_y]) axs.set_yticklabels([0, max_y]) axs.set_xlim(left=42, right=58) ## Change window here axs.set_ylim(bottom=0, top=1.25 * (max_y)) axs.set(xlabel="Distance from target site (bp)", ylabel="Read count") axs.yaxis.set_label_coords(-0.05, 0.4) fig.set_size_inches(5, 4.2) # plt.xlabel("Distance from target site (bp)") # plt.ylabel("Read count") # plt.gca().set_xlim(left=35, right=60) # plt.gca().set_ylim(bottom=0, top=(1.3*max_y)) # plt.savefig('test.svg', dpi=250) plt.savefig('Dist_Output_Overlap\\{}_{}_overlapped_{}.svg'.format(date, code, description), dpi=500) plt.close()
identifier_body
hashed.rs
//! Implementation using ordered keys with hashes and robin hood hashing. use std::default::Default; use timely_sort::Unsigned; use ::hashable::{Hashable, HashOrdered}; use super::{Trie, Cursor, Builder, MergeBuilder, TupleBuilder}; const MINIMUM_SHIFT : usize = 4; const BLOAT_FACTOR : f64 = 1.1; // I would like the trie entries to look like (Key, usize), where a usize equal to the // previous entry indicates that the location is empty. This would let us always use the // prior location to determine lower bounds, rather than double up upper and lower bounds // in Entry. // // It might also be good to optimistically build the hash map in place. We can do this by // upper bounding the number of keys, allocating and placing as if this many, and then // drawing down the allocation and placements if many keys collided or cancelled. /// A level of the trie, with keys and offsets into a lower layer. /// /// If keys[i].1 == 0 then entry i should /// be ignored. This is our version of `Option<(K, usize)>`, which comes at the cost /// of requiring `K: Default` to populate empty keys. /// /// Each region of this layer is an independent immutable RHH map, whose size should /// equal something like `(1 << i) + i` for some value of `i`. The first `(1 << i)` /// elements are where we expect to find keys, and the remaining `i` are for spill-over /// due to collisions near the end of the first region. /// /// We might do something like "if X or fewer elements, just use an ordered list". #[derive(Debug)] pub struct HashedLayer<K: HashOrdered, L> { /// Keys and offsets for the keys. pub keys: Vec<Entry<K>>, // track upper and lower bounds, because trickery is hard. /// A lower layer containing ranges of values. 
pub vals: L, } impl<K: HashOrdered, L> HashedLayer<K, L> { fn _entry_valid(&self, index: usize) -> bool { self.keys[index].is_some() } fn lower(&self, index: usize) -> usize { self.keys[index].get_lower() } fn upper(&self, index: usize) -> usize { self.keys[index].get_upper() } } impl<K: Clone+HashOrdered+Default, L: Trie> Trie for HashedLayer<K, L> { type Item = (K, L::Item); type Cursor = HashedCursor<L>; type MergeBuilder = HashedBuilder<K, L::MergeBuilder>; type TupleBuilder = HashedBuilder<K, L::TupleBuilder>; fn keys(&self) -> usize { self.keys.len() } fn tuples(&self) -> usize { self.vals.tuples() } fn cursor_from(&self, lower: usize, upper: usize) -> Self::Cursor { if lower < upper { let mut shift = 0; while upper - lower >= (1 << shift) { shift += 1; } shift -= 1; let mut pos = lower; // set self.pos to something valid. while pos < upper && !self.keys[pos].is_some() { pos += 1; } HashedCursor { shift: shift, bounds: (lower, upper), pos: pos, // keys: owned_self.clone().map(|x| &x.keys[..]), child: self.vals.cursor_from(self.keys[pos].get_lower(), self.keys[pos].get_upper()) } } else { HashedCursor { shift: 0, bounds: (0, 0), pos: 0, // keys: owned_self.clone().map(|x| &x.keys[..]), // &self.keys, child: self.vals.cursor_from(0, 0), } } } } /// An entry in hash tables. #[derive(Debug, Clone)] pub struct Entry<K: HashOrdered> { /// The contained key. 
key: K, lower1: u32, upper1: u32, } impl<K: HashOrdered> Entry<K> { fn new(key: K, lower: usize, upper: usize) -> Self { Entry { key: key, lower1: lower as u32, upper1: upper as u32, } } // fn for_cmp(&self) -> (K::Output, &K) { (self.key.hashed(), &self.key) } fn is_some(&self) -> bool { self.upper1 != 0 } fn empty() -> Self where K: Default { Self::new(Default::default(), 0, 0) } fn get_lower(&self) -> usize { self.lower1 as usize} fn get_upper(&self) -> usize { self.upper1 as usize} fn _set_lower(&mut self, x: usize) { self.lower1 = x as u32; } fn set_upper(&mut self, x: usize) { self.upper1 = x as u32; } } /// Assembles a layer of this pub struct HashedBuilder<K: HashOrdered, L> { temp: Vec<Entry<K>>, // staging for building; densely packed here and then re-laid out in self.keys. /// Entries in the hash map. pub keys: Vec<Entry<K>>, // keys and offs co-located because we expect to find the right answers fast. /// A builder for the layer below. pub vals: L, } impl<K: HashOrdered+Clone+Default, L> HashedBuilder<K, L> { #[inline] fn _lower(&self, index: usize) -> usize { self.keys[index].get_lower() } #[inline] fn _upper(&self, index: usize) -> usize { self.keys[index].get_upper() } } impl<K: HashOrdered+Clone+Default, L: Builder> Builder for HashedBuilder<K, L> { type Trie = HashedLayer<K, L::Trie>; /// Looks at the contents of self.temp and extends self.keys appropriately. /// /// This is where the "hash map" structure is produced. Up until this point, all (key, usize) pairs were /// committed to self.temp, where they awaited layout. That now happens here. fn boundary(&mut self) -> usize { /// self.temp *should* be sorted by (hash, key); let's check! debug_assert!((1 .. 
self.temp.len()).all(|i| self.temp[i-1].key < self.temp[i].key)); let boundary = self.vals.boundary(); if self.temp.len() > 0 { // push doesn't know the length at the end; must write it if !self.temp[self.temp.len()-1].is_some() { let pos = self.temp.len()-1; self.temp[pos].set_upper(boundary); } // having densely packed everything, we now want to extend the allocation and rewrite the contents // so that their spacing is in line with how robin hood hashing works. let lower = self.keys.len(); if self.temp.len() < (1 << MINIMUM_SHIFT) { self.keys.extend(self.temp.drain(..)); } else { let target = (BLOAT_FACTOR * (self.temp.len() as f64)) as u64; let mut shift = MINIMUM_SHIFT; while (1 << shift) < target { shift += 1; } self.keys.reserve(1 << shift); // now going to start pushing things in to self.keys let mut cursor: usize = 0; // <-- current write pos in self.keys. for entry in self.temp.drain(..) { // acquire top `shift` bits from `key.hashed()` let target = (entry.key.hashed().as_u64() >> ((<K as Hashable>::Output::bytes() * 8) - shift)) as usize; debug_assert!(target < (1 << shift)); while cursor < target { // filling with bogus stuff self.keys.push(Entry::empty()); cursor += 1; } self.keys.push(entry); cursor += 1; } // fill out the space, if not full. while cursor < (1 << shift) { self.keys.push(Entry::empty()); cursor += 1; } // assert that we haven't doubled the allocation (would confuse the "what is shift?" 
logic) assert!((self.keys.len() - lower) < (2 << shift)); } } self.keys.len() } #[inline(never)] fn done(mut self) -> Self::Trie { self.boundary(); self.keys.shrink_to_fit(); let vals = self.vals.done(); if vals.tuples() > 0 { assert!(self.keys.len() > 0); } HashedLayer { keys: self.keys, vals: vals, } } } impl<K: HashOrdered+Clone+Default, L: MergeBuilder> MergeBuilder for HashedBuilder<K, L> { fn with_capacity(other1: &Self::Trie, other2: &Self::Trie) -> Self { HashedBuilder { temp: Vec::new(), keys: Vec::with_capacity(other1.keys() + other2.keys()), vals: L::with_capacity(&other1.vals, &other2.vals), } } /// Copies fully formed ranges (note plural) of keys from another trie. /// /// While the ranges are fully formed, the offsets in them are relative to the other trie, and /// must be corrected. These keys must be moved immediately to self.keys, as there is no info /// about boundaries between them, and we are unable to lay out the info any differently. fn copy_range(&mut self, other: &Self::Trie, lower: usize, upper: usize) { if lower < upper
} fn push_merge(&mut self, other1: (&Self::Trie, usize, usize), other2: (&Self::Trie, usize, usize)) -> usize { // just rebinding names to clarify code. let (trie1, mut lower1, upper1) = other1; let (trie2, mut lower2, upper2) = other2; debug_assert!(upper1 <= trie1.keys.len()); debug_assert!(upper2 <= trie2.keys.len()); self.temp.reserve((upper1 - lower1) + (upper2 - lower2)); while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; } while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; } // while both mergees are still active while lower1 < upper1 && lower2 < upper2 { debug_assert!(trie1.keys[lower1].is_some()); debug_assert!(trie2.keys[lower2].is_some()); match trie1.keys[lower1].key.cmp(&trie2.keys[lower2].key) { ::std::cmp::Ordering::Less => { lower1 += self.push_while_less(trie1, lower1, upper1, &trie2.keys[lower2].key); } ::std::cmp::Ordering::Equal => { let lower = self.vals.boundary(); let upper = self.vals.push_merge( (&trie1.vals, trie1.lower(lower1), trie1.upper(lower1)), (&trie2.vals, trie2.lower(lower2), trie2.upper(lower2)) ); if upper > lower { self.temp.push(Entry::new(trie1.keys[lower1].key.clone(), lower, upper)); } lower1 += 1; lower2 += 1; while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; } while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; } } ::std::cmp::Ordering::Greater => { lower2 += self.push_while_less(trie2, lower2, upper2, &trie1.keys[lower1].key); } } } if lower1 < upper1 { self.push_all(trie1, lower1, upper1); } if lower2 < upper2 { self.push_all(trie2, lower2, upper2); } self.boundary() } } impl<K: HashOrdered+Clone+Default, L: TupleBuilder> TupleBuilder for HashedBuilder<K, L> { type Item = (K, L::Item); fn new() -> Self { HashedBuilder { temp: Vec::new(), keys: Vec::new(), vals: L::new() } } fn with_capacity(cap: usize) -> Self { HashedBuilder { temp: Vec::with_capacity(cap), keys: Vec::with_capacity(cap), vals: 
L::with_capacity(cap), } } #[inline] fn push_tuple(&mut self, (key, val): (K, L::Item)) { // we build up self.temp, and rely on self.boundary() to drain self.temp. let temp_len = self.temp.len(); if temp_len == 0 || self.temp[temp_len-1].key != key { if temp_len > 0 { debug_assert!(self.temp[temp_len-1].key < key); } let boundary = self.vals.boundary(); if temp_len > 0 { self.temp[temp_len-1].set_upper(boundary); } self.temp.push(Entry::new(key, boundary, 0)); // this should be fixed by boundary? } self.vals.push_tuple(val); } } impl<K: HashOrdered+Clone+Default, L: MergeBuilder> HashedBuilder<K, L> { /// Moves other stuff into self.temp. Returns number of element consumed. fn push_while_less(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize, vs: &K) -> usize { let other_basis = other.lower(lower); // from where in `other` the offsets do start. let self_basis = self.vals.boundary(); // from where in `self` the offsets must start. let mut bound = 0; // tracks largest value of upper let mut index = lower; // let vs_hashed = vs.hashed(); // stop if overrun, or if we find a valid element >= our target. while index < upper && !(other.keys[index].is_some() && &other.keys[index].key >= vs) { if other.upper(index) != 0 { if bound < other.upper(index) { bound = other.upper(index); } debug_assert!(other.lower(index) < other.upper(index)); let lower = (other.lower(index) + self_basis) - other_basis; let upper = (other.upper(index) + self_basis) - other_basis; self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper)); } index += 1; } debug_assert!(bound > 0); self.vals.copy_range(&other.vals, other.lower(lower), bound); index - lower } fn push_all(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize) { debug_assert!(lower < upper); debug_assert!(upper <= other.keys.len()); let other_basis = other.lower(lower); // from where in `other` the offsets do start. 
let self_basis = self.vals.boundary(); // from where in `self` the offsets must start. let mut bound = 0; // tracks largest value of upper for index in lower .. upper { if other.upper(index) != 0 { if bound < other.upper(index) { bound = other.upper(index); } let lower = (other.lower(index) + self_basis) - other_basis; let upper = (other.upper(index) + self_basis) - other_basis; self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper)); } } debug_assert!(bound > 0); self.vals.copy_range(&other.vals, other.lower(lower), bound); } } /// A cursor with a child cursor that is updated as we move. #[derive(Debug)] pub struct HashedCursor<L: Trie> { shift: usize, // amount by which to shift hashes. bounds: (usize, usize), // bounds of slice of self.keys. pos: usize, // <-- current cursor position. /// A cursor for the layer below this one. pub child: L::Cursor, } impl<K: HashOrdered, L: Trie> Cursor<HashedLayer<K, L>> for HashedCursor<L> { type Key = K; fn key<'a>(&self, storage: &'a HashedLayer<K, L>) -> &'a Self::Key { &storage.keys[self.pos].key } fn step(&mut self, storage: &HashedLayer<K, L>) { // look for next valid entry self.pos += 1; while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() { self.pos += 1; } if self.valid(storage) { let child_lower = storage.keys[self.pos].get_lower(); let child_upper = storage.keys[self.pos].get_upper(); self.child.reposition(&storage.vals, child_lower, child_upper); } else { self.pos = self.bounds.1; } } #[inline(never)] fn seek(&mut self, storage: &HashedLayer<K, L>, key: &Self::Key) { // leap to where the key *should* be, or at least be soon after. // let key_hash = key.hashed(); // only update position if shift is large. otherwise leave it alone. 
if self.shift >= MINIMUM_SHIFT { let target = (key.hashed().as_u64() >> ((K::Output::bytes() * 8) - self.shift)) as usize; self.pos = target; } // scan forward until we find a valid entry >= (key_hash, key) while self.pos < self.bounds.1 && (!storage.keys[self.pos].is_some() || &storage.keys[self.pos].key < key) { self.pos += 1; } // self.pos should now either // (i) have self.pos == self.bounds.1 (and be invalid) or // (ii) point at a valid entry with (entry_hash, entry) >= (key_hash, key). if self.valid(storage) { self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper()); } } fn valid(&self, _storage: &HashedLayer<K, L>) -> bool { self.pos < self.bounds.1 } fn rewind(&mut self, storage: &HashedLayer<K, L>) { self.pos = self.bounds.0; if self.valid(storage) { self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper()); } } fn reposition(&mut self, storage: &HashedLayer<K, L>, lower: usize, upper: usize) { // sort out what the shift is. // should be just before the first power of two strictly containing (lower, upper]. self.shift = 0; while upper - lower >= (1 << self.shift) { self.shift += 1; } self.shift -= 1; self.bounds = (lower, upper); self.pos = lower; // set self.pos to something valid. while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() { self.pos += 1; } if self.valid(storage) { self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper()); } } }
{ let other_basis = other.lower(lower); // from where in `other` the offsets do start. let self_basis = self.vals.boundary(); // from where in `self` the offsets must start. for index in lower .. upper { let other_entry = &other.keys[index]; let new_entry = if other_entry.is_some() { Entry::new( other_entry.key.clone(), (other_entry.get_lower() + self_basis) - other_basis, (other_entry.get_upper() + self_basis) - other_basis, ) } else { Entry::empty() }; self.keys.push(new_entry); } self.vals.copy_range(&other.vals, other.lower(lower), other.upper(upper-1)); self.boundary(); // <-- perhaps unnecessary, but ... }
conditional_block
hashed.rs
//! Implementation using ordered keys with hashes and robin hood hashing. use std::default::Default; use timely_sort::Unsigned; use ::hashable::{Hashable, HashOrdered}; use super::{Trie, Cursor, Builder, MergeBuilder, TupleBuilder}; const MINIMUM_SHIFT : usize = 4; const BLOAT_FACTOR : f64 = 1.1; // I would like the trie entries to look like (Key, usize), where a usize equal to the // previous entry indicates that the location is empty. This would let us always use the // prior location to determine lower bounds, rather than double up upper and lower bounds // in Entry. // // It might also be good to optimistically build the hash map in place. We can do this by // upper bounding the number of keys, allocating and placing as if this many, and then // drawing down the allocation and placements if many keys collided or cancelled. /// A level of the trie, with keys and offsets into a lower layer. /// /// If keys[i].1 == 0 then entry i should /// be ignored. This is our version of `Option<(K, usize)>`, which comes at the cost /// of requiring `K: Default` to populate empty keys. /// /// Each region of this layer is an independent immutable RHH map, whose size should /// equal something like `(1 << i) + i` for some value of `i`. The first `(1 << i)` /// elements are where we expect to find keys, and the remaining `i` are for spill-over /// due to collisions near the end of the first region. /// /// We might do something like "if X or fewer elements, just use an ordered list". #[derive(Debug)] pub struct HashedLayer<K: HashOrdered, L> { /// Keys and offsets for the keys. pub keys: Vec<Entry<K>>, // track upper and lower bounds, because trickery is hard. /// A lower layer containing ranges of values. 
pub vals: L, } impl<K: HashOrdered, L> HashedLayer<K, L> { fn _entry_valid(&self, index: usize) -> bool { self.keys[index].is_some() } fn lower(&self, index: usize) -> usize { self.keys[index].get_lower() } fn upper(&self, index: usize) -> usize { self.keys[index].get_upper() } } impl<K: Clone+HashOrdered+Default, L: Trie> Trie for HashedLayer<K, L> { type Item = (K, L::Item); type Cursor = HashedCursor<L>; type MergeBuilder = HashedBuilder<K, L::MergeBuilder>; type TupleBuilder = HashedBuilder<K, L::TupleBuilder>; fn keys(&self) -> usize { self.keys.len() } fn tuples(&self) -> usize { self.vals.tuples() } fn cursor_from(&self, lower: usize, upper: usize) -> Self::Cursor { if lower < upper { let mut shift = 0; while upper - lower >= (1 << shift) { shift += 1; } shift -= 1; let mut pos = lower; // set self.pos to something valid. while pos < upper && !self.keys[pos].is_some() { pos += 1; } HashedCursor { shift: shift, bounds: (lower, upper), pos: pos, // keys: owned_self.clone().map(|x| &x.keys[..]), child: self.vals.cursor_from(self.keys[pos].get_lower(), self.keys[pos].get_upper()) } } else { HashedCursor { shift: 0, bounds: (0, 0), pos: 0, // keys: owned_self.clone().map(|x| &x.keys[..]), // &self.keys, child: self.vals.cursor_from(0, 0), } } } } /// An entry in hash tables. #[derive(Debug, Clone)] pub struct Entry<K: HashOrdered> { /// The contained key. 
key: K, lower1: u32, upper1: u32, } impl<K: HashOrdered> Entry<K> { fn new(key: K, lower: usize, upper: usize) -> Self { Entry { key: key, lower1: lower as u32, upper1: upper as u32, } } // fn for_cmp(&self) -> (K::Output, &K) { (self.key.hashed(), &self.key) } fn is_some(&self) -> bool { self.upper1 != 0 } fn empty() -> Self where K: Default { Self::new(Default::default(), 0, 0) } fn get_lower(&self) -> usize { self.lower1 as usize} fn get_upper(&self) -> usize { self.upper1 as usize} fn _set_lower(&mut self, x: usize) { self.lower1 = x as u32; } fn set_upper(&mut self, x: usize) { self.upper1 = x as u32; } } /// Assembles a layer of this pub struct HashedBuilder<K: HashOrdered, L> { temp: Vec<Entry<K>>, // staging for building; densely packed here and then re-laid out in self.keys. /// Entries in the hash map. pub keys: Vec<Entry<K>>, // keys and offs co-located because we expect to find the right answers fast. /// A builder for the layer below. pub vals: L, } impl<K: HashOrdered+Clone+Default, L> HashedBuilder<K, L> { #[inline] fn _lower(&self, index: usize) -> usize { self.keys[index].get_lower() } #[inline] fn _upper(&self, index: usize) -> usize { self.keys[index].get_upper() } } impl<K: HashOrdered+Clone+Default, L: Builder> Builder for HashedBuilder<K, L> { type Trie = HashedLayer<K, L::Trie>; /// Looks at the contents of self.temp and extends self.keys appropriately. /// /// This is where the "hash map" structure is produced. Up until this point, all (key, usize) pairs were /// committed to self.temp, where they awaited layout. That now happens here. fn boundary(&mut self) -> usize { /// self.temp *should* be sorted by (hash, key); let's check! debug_assert!((1 .. 
self.temp.len()).all(|i| self.temp[i-1].key < self.temp[i].key)); let boundary = self.vals.boundary(); if self.temp.len() > 0 { // push doesn't know the length at the end; must write it if !self.temp[self.temp.len()-1].is_some() { let pos = self.temp.len()-1; self.temp[pos].set_upper(boundary); } // having densely packed everything, we now want to extend the allocation and rewrite the contents // so that their spacing is in line with how robin hood hashing works. let lower = self.keys.len(); if self.temp.len() < (1 << MINIMUM_SHIFT) { self.keys.extend(self.temp.drain(..)); } else { let target = (BLOAT_FACTOR * (self.temp.len() as f64)) as u64; let mut shift = MINIMUM_SHIFT; while (1 << shift) < target { shift += 1; } self.keys.reserve(1 << shift); // now going to start pushing things in to self.keys let mut cursor: usize = 0; // <-- current write pos in self.keys. for entry in self.temp.drain(..) { // acquire top `shift` bits from `key.hashed()` let target = (entry.key.hashed().as_u64() >> ((<K as Hashable>::Output::bytes() * 8) - shift)) as usize; debug_assert!(target < (1 << shift)); while cursor < target { // filling with bogus stuff self.keys.push(Entry::empty()); cursor += 1; } self.keys.push(entry); cursor += 1; } // fill out the space, if not full. while cursor < (1 << shift) { self.keys.push(Entry::empty()); cursor += 1; } // assert that we haven't doubled the allocation (would confuse the "what is shift?" 
logic) assert!((self.keys.len() - lower) < (2 << shift)); } } self.keys.len() } #[inline(never)] fn done(mut self) -> Self::Trie { self.boundary(); self.keys.shrink_to_fit(); let vals = self.vals.done(); if vals.tuples() > 0 { assert!(self.keys.len() > 0); } HashedLayer { keys: self.keys, vals: vals, } } } impl<K: HashOrdered+Clone+Default, L: MergeBuilder> MergeBuilder for HashedBuilder<K, L> { fn with_capacity(other1: &Self::Trie, other2: &Self::Trie) -> Self { HashedBuilder { temp: Vec::new(), keys: Vec::with_capacity(other1.keys() + other2.keys()), vals: L::with_capacity(&other1.vals, &other2.vals), } } /// Copies fully formed ranges (note plural) of keys from another trie. /// /// While the ranges are fully formed, the offsets in them are relative to the other trie, and /// must be corrected. These keys must be moved immediately to self.keys, as there is no info /// about boundaries between them, and we are unable to lay out the info any differently. fn copy_range(&mut self, other: &Self::Trie, lower: usize, upper: usize) { if lower < upper { let other_basis = other.lower(lower); // from where in `other` the offsets do start. let self_basis = self.vals.boundary(); // from where in `self` the offsets must start. for index in lower .. upper { let other_entry = &other.keys[index]; let new_entry = if other_entry.is_some() { Entry::new( other_entry.key.clone(), (other_entry.get_lower() + self_basis) - other_basis, (other_entry.get_upper() + self_basis) - other_basis, ) } else { Entry::empty() }; self.keys.push(new_entry); } self.vals.copy_range(&other.vals, other.lower(lower), other.upper(upper-1)); self.boundary(); // <-- perhaps unnecessary, but ... } } fn push_merge(&mut self, other1: (&Self::Trie, usize, usize), other2: (&Self::Trie, usize, usize)) -> usize { // just rebinding names to clarify code. 
let (trie1, mut lower1, upper1) = other1; let (trie2, mut lower2, upper2) = other2; debug_assert!(upper1 <= trie1.keys.len()); debug_assert!(upper2 <= trie2.keys.len()); self.temp.reserve((upper1 - lower1) + (upper2 - lower2)); while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; } while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; } // while both mergees are still active while lower1 < upper1 && lower2 < upper2 { debug_assert!(trie1.keys[lower1].is_some()); debug_assert!(trie2.keys[lower2].is_some()); match trie1.keys[lower1].key.cmp(&trie2.keys[lower2].key) { ::std::cmp::Ordering::Less => { lower1 += self.push_while_less(trie1, lower1, upper1, &trie2.keys[lower2].key); } ::std::cmp::Ordering::Equal => { let lower = self.vals.boundary(); let upper = self.vals.push_merge( (&trie1.vals, trie1.lower(lower1), trie1.upper(lower1)), (&trie2.vals, trie2.lower(lower2), trie2.upper(lower2)) ); if upper > lower { self.temp.push(Entry::new(trie1.keys[lower1].key.clone(), lower, upper)); } lower1 += 1; lower2 += 1; while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; } while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; } } ::std::cmp::Ordering::Greater => { lower2 += self.push_while_less(trie2, lower2, upper2, &trie1.keys[lower1].key); } } } if lower1 < upper1 { self.push_all(trie1, lower1, upper1); } if lower2 < upper2 { self.push_all(trie2, lower2, upper2); } self.boundary() } } impl<K: HashOrdered+Clone+Default, L: TupleBuilder> TupleBuilder for HashedBuilder<K, L> { type Item = (K, L::Item); fn new() -> Self { HashedBuilder { temp: Vec::new(), keys: Vec::new(), vals: L::new() } } fn with_capacity(cap: usize) -> Self
#[inline] fn push_tuple(&mut self, (key, val): (K, L::Item)) { // we build up self.temp, and rely on self.boundary() to drain self.temp. let temp_len = self.temp.len(); if temp_len == 0 || self.temp[temp_len-1].key != key { if temp_len > 0 { debug_assert!(self.temp[temp_len-1].key < key); } let boundary = self.vals.boundary(); if temp_len > 0 { self.temp[temp_len-1].set_upper(boundary); } self.temp.push(Entry::new(key, boundary, 0)); // this should be fixed by boundary? } self.vals.push_tuple(val); } } impl<K: HashOrdered+Clone+Default, L: MergeBuilder> HashedBuilder<K, L> { /// Moves other stuff into self.temp. Returns number of element consumed. fn push_while_less(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize, vs: &K) -> usize { let other_basis = other.lower(lower); // from where in `other` the offsets do start. let self_basis = self.vals.boundary(); // from where in `self` the offsets must start. let mut bound = 0; // tracks largest value of upper let mut index = lower; // let vs_hashed = vs.hashed(); // stop if overrun, or if we find a valid element >= our target. while index < upper && !(other.keys[index].is_some() && &other.keys[index].key >= vs) { if other.upper(index) != 0 { if bound < other.upper(index) { bound = other.upper(index); } debug_assert!(other.lower(index) < other.upper(index)); let lower = (other.lower(index) + self_basis) - other_basis; let upper = (other.upper(index) + self_basis) - other_basis; self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper)); } index += 1; } debug_assert!(bound > 0); self.vals.copy_range(&other.vals, other.lower(lower), bound); index - lower } fn push_all(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize) { debug_assert!(lower < upper); debug_assert!(upper <= other.keys.len()); let other_basis = other.lower(lower); // from where in `other` the offsets do start. let self_basis = self.vals.boundary(); // from where in `self` the offsets must start. 
let mut bound = 0; // tracks largest value of upper for index in lower .. upper { if other.upper(index) != 0 { if bound < other.upper(index) { bound = other.upper(index); } let lower = (other.lower(index) + self_basis) - other_basis; let upper = (other.upper(index) + self_basis) - other_basis; self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper)); } } debug_assert!(bound > 0); self.vals.copy_range(&other.vals, other.lower(lower), bound); } } /// A cursor with a child cursor that is updated as we move. #[derive(Debug)] pub struct HashedCursor<L: Trie> { shift: usize, // amount by which to shift hashes. bounds: (usize, usize), // bounds of slice of self.keys. pos: usize, // <-- current cursor position. /// A cursor for the layer below this one. pub child: L::Cursor, } impl<K: HashOrdered, L: Trie> Cursor<HashedLayer<K, L>> for HashedCursor<L> { type Key = K; fn key<'a>(&self, storage: &'a HashedLayer<K, L>) -> &'a Self::Key { &storage.keys[self.pos].key } fn step(&mut self, storage: &HashedLayer<K, L>) { // look for next valid entry self.pos += 1; while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() { self.pos += 1; } if self.valid(storage) { let child_lower = storage.keys[self.pos].get_lower(); let child_upper = storage.keys[self.pos].get_upper(); self.child.reposition(&storage.vals, child_lower, child_upper); } else { self.pos = self.bounds.1; } } #[inline(never)] fn seek(&mut self, storage: &HashedLayer<K, L>, key: &Self::Key) { // leap to where the key *should* be, or at least be soon after. // let key_hash = key.hashed(); // only update position if shift is large. otherwise leave it alone. 
if self.shift >= MINIMUM_SHIFT { let target = (key.hashed().as_u64() >> ((K::Output::bytes() * 8) - self.shift)) as usize; self.pos = target; } // scan forward until we find a valid entry >= (key_hash, key) while self.pos < self.bounds.1 && (!storage.keys[self.pos].is_some() || &storage.keys[self.pos].key < key) { self.pos += 1; } // self.pos should now either // (i) have self.pos == self.bounds.1 (and be invalid) or // (ii) point at a valid entry with (entry_hash, entry) >= (key_hash, key). if self.valid(storage) { self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper()); } } fn valid(&self, _storage: &HashedLayer<K, L>) -> bool { self.pos < self.bounds.1 } fn rewind(&mut self, storage: &HashedLayer<K, L>) { self.pos = self.bounds.0; if self.valid(storage) { self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper()); } } fn reposition(&mut self, storage: &HashedLayer<K, L>, lower: usize, upper: usize) { // sort out what the shift is. // should be just before the first power of two strictly containing (lower, upper]. self.shift = 0; while upper - lower >= (1 << self.shift) { self.shift += 1; } self.shift -= 1; self.bounds = (lower, upper); self.pos = lower; // set self.pos to something valid. while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() { self.pos += 1; } if self.valid(storage) { self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper()); } } }
{ HashedBuilder { temp: Vec::with_capacity(cap), keys: Vec::with_capacity(cap), vals: L::with_capacity(cap), } }
identifier_body
hashed.rs
//! Implementation using ordered keys with hashes and robin hood hashing. use std::default::Default; use timely_sort::Unsigned; use ::hashable::{Hashable, HashOrdered}; use super::{Trie, Cursor, Builder, MergeBuilder, TupleBuilder}; const MINIMUM_SHIFT : usize = 4; const BLOAT_FACTOR : f64 = 1.1; // I would like the trie entries to look like (Key, usize), where a usize equal to the // previous entry indicates that the location is empty. This would let us always use the // prior location to determine lower bounds, rather than double up upper and lower bounds // in Entry. // // It might also be good to optimistically build the hash map in place. We can do this by // upper bounding the number of keys, allocating and placing as if this many, and then // drawing down the allocation and placements if many keys collided or cancelled. /// A level of the trie, with keys and offsets into a lower layer. /// /// If keys[i].1 == 0 then entry i should /// be ignored. This is our version of `Option<(K, usize)>`, which comes at the cost /// of requiring `K: Default` to populate empty keys. /// /// Each region of this layer is an independent immutable RHH map, whose size should /// equal something like `(1 << i) + i` for some value of `i`. The first `(1 << i)` /// elements are where we expect to find keys, and the remaining `i` are for spill-over /// due to collisions near the end of the first region. /// /// We might do something like "if X or fewer elements, just use an ordered list". #[derive(Debug)] pub struct HashedLayer<K: HashOrdered, L> { /// Keys and offsets for the keys. pub keys: Vec<Entry<K>>, // track upper and lower bounds, because trickery is hard. /// A lower layer containing ranges of values. 
pub vals: L, } impl<K: HashOrdered, L> HashedLayer<K, L> { fn _entry_valid(&self, index: usize) -> bool { self.keys[index].is_some() } fn lower(&self, index: usize) -> usize { self.keys[index].get_lower() } fn upper(&self, index: usize) -> usize { self.keys[index].get_upper() } } impl<K: Clone+HashOrdered+Default, L: Trie> Trie for HashedLayer<K, L> { type Item = (K, L::Item); type Cursor = HashedCursor<L>; type MergeBuilder = HashedBuilder<K, L::MergeBuilder>; type TupleBuilder = HashedBuilder<K, L::TupleBuilder>; fn keys(&self) -> usize { self.keys.len() } fn tuples(&self) -> usize { self.vals.tuples() } fn cursor_from(&self, lower: usize, upper: usize) -> Self::Cursor { if lower < upper { let mut shift = 0; while upper - lower >= (1 << shift) { shift += 1; } shift -= 1; let mut pos = lower; // set self.pos to something valid. while pos < upper && !self.keys[pos].is_some() { pos += 1; } HashedCursor { shift: shift, bounds: (lower, upper), pos: pos, // keys: owned_self.clone().map(|x| &x.keys[..]), child: self.vals.cursor_from(self.keys[pos].get_lower(), self.keys[pos].get_upper()) } } else { HashedCursor { shift: 0, bounds: (0, 0), pos: 0, // keys: owned_self.clone().map(|x| &x.keys[..]), // &self.keys, child: self.vals.cursor_from(0, 0), } } } } /// An entry in hash tables. #[derive(Debug, Clone)] pub struct Entry<K: HashOrdered> { /// The contained key. 
key: K, lower1: u32, upper1: u32, } impl<K: HashOrdered> Entry<K> { fn new(key: K, lower: usize, upper: usize) -> Self { Entry { key: key, lower1: lower as u32, upper1: upper as u32, } } // fn for_cmp(&self) -> (K::Output, &K) { (self.key.hashed(), &self.key) } fn is_some(&self) -> bool { self.upper1 != 0 } fn empty() -> Self where K: Default { Self::new(Default::default(), 0, 0) } fn get_lower(&self) -> usize { self.lower1 as usize} fn get_upper(&self) -> usize { self.upper1 as usize} fn _set_lower(&mut self, x: usize) { self.lower1 = x as u32; } fn set_upper(&mut self, x: usize) { self.upper1 = x as u32; } } /// Assembles a layer of this pub struct HashedBuilder<K: HashOrdered, L> { temp: Vec<Entry<K>>, // staging for building; densely packed here and then re-laid out in self.keys. /// Entries in the hash map. pub keys: Vec<Entry<K>>, // keys and offs co-located because we expect to find the right answers fast. /// A builder for the layer below. pub vals: L, } impl<K: HashOrdered+Clone+Default, L> HashedBuilder<K, L> { #[inline] fn _lower(&self, index: usize) -> usize { self.keys[index].get_lower() } #[inline] fn _upper(&self, index: usize) -> usize { self.keys[index].get_upper() } } impl<K: HashOrdered+Clone+Default, L: Builder> Builder for HashedBuilder<K, L> { type Trie = HashedLayer<K, L::Trie>; /// Looks at the contents of self.temp and extends self.keys appropriately. /// /// This is where the "hash map" structure is produced. Up until this point, all (key, usize) pairs were /// committed to self.temp, where they awaited layout. That now happens here. fn boundary(&mut self) -> usize { /// self.temp *should* be sorted by (hash, key); let's check! debug_assert!((1 .. 
self.temp.len()).all(|i| self.temp[i-1].key < self.temp[i].key)); let boundary = self.vals.boundary(); if self.temp.len() > 0 { // push doesn't know the length at the end; must write it if !self.temp[self.temp.len()-1].is_some() { let pos = self.temp.len()-1; self.temp[pos].set_upper(boundary); } // having densely packed everything, we now want to extend the allocation and rewrite the contents // so that their spacing is in line with how robin hood hashing works. let lower = self.keys.len(); if self.temp.len() < (1 << MINIMUM_SHIFT) { self.keys.extend(self.temp.drain(..)); } else { let target = (BLOAT_FACTOR * (self.temp.len() as f64)) as u64; let mut shift = MINIMUM_SHIFT; while (1 << shift) < target { shift += 1; } self.keys.reserve(1 << shift); // now going to start pushing things in to self.keys let mut cursor: usize = 0; // <-- current write pos in self.keys. for entry in self.temp.drain(..) { // acquire top `shift` bits from `key.hashed()` let target = (entry.key.hashed().as_u64() >> ((<K as Hashable>::Output::bytes() * 8) - shift)) as usize; debug_assert!(target < (1 << shift)); while cursor < target { // filling with bogus stuff self.keys.push(Entry::empty()); cursor += 1; } self.keys.push(entry); cursor += 1; } // fill out the space, if not full. while cursor < (1 << shift) { self.keys.push(Entry::empty()); cursor += 1; } // assert that we haven't doubled the allocation (would confuse the "what is shift?" logic) assert!((self.keys.len() - lower) < (2 << shift)); } } self.keys.len() } #[inline(never)] fn done(mut self) -> Self::Trie { self.boundary(); self.keys.shrink_to_fit(); let vals = self.vals.done(); if vals.tuples() > 0 { assert!(self.keys.len() > 0); } HashedLayer { keys: self.keys, vals: vals, } } } impl<K: HashOrdered+Clone+Default, L: MergeBuilder> MergeBuilder for HashedBuilder<K, L> { fn
(other1: &Self::Trie, other2: &Self::Trie) -> Self { HashedBuilder { temp: Vec::new(), keys: Vec::with_capacity(other1.keys() + other2.keys()), vals: L::with_capacity(&other1.vals, &other2.vals), } } /// Copies fully formed ranges (note plural) of keys from another trie. /// /// While the ranges are fully formed, the offsets in them are relative to the other trie, and /// must be corrected. These keys must be moved immediately to self.keys, as there is no info /// about boundaries between them, and we are unable to lay out the info any differently. fn copy_range(&mut self, other: &Self::Trie, lower: usize, upper: usize) { if lower < upper { let other_basis = other.lower(lower); // from where in `other` the offsets do start. let self_basis = self.vals.boundary(); // from where in `self` the offsets must start. for index in lower .. upper { let other_entry = &other.keys[index]; let new_entry = if other_entry.is_some() { Entry::new( other_entry.key.clone(), (other_entry.get_lower() + self_basis) - other_basis, (other_entry.get_upper() + self_basis) - other_basis, ) } else { Entry::empty() }; self.keys.push(new_entry); } self.vals.copy_range(&other.vals, other.lower(lower), other.upper(upper-1)); self.boundary(); // <-- perhaps unnecessary, but ... } } fn push_merge(&mut self, other1: (&Self::Trie, usize, usize), other2: (&Self::Trie, usize, usize)) -> usize { // just rebinding names to clarify code. 
let (trie1, mut lower1, upper1) = other1; let (trie2, mut lower2, upper2) = other2; debug_assert!(upper1 <= trie1.keys.len()); debug_assert!(upper2 <= trie2.keys.len()); self.temp.reserve((upper1 - lower1) + (upper2 - lower2)); while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; } while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; } // while both mergees are still active while lower1 < upper1 && lower2 < upper2 { debug_assert!(trie1.keys[lower1].is_some()); debug_assert!(trie2.keys[lower2].is_some()); match trie1.keys[lower1].key.cmp(&trie2.keys[lower2].key) { ::std::cmp::Ordering::Less => { lower1 += self.push_while_less(trie1, lower1, upper1, &trie2.keys[lower2].key); } ::std::cmp::Ordering::Equal => { let lower = self.vals.boundary(); let upper = self.vals.push_merge( (&trie1.vals, trie1.lower(lower1), trie1.upper(lower1)), (&trie2.vals, trie2.lower(lower2), trie2.upper(lower2)) ); if upper > lower { self.temp.push(Entry::new(trie1.keys[lower1].key.clone(), lower, upper)); } lower1 += 1; lower2 += 1; while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; } while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; } } ::std::cmp::Ordering::Greater => { lower2 += self.push_while_less(trie2, lower2, upper2, &trie1.keys[lower1].key); } } } if lower1 < upper1 { self.push_all(trie1, lower1, upper1); } if lower2 < upper2 { self.push_all(trie2, lower2, upper2); } self.boundary() } } impl<K: HashOrdered+Clone+Default, L: TupleBuilder> TupleBuilder for HashedBuilder<K, L> { type Item = (K, L::Item); fn new() -> Self { HashedBuilder { temp: Vec::new(), keys: Vec::new(), vals: L::new() } } fn with_capacity(cap: usize) -> Self { HashedBuilder { temp: Vec::with_capacity(cap), keys: Vec::with_capacity(cap), vals: L::with_capacity(cap), } } #[inline] fn push_tuple(&mut self, (key, val): (K, L::Item)) { // we build up self.temp, and rely on self.boundary() to drain self.temp. 
let temp_len = self.temp.len(); if temp_len == 0 || self.temp[temp_len-1].key != key { if temp_len > 0 { debug_assert!(self.temp[temp_len-1].key < key); } let boundary = self.vals.boundary(); if temp_len > 0 { self.temp[temp_len-1].set_upper(boundary); } self.temp.push(Entry::new(key, boundary, 0)); // this should be fixed by boundary? } self.vals.push_tuple(val); } } impl<K: HashOrdered+Clone+Default, L: MergeBuilder> HashedBuilder<K, L> { /// Moves other stuff into self.temp. Returns number of element consumed. fn push_while_less(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize, vs: &K) -> usize { let other_basis = other.lower(lower); // from where in `other` the offsets do start. let self_basis = self.vals.boundary(); // from where in `self` the offsets must start. let mut bound = 0; // tracks largest value of upper let mut index = lower; // let vs_hashed = vs.hashed(); // stop if overrun, or if we find a valid element >= our target. while index < upper && !(other.keys[index].is_some() && &other.keys[index].key >= vs) { if other.upper(index) != 0 { if bound < other.upper(index) { bound = other.upper(index); } debug_assert!(other.lower(index) < other.upper(index)); let lower = (other.lower(index) + self_basis) - other_basis; let upper = (other.upper(index) + self_basis) - other_basis; self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper)); } index += 1; } debug_assert!(bound > 0); self.vals.copy_range(&other.vals, other.lower(lower), bound); index - lower } fn push_all(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize) { debug_assert!(lower < upper); debug_assert!(upper <= other.keys.len()); let other_basis = other.lower(lower); // from where in `other` the offsets do start. let self_basis = self.vals.boundary(); // from where in `self` the offsets must start. let mut bound = 0; // tracks largest value of upper for index in lower .. 
upper { if other.upper(index) != 0 { if bound < other.upper(index) { bound = other.upper(index); } let lower = (other.lower(index) + self_basis) - other_basis; let upper = (other.upper(index) + self_basis) - other_basis; self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper)); } } debug_assert!(bound > 0); self.vals.copy_range(&other.vals, other.lower(lower), bound); } } /// A cursor with a child cursor that is updated as we move. #[derive(Debug)] pub struct HashedCursor<L: Trie> { shift: usize, // amount by which to shift hashes. bounds: (usize, usize), // bounds of slice of self.keys. pos: usize, // <-- current cursor position. /// A cursor for the layer below this one. pub child: L::Cursor, } impl<K: HashOrdered, L: Trie> Cursor<HashedLayer<K, L>> for HashedCursor<L> { type Key = K; fn key<'a>(&self, storage: &'a HashedLayer<K, L>) -> &'a Self::Key { &storage.keys[self.pos].key } fn step(&mut self, storage: &HashedLayer<K, L>) { // look for next valid entry self.pos += 1; while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() { self.pos += 1; } if self.valid(storage) { let child_lower = storage.keys[self.pos].get_lower(); let child_upper = storage.keys[self.pos].get_upper(); self.child.reposition(&storage.vals, child_lower, child_upper); } else { self.pos = self.bounds.1; } } #[inline(never)] fn seek(&mut self, storage: &HashedLayer<K, L>, key: &Self::Key) { // leap to where the key *should* be, or at least be soon after. // let key_hash = key.hashed(); // only update position if shift is large. otherwise leave it alone. 
if self.shift >= MINIMUM_SHIFT { let target = (key.hashed().as_u64() >> ((K::Output::bytes() * 8) - self.shift)) as usize; self.pos = target; } // scan forward until we find a valid entry >= (key_hash, key) while self.pos < self.bounds.1 && (!storage.keys[self.pos].is_some() || &storage.keys[self.pos].key < key) { self.pos += 1; } // self.pos should now either // (i) have self.pos == self.bounds.1 (and be invalid) or // (ii) point at a valid entry with (entry_hash, entry) >= (key_hash, key). if self.valid(storage) { self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper()); } } fn valid(&self, _storage: &HashedLayer<K, L>) -> bool { self.pos < self.bounds.1 } fn rewind(&mut self, storage: &HashedLayer<K, L>) { self.pos = self.bounds.0; if self.valid(storage) { self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper()); } } fn reposition(&mut self, storage: &HashedLayer<K, L>, lower: usize, upper: usize) { // sort out what the shift is. // should be just before the first power of two strictly containing (lower, upper]. self.shift = 0; while upper - lower >= (1 << self.shift) { self.shift += 1; } self.shift -= 1; self.bounds = (lower, upper); self.pos = lower; // set self.pos to something valid. while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() { self.pos += 1; } if self.valid(storage) { self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper()); } } }
with_capacity
identifier_name
hashed.rs
//! Implementation using ordered keys with hashes and robin hood hashing. use std::default::Default; use timely_sort::Unsigned; use ::hashable::{Hashable, HashOrdered}; use super::{Trie, Cursor, Builder, MergeBuilder, TupleBuilder}; const MINIMUM_SHIFT : usize = 4; const BLOAT_FACTOR : f64 = 1.1; // I would like the trie entries to look like (Key, usize), where a usize equal to the // previous entry indicates that the location is empty. This would let us always use the // prior location to determine lower bounds, rather than double up upper and lower bounds // in Entry. // // It might also be good to optimistically build the hash map in place. We can do this by // upper bounding the number of keys, allocating and placing as if this many, and then // drawing down the allocation and placements if many keys collided or cancelled. /// A level of the trie, with keys and offsets into a lower layer. /// /// If keys[i].1 == 0 then entry i should /// be ignored. This is our version of `Option<(K, usize)>`, which comes at the cost /// of requiring `K: Default` to populate empty keys. /// /// Each region of this layer is an independent immutable RHH map, whose size should /// equal something like `(1 << i) + i` for some value of `i`. The first `(1 << i)` /// elements are where we expect to find keys, and the remaining `i` are for spill-over /// due to collisions near the end of the first region. /// /// We might do something like "if X or fewer elements, just use an ordered list". #[derive(Debug)] pub struct HashedLayer<K: HashOrdered, L> { /// Keys and offsets for the keys. pub keys: Vec<Entry<K>>, // track upper and lower bounds, because trickery is hard. /// A lower layer containing ranges of values. 
pub vals: L, } impl<K: HashOrdered, L> HashedLayer<K, L> { fn _entry_valid(&self, index: usize) -> bool { self.keys[index].is_some() } fn lower(&self, index: usize) -> usize { self.keys[index].get_lower() } fn upper(&self, index: usize) -> usize { self.keys[index].get_upper() } } impl<K: Clone+HashOrdered+Default, L: Trie> Trie for HashedLayer<K, L> { type Item = (K, L::Item); type Cursor = HashedCursor<L>; type MergeBuilder = HashedBuilder<K, L::MergeBuilder>; type TupleBuilder = HashedBuilder<K, L::TupleBuilder>; fn keys(&self) -> usize { self.keys.len() } fn tuples(&self) -> usize { self.vals.tuples() } fn cursor_from(&self, lower: usize, upper: usize) -> Self::Cursor { if lower < upper { let mut shift = 0; while upper - lower >= (1 << shift) { shift += 1; } shift -= 1; let mut pos = lower; // set self.pos to something valid. while pos < upper && !self.keys[pos].is_some() { pos += 1; } HashedCursor { shift: shift, bounds: (lower, upper), pos: pos, // keys: owned_self.clone().map(|x| &x.keys[..]), child: self.vals.cursor_from(self.keys[pos].get_lower(), self.keys[pos].get_upper()) } } else { HashedCursor { shift: 0, bounds: (0, 0), pos: 0, // keys: owned_self.clone().map(|x| &x.keys[..]), // &self.keys, child: self.vals.cursor_from(0, 0), } } } } /// An entry in hash tables. #[derive(Debug, Clone)] pub struct Entry<K: HashOrdered> { /// The contained key. 
key: K, lower1: u32, upper1: u32, } impl<K: HashOrdered> Entry<K> { fn new(key: K, lower: usize, upper: usize) -> Self { Entry { key: key, lower1: lower as u32, upper1: upper as u32, } } // fn for_cmp(&self) -> (K::Output, &K) { (self.key.hashed(), &self.key) } fn is_some(&self) -> bool { self.upper1 != 0 } fn empty() -> Self where K: Default { Self::new(Default::default(), 0, 0) } fn get_lower(&self) -> usize { self.lower1 as usize} fn get_upper(&self) -> usize { self.upper1 as usize} fn _set_lower(&mut self, x: usize) { self.lower1 = x as u32; } fn set_upper(&mut self, x: usize) { self.upper1 = x as u32; } } /// Assembles a layer of this pub struct HashedBuilder<K: HashOrdered, L> { temp: Vec<Entry<K>>, // staging for building; densely packed here and then re-laid out in self.keys. /// Entries in the hash map. pub keys: Vec<Entry<K>>, // keys and offs co-located because we expect to find the right answers fast. /// A builder for the layer below. pub vals: L, } impl<K: HashOrdered+Clone+Default, L> HashedBuilder<K, L> { #[inline] fn _lower(&self, index: usize) -> usize { self.keys[index].get_lower() } #[inline] fn _upper(&self, index: usize) -> usize { self.keys[index].get_upper() } } impl<K: HashOrdered+Clone+Default, L: Builder> Builder for HashedBuilder<K, L> { type Trie = HashedLayer<K, L::Trie>; /// Looks at the contents of self.temp and extends self.keys appropriately. /// /// This is where the "hash map" structure is produced. Up until this point, all (key, usize) pairs were /// committed to self.temp, where they awaited layout. That now happens here. fn boundary(&mut self) -> usize { /// self.temp *should* be sorted by (hash, key); let's check! debug_assert!((1 .. 
self.temp.len()).all(|i| self.temp[i-1].key < self.temp[i].key)); let boundary = self.vals.boundary(); if self.temp.len() > 0 { // push doesn't know the length at the end; must write it if !self.temp[self.temp.len()-1].is_some() { let pos = self.temp.len()-1; self.temp[pos].set_upper(boundary); } // having densely packed everything, we now want to extend the allocation and rewrite the contents // so that their spacing is in line with how robin hood hashing works. let lower = self.keys.len(); if self.temp.len() < (1 << MINIMUM_SHIFT) { self.keys.extend(self.temp.drain(..)); } else { let target = (BLOAT_FACTOR * (self.temp.len() as f64)) as u64; let mut shift = MINIMUM_SHIFT; while (1 << shift) < target { shift += 1; } self.keys.reserve(1 << shift); // now going to start pushing things in to self.keys let mut cursor: usize = 0; // <-- current write pos in self.keys. for entry in self.temp.drain(..) { // acquire top `shift` bits from `key.hashed()` let target = (entry.key.hashed().as_u64() >> ((<K as Hashable>::Output::bytes() * 8) - shift)) as usize; debug_assert!(target < (1 << shift)); while cursor < target { // filling with bogus stuff self.keys.push(Entry::empty()); cursor += 1; } self.keys.push(entry); cursor += 1; } // fill out the space, if not full. while cursor < (1 << shift) { self.keys.push(Entry::empty()); cursor += 1; } // assert that we haven't doubled the allocation (would confuse the "what is shift?" 
logic) assert!((self.keys.len() - lower) < (2 << shift)); } } self.keys.len() } #[inline(never)] fn done(mut self) -> Self::Trie { self.boundary(); self.keys.shrink_to_fit(); let vals = self.vals.done(); if vals.tuples() > 0 { assert!(self.keys.len() > 0); } HashedLayer { keys: self.keys, vals: vals, } } } impl<K: HashOrdered+Clone+Default, L: MergeBuilder> MergeBuilder for HashedBuilder<K, L> { fn with_capacity(other1: &Self::Trie, other2: &Self::Trie) -> Self { HashedBuilder { temp: Vec::new(), keys: Vec::with_capacity(other1.keys() + other2.keys()), vals: L::with_capacity(&other1.vals, &other2.vals), } } /// Copies fully formed ranges (note plural) of keys from another trie. /// /// While the ranges are fully formed, the offsets in them are relative to the other trie, and /// must be corrected. These keys must be moved immediately to self.keys, as there is no info /// about boundaries between them, and we are unable to lay out the info any differently. fn copy_range(&mut self, other: &Self::Trie, lower: usize, upper: usize) { if lower < upper { let other_basis = other.lower(lower); // from where in `other` the offsets do start. let self_basis = self.vals.boundary(); // from where in `self` the offsets must start. for index in lower .. upper { let other_entry = &other.keys[index]; let new_entry = if other_entry.is_some() { Entry::new( other_entry.key.clone(), (other_entry.get_lower() + self_basis) - other_basis, (other_entry.get_upper() + self_basis) - other_basis, ) } else { Entry::empty() }; self.keys.push(new_entry); } self.vals.copy_range(&other.vals, other.lower(lower), other.upper(upper-1)); self.boundary(); // <-- perhaps unnecessary, but ... } } fn push_merge(&mut self, other1: (&Self::Trie, usize, usize), other2: (&Self::Trie, usize, usize)) -> usize { // just rebinding names to clarify code. 
let (trie1, mut lower1, upper1) = other1; let (trie2, mut lower2, upper2) = other2; debug_assert!(upper1 <= trie1.keys.len()); debug_assert!(upper2 <= trie2.keys.len()); self.temp.reserve((upper1 - lower1) + (upper2 - lower2)); while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; } while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; } // while both mergees are still active while lower1 < upper1 && lower2 < upper2 { debug_assert!(trie1.keys[lower1].is_some()); debug_assert!(trie2.keys[lower2].is_some()); match trie1.keys[lower1].key.cmp(&trie2.keys[lower2].key) { ::std::cmp::Ordering::Less => { lower1 += self.push_while_less(trie1, lower1, upper1, &trie2.keys[lower2].key); } ::std::cmp::Ordering::Equal => { let lower = self.vals.boundary(); let upper = self.vals.push_merge( (&trie1.vals, trie1.lower(lower1), trie1.upper(lower1)), (&trie2.vals, trie2.lower(lower2), trie2.upper(lower2)) ); if upper > lower { self.temp.push(Entry::new(trie1.keys[lower1].key.clone(), lower, upper)); } lower1 += 1; lower2 += 1; while lower1 < trie1.keys.len() && !trie1.keys[lower1].is_some() { lower1 += 1; } while lower2 < trie2.keys.len() && !trie2.keys[lower2].is_some() { lower2 += 1; } } ::std::cmp::Ordering::Greater => { lower2 += self.push_while_less(trie2, lower2, upper2, &trie1.keys[lower1].key); } } } if lower1 < upper1 { self.push_all(trie1, lower1, upper1); } if lower2 < upper2 { self.push_all(trie2, lower2, upper2); } self.boundary() } } impl<K: HashOrdered+Clone+Default, L: TupleBuilder> TupleBuilder for HashedBuilder<K, L> { type Item = (K, L::Item); fn new() -> Self { HashedBuilder { temp: Vec::new(), keys: Vec::new(), vals: L::new() } } fn with_capacity(cap: usize) -> Self { HashedBuilder { temp: Vec::with_capacity(cap), keys: Vec::with_capacity(cap), vals: L::with_capacity(cap), } } #[inline] fn push_tuple(&mut self, (key, val): (K, L::Item)) { // we build up self.temp, and rely on self.boundary() to drain self.temp. 
let temp_len = self.temp.len(); if temp_len == 0 || self.temp[temp_len-1].key != key { if temp_len > 0 { debug_assert!(self.temp[temp_len-1].key < key); } let boundary = self.vals.boundary(); if temp_len > 0 { self.temp[temp_len-1].set_upper(boundary); } self.temp.push(Entry::new(key, boundary, 0)); // this should be fixed by boundary? } self.vals.push_tuple(val); } } impl<K: HashOrdered+Clone+Default, L: MergeBuilder> HashedBuilder<K, L> { /// Moves other stuff into self.temp. Returns number of element consumed. fn push_while_less(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize, vs: &K) -> usize { let other_basis = other.lower(lower); // from where in `other` the offsets do start. let self_basis = self.vals.boundary(); // from where in `self` the offsets must start. let mut bound = 0; // tracks largest value of upper let mut index = lower; // let vs_hashed = vs.hashed(); // stop if overrun, or if we find a valid element >= our target. while index < upper && !(other.keys[index].is_some() && &other.keys[index].key >= vs) { if other.upper(index) != 0 { if bound < other.upper(index) { bound = other.upper(index); } debug_assert!(other.lower(index) < other.upper(index)); let lower = (other.lower(index) + self_basis) - other_basis; let upper = (other.upper(index) + self_basis) - other_basis; self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper)); } index += 1; } debug_assert!(bound > 0); self.vals.copy_range(&other.vals, other.lower(lower), bound); index - lower } fn push_all(&mut self, other: &HashedLayer<K, L::Trie>, lower: usize, upper: usize) { debug_assert!(lower < upper); debug_assert!(upper <= other.keys.len()); let other_basis = other.lower(lower); // from where in `other` the offsets do start. let self_basis = self.vals.boundary(); // from where in `self` the offsets must start. let mut bound = 0; // tracks largest value of upper for index in lower .. 
upper { if other.upper(index) != 0 { if bound < other.upper(index) { bound = other.upper(index); } let lower = (other.lower(index) + self_basis) - other_basis; let upper = (other.upper(index) + self_basis) - other_basis; self.temp.push(Entry::new(other.keys[index].key.clone(), lower, upper)); } } debug_assert!(bound > 0); self.vals.copy_range(&other.vals, other.lower(lower), bound); } } /// A cursor with a child cursor that is updated as we move. #[derive(Debug)] pub struct HashedCursor<L: Trie> { shift: usize, // amount by which to shift hashes. bounds: (usize, usize), // bounds of slice of self.keys. pos: usize, // <-- current cursor position. /// A cursor for the layer below this one. pub child: L::Cursor, } impl<K: HashOrdered, L: Trie> Cursor<HashedLayer<K, L>> for HashedCursor<L> { type Key = K; fn key<'a>(&self, storage: &'a HashedLayer<K, L>) -> &'a Self::Key { &storage.keys[self.pos].key } fn step(&mut self, storage: &HashedLayer<K, L>) { // look for next valid entry self.pos += 1; while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() { self.pos += 1; } if self.valid(storage) { let child_lower = storage.keys[self.pos].get_lower(); let child_upper = storage.keys[self.pos].get_upper(); self.child.reposition(&storage.vals, child_lower, child_upper);
else { self.pos = self.bounds.1; } } #[inline(never)] fn seek(&mut self, storage: &HashedLayer<K, L>, key: &Self::Key) { // leap to where the key *should* be, or at least be soon after. // let key_hash = key.hashed(); // only update position if shift is large. otherwise leave it alone. if self.shift >= MINIMUM_SHIFT { let target = (key.hashed().as_u64() >> ((K::Output::bytes() * 8) - self.shift)) as usize; self.pos = target; } // scan forward until we find a valid entry >= (key_hash, key) while self.pos < self.bounds.1 && (!storage.keys[self.pos].is_some() || &storage.keys[self.pos].key < key) { self.pos += 1; } // self.pos should now either // (i) have self.pos == self.bounds.1 (and be invalid) or // (ii) point at a valid entry with (entry_hash, entry) >= (key_hash, key). if self.valid(storage) { self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper()); } } fn valid(&self, _storage: &HashedLayer<K, L>) -> bool { self.pos < self.bounds.1 } fn rewind(&mut self, storage: &HashedLayer<K, L>) { self.pos = self.bounds.0; if self.valid(storage) { self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper()); } } fn reposition(&mut self, storage: &HashedLayer<K, L>, lower: usize, upper: usize) { // sort out what the shift is. // should be just before the first power of two strictly containing (lower, upper]. self.shift = 0; while upper - lower >= (1 << self.shift) { self.shift += 1; } self.shift -= 1; self.bounds = (lower, upper); self.pos = lower; // set self.pos to something valid. while self.pos < self.bounds.1 && !storage.keys[self.pos].is_some() { self.pos += 1; } if self.valid(storage) { self.child.reposition(&storage.vals, storage.keys[self.pos].get_lower(), storage.keys[self.pos].get_upper()); } } }
}
random_line_split
views.py
from django.shortcuts import render,redirect,HttpResponse from seller import models import os """用来加密的函数,要调用它""" import hashlib def pwd_jm(password): md5 = hashlib.md5() md5.update(password.encode()) result = md5.hexdigest() return result """3中校验方式,1.表单校验 2.装饰器校验 3.中间件校验""" """form表单校验register,归根到底算是前端,即前台""" from django import forms from django.forms import widgets#加提示语需要引入的模块 import time#时间戳需要引用的模块 class RegisterForm(forms.Form): username = forms.CharField( label='用户名', required=True, min_length=3, widget=widgets.TextInput(attrs={'placeholder':'用户名','class':'layui-input'}) )#attrs里面的是input输入框里的属性,即placeholder属性和class属性 nickname = forms.CharField( label='昵称', required=True,#require的意思是不能为空,是一个校验条件,自动补出来了 min_length=3,#自己提示出来了,就不用再也错误信息了,前端给写好了 widget=widgets.TextInput(attrs={'placeholder':'昵称','calss':'layui-input'}) ) password = forms.CharField( label='密码', required=True, min_length=6, widget=widgets.PasswordInput(attrs={'placeholder':'密码','class':'layui-input'}) ) picture = forms.CharField( label='头像', required=True, widget=widgets.FileInput(attrs={'class':'layui-input'}) ) """第一步注册,为对应的后台""" def register(request): registerForm = RegisterForm()#弄一个form表单校验的对象 if request.method == 'POST': registerForm = RegisterForm(request.POST,request.FILES)#POST是校验普通字段,FILES是校验上传字段 if registerForm.is_valid(): #1.获取数据,cleaned_data 就是读取表单返回的值,返回类型为字典dict型 data = registerForm.cleaned_data username = data.get('username') nickname = data.get('nickname') password = data.get('password') #picture=data.get('picture')如果这样获取的是图片名称 picture = request.FILES.get('picture')#获取的是图片对象 time_temp = time.time()#获取当前时间戳 #2.保存图片 path = 'static/touxiang/' + str(time_temp) + '_' + picture.name with open(path,mode='wb') as f: for content in picture.chunks(): f.write(content) #3.对密码加密 password = pwd_jm(password) #4.保存到数据库 models.Seller.objects.create( name = username, nickname = nickname, password = password, picture = 'touxiang/'+str(time_temp) + '_' +picture.name ) #4.重定向到登录页面 return 
redirect('/seller/login/') return render(request,'seller/register.html',{'registerForm':registerForm}) """第二步form表单校验register,即前端前台""" class LoginForm(forms.Form): #登录就两个输入框,所以校验有两个就行了 username = forms.CharField( label='用户名', required=True, min_length=3, widget=widgets.TextInput(attrs={'placeholder': '用户名', 'class': 'layui-input'}) ) password = forms.CharField( label='密码', required=True, min_length=6, widget=widgets.PasswordInput(attrs={'placeholder': '密码', 'class': 'layui-input'}) ) """登录,即后端后台""" def login(request): loginForm = LoginForm()#创建一个form表单校验类的对象 if request.method == 'POST': loginForm = LoginForm(request.POST)#将POST请求的内容给表单校验类的对象,即输入的账号密码信息给对象 if loginForm.is_valid(): # 1. 获取表单提交过来的内容,cleaned_data 就是读取表单返回的值,返回类型为字典dict型 data = loginForm.cleaned_data username = data.get('username') password = data.get('password') # 2. 先加密,调用之前定义的加密函数 password = pwd_jm(password) # 3. 验证,成功跳转到首页,不成功还返回登录 ret = models.Seller.objects.filter(name=username, password=password) print(ret)#用get还是用filter,用filter,因为用get查不到用户名会报错 #报错,为了不让程序终止,要捕获异常 if ret:#有东西是true,没有是false,它返回的是Query Set[] ,空集合, # 转换为布尔值就是false,于是就执行最后一个return,回到首页 # 登录成功后将用户名保存到session中,用于首页的显示和后期的操作 request.session['username'] = username request.session['seller_id'] = ret[0].id # 如果成功,挑战到首页 return redirect('/seller/index/')# 如果不成功,重新跳转到登录页面 return render(request, 'seller/login.html', {'loginForm': loginForm}) """第五步登录装饰器,要写在需要加装饰器的视图函数的前面""" def login_decorator(func): def inner(request): username = request.session.get('username') if username: return func(request) else:
"""第三步主页""" import datetime #过滤器不能直接过滤时间戳 #给主页加一个装饰器 # @login_decorator def index(request): # 1.获取当前登录时间 times = datetime.datetime.now() # 2. 获取头像 seller_id = request.session.get('seller_id') seller_obj = models.Seller.objects.get(id=seller_id) # models.Seller.objects.get(name=request.session.get('username'))这种方法获取头像也行,要不就整个id出来,即116行 # print(seller_obj.picture.name) # print(type(seller_obj.picture.name))#是一个字符串 return render(request, 'seller/index.html', {'times': times, 'seller_obj': seller_obj}) """第四步,登出,也需要加装饰器""" # @login_decorator def logout(request): # print('--------')用打印来判断装饰器是否失效,没失效登录logout页面不会打印出这句话 # 1.清除session request.session.clear()#clear是删除内容的功能 # 2. 重定向到登录界面 return redirect('/seller/login/') #登出之后直接在地址栏输入主页index的地址,应该不让访问,方法是在def index里面第一行家判断,即下面 # username = request.session.get('username') # if not username: # return redirect('/seller/login/') #但是这样写太麻烦,登入登出即每个视图函数都得在开始写这个判断语句,所以改用装饰器 """第六步中间件实现功能,退出登陆后,不能在地址栏输入主页地址就访问进去""" #第一版注释掉 #第一版type_add注释掉 # def type_add(request): # msg='' # if request.method == 'POST': # # 1. 获取表单提交过来的数据 # type_name = request.POST.get('type_name')#获取的是type_add下输入框的name # if type_name:#有去数据库查询,没有就保存到数据库 # ret=models.GoodsType.objects.filter(name=type_name) # if not ret:#若没有not,意思就是有,查出来了,即数据库中有你输入的这个名字 # """如果数据库中没有此商品类型,则保存到数据库""" # # 2. 保存到数据库 # models.GoodsType.objects.create(name = type_name) # # 3. 
重定向到类型列表展示页面 # return redirect('/seller/type_list/') # else: # msg='此商品类型已经存在' # #如果是空就不判断,直接执行下一句话 # return render(request,'seller/type_add.html') #第一版的ajax,一块注释掉 # from django.http import JsonResponse # def type_add_ajax(request): # dic={'status':'false'}#默认是false,表示没有 # #获取ajax提交过来的内容 # name = request.GET.get('name') # #在数据库中查询 # ret = models.GoodsType.objects.filter(name=name) # if ret:#表示数据库中有这个类型 # dic['status']='true' # return JsonResponse(dic) #第二版,增加按钮,用ajax的post提交,即验证了,又保存了, def type_add(request): return render(request,'seller/type_add.html') def type_add1_ajax(request): dic = {'status': 'false'} if request.method == 'POST': # 1. 获取ajax 提交过来的内容 type_name = request.POST.get('name') if type_name: # 2. 去数据库中查询 ret = models.GoodsType.objects.filter(name=type_name) if ret: """数据库中存在""" dic['status'] = 'true' else: # 3. 返回 """数据库中不存在""" models.GoodsType.objects.create(name=type_name) return JsonResponse(dic) from django.http import JsonResponse def type_add_ajax(request):#ajax类型添加和校验 return JsonResponse({'name':'xxx'}) """商品类型展示页面,即list页面""" def type_list(request): # 1. 查询数据库 goods_type_obj_list = models.GoodsType.objects.all().order_by('-id') return render(request,'seller/type_list.html',{'goods_type_obj_list':goods_type_obj_list}) """删除类型页面""" def type_delete(request): # 1. 获取id id = request.GET.get('id') # 2. 查询数据库并且删除 models.GoodsType.objects.filter(id=id).delete() # 3. 重定向到列表页面 return redirect('/seller/type_list/') #post请求查完之后返回页面 """编辑页面""" # def type_change(request): # if request.method == 'GET': # # 1.获取id # type_id = request.GET.get('id') # # 2. 查询数据库 # goods_type_obj=models.GoodsType.objects.get(id=type_id) # # 3. 返回页面 # return render(request,'seller/type_change.html',{'goods_type_obj':goods_type_obj}) # else: # """post请求""" # # 1. 
获取表单提交过来的内容(id和商品类型名称) # id = request.POST.get('id') # type_name = request.POST.get('type_name') # #判断要改成的名字,是否已经存在了,做一个判断 # #查询数据库,存在就提示,不存在就修改和保存 # queryset_obj = models.GoodsType.objects.filter(name=type_name) # if not queryset_obj:#意思是要改成的名字可以用,数据库中没有这个名字 # goods_type_obj = models.GoodsType.objects.get(id=id) # goods_type_obj.name = type_name # goods_type_obj.save() # #重定向到类型列表页面 # return redirect('/seller/type_list/') # else: # #如果存在 :提示 # return render(request,'seller/type_change.html',{'error':'此类型已经存在'}) # # 2. 查询数据库并且修改 # goods_type_obj = models.GoodsType.objects.get(id=id) # # 3. 重定向到类型列表展示页面 # return redirect('/seller/type_list/') def goods_add(request): if request.method == 'GET': # 1. 查询数据库中的商品类型 goods_type_obj_list=models.GoodsType.objects.all() return render(request,'seller/goods_add.html',{'goods_type_obj_list':goods_type_obj_list}) else: #post请求 #1.获取表单提交过来的数据 goods_num = request.POST.get('goods_num')#编号 goods_name = request.POST.get('goods_name')#名称 goods_oprice = request.POST.get('goods_oprice')#原价 goods_xprice = request.POST.get('goods_xprice')#现价 goods_count = request.POST.get('goods_count')#库存 goods_type_id = request.POST.get('goods_type')#商品类型id goods_content = request.POST.get('goods_content')#商品详情 goods_description = request.POST.get('goods_description')#商品描述 userfiles=request.FILES.getlist('userfiles')#获取多张图片 #2.保存数据库 goods_obj = models.Goods.objects.create( goods_num=goods_num, goods_name=goods_name, goods_oprice=goods_oprice, goods_cprice=goods_xprice, goods_kucun=goods_count, type_id=goods_type_id, goods_detail=goods_content, goods_desc=goods_description, seller_id=request.session.get('seller_id') ) #3.保存图片 import time,datetime for userfile in userfiles: #时间戳要写在for里面,循环一个生成一个时间戳 time_temp = str(time.time())#时间戳 path = 'static/goodsimage/'+time_temp+ '_' + userfile.name with open(path,mode='wb') as f: for con in userfile.chunks(): f.write(con) #接下来将图片路径保存到数据库 models.GoodsImage.objects.create( 
image_address='goodimage/'+time_temp+'-'+userfile.name, goods=goods_obj ) #4.重定向到商品列表 return redirect('/seler/goods_list/') """商品列表界面""" def goods_list(request): # 1. 获取当前用户的所有商品 seller_id = request.session.get('seller_id') queryset_obj = models.Goods.objects.filter(seller_id = seller_id)#使用用户的id return render(request,'seller/goods_list.html',{'queryset_obj':queryset_obj}) """删除""" def goods_delete(request): # 1. 获取 商品id goods_id = request.GET.get('id') queryset_obj = models.GoodsImage.objects.filter(good_id=goods_id) for goods_image_obj in queryset_obj: path = goods_image_obj.image_address#图片路径 path = 'static/'+path os.remove(path) models.Goods.objects.get(id=goods_id).delete()#删除商品了 对应的图片应该也一块删除 #数据库里不用管删除图片,数据库路径自动删除,但是本地还有图片 #应该先删图片,再删数据库的路径,应该先查找商品对应的图片路径删除本地图片,在查询数据库删除路径 # 3. 重定向到商品列表界面 return redirect('/seller/goods_list/')
return redirect('/seller/login/') return inner
random_line_split
views.py
from django.shortcuts import render,redirect,HttpResponse from seller import models import os """用来加密的函数,要调用它""" import hashlib def pwd_jm(password): md5 = hashlib.md5() md5.update(password.encode()) result = md5.hexdigest() return result """3中校验方式,1.表单校验 2.装饰器校验 3.中间件校验""" """form表单校验register,归根到底算是前端,即前台""" from django import forms from django.forms import widgets#加提示语需要引入的模块 import time#时间戳需要引用的模块 class RegisterForm(forms.Form): username = forms.CharField( label='用户名', required=True, min_length=3, widget=widgets.TextInput(attrs={'placeholder':'用户名','class':'layui-input'}) )#attrs里面的是input输入框里的属性,即placeholder属性和class属性 nickname = forms.CharField( label='昵称', required=True,#require的意思是不能为空,是一个校验条件,自动补出来了 min_length=3,#自己提示出来了,就不用再也错误信息了,前端给写好了 widget=widgets.TextInput(attrs={'placeholder':'昵称','calss':'layui-input'}) ) password = forms.CharField( label='密码', required=True, min_length=6, widget=widgets.PasswordInput(attrs={'placeholder':'密码','class':'layui-input'}) ) picture = forms.CharField( label='头像', required=True, widget=widgets.FileInput(attrs={'class':'layui-input'}) ) """第一步注册,为对应的后台""" def register(request): registerForm = RegisterForm()#弄一个form表单校验的对象 if request.method == 'POST': registerForm = RegisterForm(request.POST,request.FILES)#POST是校验普通字段,FILES是校验上传字段 if registerForm.is_valid(): #1.获取数据,cleaned_data 就是读取表单返回的值,返回类型为字典dict型 data = registerForm.cleaned_data
username = data.get('username') nickname = data.get('nickname') password = data.get('password') #picture=data.get('picture')如果这样获取的是图片名称 picture = request.FILES.get('picture')#获取的是图片对象 time_temp = time.time()#获取当前时间戳 #2.保存图片 path = 'static/touxiang/' + str(time_temp) + '_' + picture.name with open(path,mode='wb') as f: for content in picture.chunks(): f.write(content) #3.对密码加密 password = pwd_jm(password) #4.保存到数据库 models.Seller.objects.create( name = username, nickname = nickname, password = password, picture = 'touxiang/'+str(time_temp) + '_' +picture.name ) #4.重定向到登录页面 return redirect('/seller/login/') return render(request,'seller/register.html',{'registerForm':registerForm}) """第二步form表单校验register,即前端前台""" class LoginForm(forms.Form): #登录就两个输入框,所以校验有两个就行了 username = forms.CharField( label='用户名', required=True, min_length=3, widget=widgets.TextInput(attrs={'placeholder': '用户名', 'class': 'layui-input'}) ) password = forms.CharField( label='密码', required=True, min_length=6, widget=widgets.PasswordInput(attrs={'placeholder': '密码', 'class': 'layui-input'}) ) """登录,即后端后台""" def login(request): loginForm = LoginForm()#创建一个form表单校验类的对象 if request.method == 'POST': loginForm = LoginForm(request.POST)#将POST请求的内容给表单校验类的对象,即输入的账号密码信息给对象 if loginForm.is_valid(): # 1. 获取表单提交过来的内容,cleaned_data 就是读取表单返回的值,返回类型为字典dict型 data = loginForm.cleaned_data username = data.get('username') password = data.get('password') # 2. 先加密,调用之前定义的加密函数 password = pwd_jm(password) # 3. 
验证,成功跳转到首页,不成功还返回登录 ret = models.Seller.objects.filter(name=username, password=password) print(ret)#用get还是用filter,用filter,因为用get查不到用户名会报错 #报错,为了不让程序终止,要捕获异常 if ret:#有东西是true,没有是false,它返回的是Query Set[] ,空集合, # 转换为布尔值就是false,于是就执行最后一个return,回到首页 # 登录成功后将用户名保存到session中,用于首页的显示和后期的操作 request.session['username'] = username request.session['seller_id'] = ret[0].id # 如果成功,挑战到首页 return redirect('/seller/index/')# 如果不成功,重新跳转到登录页面 return render(request, 'seller/login.html', {'loginForm': loginForm}) """第五步登录装饰器,要写在需要加装饰器的视图函数的前面""" def login_decorator(func): def inner(request): username = request.session.get('username') if username: return func(request) else: return redirect('/seller/login/') return inner """第三步主页""" import datetime #过滤器不能直接过滤时间戳 #给主页加一个装饰器 # @login_decorator def index(request): # 1.获取当前登录时间 times = datetime.datetime.now() # 2. 获取头像 seller_id = request.session.get('seller_id') seller_obj = models.Seller.objects.get(id=seller_id) # models.Seller.objects.get(name=request.session.get('username'))这种方法获取头像也行,要不就整个id出来,即116行 # print(seller_obj.picture.name) # print(type(seller_obj.picture.name))#是一个字符串 return render(request, 'seller/index.html', {'times': times, 'seller_obj': seller_obj}) """第四步,登出,也需要加装饰器""" # @login_decorator def logout(request): # print('--------')用打印来判断装饰器是否失效,没失效登录logout页面不会打印出这句话 # 1.清除session request.session.clear()#clear是删除内容的功能 # 2. 重定向到登录界面 return redirect('/seller/login/') #登出之后直接在地址栏输入主页index的地址,应该不让访问,方法是在def index里面第一行家判断,即下面 # username = request.session.get('username') # if not username: # return redirect('/seller/login/') #但是这样写太麻烦,登入登出即每个视图函数都得在开始写这个判断语句,所以改用装饰器 """第六步中间件实现功能,退出登陆后,不能在地址栏输入主页地址就访问进去""" #第一版注释掉 #第一版type_add注释掉 # def type_add(request): # msg='' # if request.method == 'POST': # # 1. 
获取表单提交过来的数据 # type_name = request.POST.get('type_name')#获取的是type_add下输入框的name # if type_name:#有去数据库查询,没有就保存到数据库 # ret=models.GoodsType.objects.filter(name=type_name) # if not ret:#若没有not,意思就是有,查出来了,即数据库中有你输入的这个名字 # """如果数据库中没有此商品类型,则保存到数据库""" # # 2. 保存到数据库 # models.GoodsType.objects.create(name = type_name) # # 3. 重定向到类型列表展示页面 # return redirect('/seller/type_list/') # else: # msg='此商品类型已经存在' # #如果是空就不判断,直接执行下一句话 # return render(request,'seller/type_add.html') #第一版的ajax,一块注释掉 # from django.http import JsonResponse # def type_add_ajax(request): # dic={'status':'false'}#默认是false,表示没有 # #获取ajax提交过来的内容 # name = request.GET.get('name') # #在数据库中查询 # ret = models.GoodsType.objects.filter(name=name) # if ret:#表示数据库中有这个类型 # dic['status']='true' # return JsonResponse(dic) #第二版,增加按钮,用ajax的post提交,即验证了,又保存了, def type_add(request): return render(request,'seller/type_add.html') def type_add1_ajax(request): dic = {'status': 'false'} if request.method == 'POST': # 1. 获取ajax 提交过来的内容 type_name = request.POST.get('name') if type_name: # 2. 去数据库中查询 ret = models.GoodsType.objects.filter(name=type_name) if ret: """数据库中存在""" dic['status'] = 'true' else: # 3. 返回 """数据库中不存在""" models.GoodsType.objects.create(name=type_name) return JsonResponse(dic) from django.http import JsonResponse def type_add_ajax(request):#ajax类型添加和校验 return JsonResponse({'name':'xxx'}) """商品类型展示页面,即list页面""" def type_list(request): # 1. 查询数据库 goods_type_obj_list = models.GoodsType.objects.all().order_by('-id') return render(request,'seller/type_list.html',{'goods_type_obj_list':goods_type_obj_list}) """删除类型页面""" def type_delete(request): # 1. 获取id id = request.GET.get('id') # 2. 查询数据库并且删除 models.GoodsType.objects.filter(id=id).delete() # 3. 重定向到列表页面 return redirect('/seller/type_list/') #post请求查完之后返回页面 """编辑页面""" # def type_change(request): # if request.method == 'GET': # # 1.获取id # type_id = request.GET.get('id') # # 2. 查询数据库 # goods_type_obj=models.GoodsType.objects.get(id=type_id) # # 3. 
返回页面 # return render(request,'seller/type_change.html',{'goods_type_obj':goods_type_obj}) # else: # """post请求""" # # 1. 获取表单提交过来的内容(id和商品类型名称) # id = request.POST.get('id') # type_name = request.POST.get('type_name') # #判断要改成的名字,是否已经存在了,做一个判断 # #查询数据库,存在就提示,不存在就修改和保存 # queryset_obj = models.GoodsType.objects.filter(name=type_name) # if not queryset_obj:#意思是要改成的名字可以用,数据库中没有这个名字 # goods_type_obj = models.GoodsType.objects.get(id=id) # goods_type_obj.name = type_name # goods_type_obj.save() # #重定向到类型列表页面 # return redirect('/seller/type_list/') # else: # #如果存在 :提示 # return render(request,'seller/type_change.html',{'error':'此类型已经存在'}) # # 2. 查询数据库并且修改 # goods_type_obj = models.GoodsType.objects.get(id=id) # # 3. 重定向到类型列表展示页面 # return redirect('/seller/type_list/') def goods_add(request): if request.method == 'GET': # 1. 查询数据库中的商品类型 goods_type_obj_list=models.GoodsType.objects.all() return render(request,'seller/goods_add.html',{'goods_type_obj_list':goods_type_obj_list}) else: #post请求 #1.获取表单提交过来的数据 goods_num = request.POST.get('goods_num')#编号 goods_name = request.POST.get('goods_name')#名称 goods_oprice = request.POST.get('goods_oprice')#原价 goods_xprice = request.POST.get('goods_xprice')#现价 goods_count = request.POST.get('goods_count')#库存 goods_type_id = request.POST.get('goods_type')#商品类型id goods_content = request.POST.get('goods_content')#商品详情 goods_description = request.POST.get('goods_description')#商品描述 userfiles=request.FILES.getlist('userfiles')#获取多张图片 #2.保存数据库 goods_obj = models.Goods.objects.create( goods_num=goods_num, goods_name=goods_name, goods_oprice=goods_oprice, goods_cprice=goods_xprice, goods_kucun=goods_count, type_id=goods_type_id, goods_detail=goods_content, goods_desc=goods_description, seller_id=request.session.get('seller_id') ) #3.保存图片 import time,datetime for userfile in userfiles: #时间戳要写在for里面,循环一个生成一个时间戳 time_temp = str(time.time())#时间戳 path = 'static/goodsimage/'+time_temp+ '_' + userfile.name with open(path,mode='wb') as f: for con in 
userfile.chunks(): f.write(con) #接下来将图片路径保存到数据库 models.GoodsImage.objects.create( image_address='goodimage/'+time_temp+'-'+userfile.name, goods=goods_obj ) #4.重定向到商品列表 return redirect('/seler/goods_list/') """商品列表界面""" def goods_list(request): # 1. 获取当前用户的所有商品 seller_id = request.session.get('seller_id') queryset_obj = models.Goods.objects.filter(seller_id = seller_id)#使用用户的id return render(request,'seller/goods_list.html',{'queryset_obj':queryset_obj}) """删除""" def goods_delete(request): # 1. 获取 商品id goods_id = request.GET.get('id') queryset_obj = models.GoodsImage.objects.filter(good_id=goods_id) for goods_image_obj in queryset_obj: path = goods_image_obj.image_address#图片路径 path = 'static/'+path os.remove(path) models.Goods.objects.get(id=goods_id).delete()#删除商品了 对应的图片应该也一块删除 #数据库里不用管删除图片,数据库路径自动删除,但是本地还有图片 #应该先删图片,再删数据库的路径,应该先查找商品对应的图片路径删除本地图片,在查询数据库删除路径 # 3. 重定向到商品列表界面 return redirect('/seller/goods_list/')
identifier_name
views.py
from django.shortcuts import render,redirect,HttpResponse from seller import models import os """用来加密的函数,要调用它""" import hashlib def pwd_jm(password): md5 = hashlib.md5() md5.update(password.encode()) result = md5.hexdigest() return result """3中校验方式,1.表单校验 2.装饰器校验 3.中间件校验""" """form表单校验register,归根到底算是前端,即前台""" from django import forms from django.forms import widgets#加提示语需要引入的模块 import time#时间戳需要引用的模块 class RegisterForm(forms.Form): username = forms.CharField( label='用户名', required=True, min_length=3, widget=widgets.TextInput(attrs={'placeholder':'用户名','class':'layui-input'}) )#attrs里面的是input输入框里的属性,即placeholder属性和class属性 nickname = forms.CharField( label='昵称', required=True,#require的意思是不能为空,是一个校验条件,自动补出来了 min_length=3,#自己提示出来了,就不用再也错误信息了,前端给写好了 widget=widgets.TextInput(attrs={'placeholder':'昵称','calss':'layui-input'}) ) password = forms.CharField( label='密码', required=True, min_length=6, widget=widgets.PasswordInput(attrs={'placeholder':'密码','class':'layui-input'}) ) picture = forms.CharField( label='头像', required=True, widget=widgets.FileInput(attrs={'class':'layui-input'}) ) """第一步注册,为对应的后台""" def register(request): registerForm = RegisterForm()#弄一个form表单校验的对象 if request.method == 'POST': registerForm = RegisterForm(request.POST,request.FILES)#POST是校验普通字段,FILES是校验上传字段 if registerForm.is_valid(): #1.获取数据,cleaned_data 就是读取表单返回的值,返回类型为字典dict型 data = registerForm.cleaned_data username = data.get('username') nickname = data.get('nickname') password = data.get('password') #picture=data.get('picture')如果这样获取的是图片名称 picture = request.FILES.get('picture')#获取的是图片对象 time_temp = time.time()#获取当前时间戳 #2.保存图片 path = 'static/touxiang/' + str(time_temp) + '_' + picture.name with open(path,mode='wb') as f: for content in picture.chunks(): f.write(content) #3.对密码加密 password = pwd_jm(password) #4.保存到数据库 models.Seller.objects.create( name = username, nickname = nickname, password = password, picture = 'touxiang/'+str(time_temp) + '_' +picture.name ) #4.重定向到登录页面 return 
redirect('/seller/login/') return render(request,'seller/register.html',{'registerForm':registerForm}) """第二步form表单校验register,即前端前台""" class LoginForm(forms.Form): #登录就两个输入框,所以校验有两个就行了 username = forms.CharField( label='用户名', required=True, min_length=3, widget=widgets.TextInput(attrs={'placeholder': '用户名', 'class': 'layui-input'}) ) password = forms.CharField( label='密码', required=True, min_length=6, widget=widgets.PasswordInput(attrs={'placeholder': '密码', 'class': 'layui-input'}) ) """登录,即后端后台""" def login(request): loginForm = LoginForm()#创建一个form表单校验类的对象 if request.method == 'POST': loginForm = LoginForm(request.POST)#将POST请求的内容给表单校验类的对象,即输入的账号密码信息给对象 if loginForm.is_valid(): # 1. 获取表单提交过来的内容,cleaned_data 就是读取表单返回的值,返回类型为字典dict型 data = loginForm.cleaned_data username = data.get('username') password = data.get('password') # 2. 先加密,调用之前定义的加密函数 password = pwd_jm(password) # 3. 验证,成功跳转到首页,不成功还返回登录 ret = models.Seller.objects.filter(name=username, password=password) print(ret)#用get还是用filter,用filter,因为用get查不到用户名会报错 #报错,为了不让程序终止,要捕获异常 if ret:#有东西是true,没有是false,它返回的是Query Set[] ,空集合, # 转换为布尔值就是false,于是就执行最后一个return,回到首页 # 登录成功后将用户名保存到session中,用于首页的显示和后期的操作 request.session['username'] = username request.session['seller_id'] = ret[0].id # 如果成功,挑战到首页 return redirect('/seller/index/')# 如果不成功,重新跳转到登录页面 return render(request, 'seller/login.html', {'loginForm': loginForm}) """第五步登录装饰器,要写在需要加装饰器的视图函数的前面""" def login_decorator(func): def inner(request): username = request.session.get('username') if username: return func(request) else: return redirect('/seller/login/') return inner """第三步主页""" import datetime #过滤器不能直接过滤时间戳 #给主页加一个装饰器 # @login_decorator def index(request): # 1.获取当前登录时间 times = datetime.datetime.now() # 2. 
获取头像 seller_id = request.session.get('seller_id') seller_obj = models.Seller.objects.get(id=seller_id) # models.Seller.objects.get(name=request.session.get('username'))这种方法获取头像也行,要不就整个id出来,即116行 # print(seller_obj.picture.name) # print(type(seller_obj.picture.name))#是一个字符串 return render(request, 'seller/index.html', {'times': times, 'seller_obj': seller_obj}) """第四步,登出,也需要加装饰器""" # @login_decorator def logout(request): # print('--------')用打印来判断装饰器是否失效,没失效登录logout页面不会打印出这句话 # 1.清除session request.session.clear()#clear是删除内容的功能 # 2. 重定向到登录界面 return redirect('/seller/login/') #登出之后直接在地址栏输入主页index的地址,应该不让访问,方法是在def index里面第一行家判断,即下面 # username = request.session.get('username') # if not username: # return redirect('/seller/login/') #但是这样写太麻烦,登入登出即每个视图函数都得在开始写这个判断语句,所以改用装饰器 """第六步中间件实现功能,退出登陆后,不能在地址栏输入主页地址就访问进去""" #第一版注释掉 #第一版type_add注释掉 # def type_add(request): # msg='' # if request.method == 'POST': # # 1. 获取表单提交过来的数据 # type_name = request.POST.get('type_name')#获取的是type_add下输入框的name # if type_name:#有去数据库查询,没有就保存到数据库 # ret=models.GoodsType.objects.filter(name=type_name) # if not ret:#若没有not,意思就是有,查出来了,即数据库中有你输入的这个名字 # """如果数据库中没有此商品类型,则保存到数据库""" # # 2. 保存到数据库 # models.GoodsType.objects.create(name = type_name) # # 3. 重定向到类型列表展示页面 # return redirect('/seller/type_list/') # else: # msg='此商品类型已经存在' # #如果是空就不判断,直接执行下一句话 # return render(request,'seller/type_add.html') #第一版的ajax,一块注释掉 # from django.http import JsonResponse # def type_add_ajax(request): # dic={'status':'false'}#默认是false,表示没有 # #获取ajax提交过来的内容 # name = request.GET.get('name') # #在数据库中查询 # ret = models.GoodsType.objects.filter(name=name) # if ret:#表示数据库中有这个类型 # dic['status']='true' # return JsonResponse(dic) #第二版,增加按钮,用ajax的post提交,即验证了,又保存了, def type_add(request): return render(request,'seller/type_add.html') def type_add1_ajax(request): dic = {'status': 'false'} if request.method == 'POST': # 1. 获取ajax 提交过来的内容 type_name = request.POST.get('name') if type_name: # 2. 
去数据库中查询 ret = models.GoodsType.objects.filter(name=type_name) if ret: """数据库中存在""" dic['status'] = 'true' else: # 3. 返回 """数据库中不存在""" models.GoodsType.objects.create(name=type_name) return JsonResponse(dic) from django.http import JsonResponse def type_add_ajax(request):#ajax类型添加和校验 return JsonResponse({'name':'xxx'}) """商品类型展示页面,即list页面""" def type_list(request): # 1. 查询数据库 goods_type_obj_list = models.GoodsType.objects.all().order_by('-id') return render(request,'seller/type_list.html',{'goods_type_obj_list':goods_type_obj_list}) """删除类型页面""" def type_delete(request): # 1. 获取id id = request.GET.get('id') # 2. 查询数据库并且删除 models.GoodsType.objects.filter(id=id).delete() # 3. 重定向到列表页面 return redirect('/seller/type_list/') #post请求查完之后返回页面 """编辑页面""" # def type_change(request): # if request.method == 'GET': # # 1.获取id # type_id = request.GET.get('id') # # 2. 查询数据库 # goods_type_obj=models.GoodsType.objects.get(id=type_id) # # 3. 返回页面 # return render(request,'seller/type_change.html',{'goods_type_obj':goods_type_obj}) # else: # """post请求""" # # 1. 获取表单提交过来的内容(id和商品类型名称) # id = request.POST.get('id') # type_name = request.POST.get('type_name') # #判断要改成的名字,是否已经存在了,做一个判断 # #查询数据库,存在就提示,不存在就修改和保存 # queryset_obj = models.GoodsType.objects.filter(name=type_name) # if not queryset_obj:#意思是要改成的名字可以用,数据库中没有这个名字 # goods_type_obj = models.GoodsType.objects.get(id=id) # goods_type_obj.name = type_name # goods_type_obj.save() # #重定向到类型列表页面 # return redirect('/seller/type_list/') # else: # #如果存在 :提示 # return render(request,'seller/type_change.html',{'error':'此类型已经存在'}) # # 2. 查询数据库并且修改 # goods_type_obj = models.GoodsType.objects.get(id=id) # # 3. 重定向到类型列表展示页面 # return redirect('/seller/type_list/') def goods_add(request): if request.method == 'GET': # 1. 
查询数据库中的商品类型 goods_type_obj_list=models.GoodsType.objects.all() return render(request,'seller/goods_add.html',{'goods_type_obj_list':goods_type_obj_list}) else: #post请求 #1.获取表单提交过来的数据 goods_num = request.POST.get('goods_num')#编号 goods_name = request.POST.get('goods_name')#名称 goods_oprice = request.POST.get('goods_oprice')#原价 goods_xprice = request.POST.get('goods_xprice')#现价 goods_count = request.POST.get('goods_count')#库存 goods_type_id = request.POST.get('goods_type')#商品类型id goods_content = request.POST.get('goods_content')#商品详情 goods_description = request.POST.get('goods_description')#商品描述 userfiles=request.FILES.getlist('userfiles')#获取多张图片 #2.保存数据库 goods_obj = models.Goods.objects.create( goods_num=goods_num, goods_name=goods_name, goods_oprice=goods_oprice, goods_cprice=goods_xprice, goods_kucun=goods_count, type_id=goods_type_id, goods_detail=goods_content, goods_desc=goods_description, seller_id=request.session.get('seller_id') ) #3.保存图片 import time,datetime for userfile in userfiles: #时间戳要写在for里面,循环一个生成一个时间戳 time_temp = str(time.time())#时间戳 path = 'static/goodsimage/'+time_temp+ '_' + userfile.name with open(path,mode='wb') as f: for con in userfile.chunks(): f.write(con) #接下来将图片路径保存到数据库 models.GoodsImage.objects.create( image_address='goodimage/'+time_temp+'-'+userfile.name, goods=goods_obj ) #4.重定向到商品列表 return redirect('/seler/goods_list/') """商品列表界面""" def goods_list(request): # 1. 获取当前用户的所有商品 seller_id = request.session.get('seller_id') queryset_obj = models.Goods.objects.filter(seller_id = seller_id)#使用用户的id return render(request,'seller/goods_list.html',{'queryset_obj':queryset_obj}) """删除""" def goods_delete(request): # 1. 
获取 商品id goods_id = request.GET.get('id') queryset_obj = models.GoodsImage.objects.filter(good_id=goods_id) for goods_image_obj in queryset_obj: path = goods_image_obj.image_address#图片路径 path = 'static/'+path os.remove(path) models.Goods.objects.get(id=goods_id).delete()#删除商品了 对应的图片应该也一块删除 #数据库里不用管删除图片,数据库路径自动删除,但是本地还有图片 #应该先删图片,再删数据库的路径,应该先查找商品对应的图片路径删除本地图片,在查询数据库删除路径 # 3. 重定向到商品列表界面 return redirect('/seller/goods_list/')
identifier_body
views.py
from django.shortcuts import render,redirect,HttpResponse from seller import models import os """用来加密的函数,要调用它""" import hashlib def pwd_jm(password): md5 = hashlib.md5() md5.update(password.encode()) result = md5.hexdigest() return result """3中校验方式,1.表单校验 2.装饰器校验 3.中间件校验""" """form表单校验register,归根到底算是前端,即前台""" from django import forms from django.forms import widgets#加提示语需要引入的模块 import time#时间戳需要引用的模块 class RegisterForm(forms.Form): username = forms.CharField( label='用户名', required=True, min_length=3, widget=widgets.TextInput(attrs={'placeholder':'用户名','class':'layui-input'}) )#attrs里面的是input输入框里的属性,即placeholder属性和class属性 nickname = forms.CharField( label='昵称', required=True,#require的意思是不能为空,是一个校验条件,自动补出来了 min_length=3,#自己提示出来了,就不用再也错误信息了,前端给写好了 widget=widgets.TextInput(attrs={'placeholder':'昵称','calss':'layui-input'}) ) password = forms.CharField( label='密码', required=True, min_length=6, widget=widgets.PasswordInput(attrs={'placeholder':'密码','class':'layui-input'}) ) picture = forms.CharField( label='头像', required=True, widget=widgets.FileInput(attrs={'class':'layui-input'}) ) """第一步注册,为对应的后台""" def register(request): registerForm = RegisterForm()#弄一个form表单校验的对象 if request.method == 'POST': registerForm = RegisterForm(request.POST,request.FILES)#POST是校验普通字段,FILES是校验上传字段 if registerForm.is_valid(): #1.获取数据,cleaned_data 就是读取表单返回的值,返回类型为字典dict型 data = registerForm.cleaned_data username = data.get('username') nickname = data.get('nickname') password = data.get('password') #picture=data.get('picture')如果这样获取的是图片名称 picture = request.FILES.get('picture')#获取的是图片对象 time_temp = time.time()#获取当前时间戳 #2.保存图片 path = 'static/touxiang/' + str(time_temp) + '_' + picture.name with open(path,mode='wb') as f: for content in picture.chunks(): f.write(content) #3.对密码加密 password = pwd_jm(password) #4.保存到数据库 models.Seller.objects.create( name = username, nickname = nickname, password = password, picture = 'touxiang/'+str(time_temp) + '_' +picture.name ) #4.重定向到登录页面 return 
redirect('/seller/login/') return render(request,'seller/register.html',{'registerForm':registerForm}) """第二步form表单校验register,即前端前台""" class LoginForm(forms.Form): #登录就两个输入框,所以校验有两个就行了 username = forms.CharField( label='用户名', required=True, min_length=3, widget=widgets.TextInput(attrs={'placeholder': '用户名', 'class': 'layui-input'}) ) password = forms.CharField( label='密码', required=True, min_length=6, widget=widgets.PasswordInput(attrs={'placeholder': '密码', 'class': 'layui-input'}) ) """登录,即后端后台""" def login(request): loginForm = LoginForm()#创建一个form表单校验类的对象 if request.method == 'POST': loginForm = LoginForm(request.POST)#将POST请求的内容给表单校验类的对象,即输入的账号密码信息给对象 if loginForm.is_valid(): # 1. 获取表单提交过来的内容,cleaned_data 就是读取表单返回的值,返回类型为字典dict型 data = loginForm.cleaned_data username = data.get('username') password = data.get('password') # 2. 先加密,调用之前定义的加密函数 password = pwd_jm(password) # 3. 验证,成功跳转到首页,不成功还返回登录 ret = models.Seller.objects.filter(name=username, password=password) print(ret)#用get还是用filter,用filter,因为用get查不到用户名会报错 #报错,为了不让程序终止,要捕获异常 if ret:#有东西是true,没有是false,它返回的是Query Set[] ,空集合, # 转换为布尔值就是false,于是就执行最后一个return,回到首页 # 登录成功后将用户名保存到session中,用于首页的显示和后期的操作 request.session['username'] = username request.session['seller_id'] = ret[0].id # 如果成功,挑战到首页 return redirect('/seller/index/')# 如果不成功,重新跳转到登录页面 return render(request, 'seller/login.html', {'loginForm': loginForm}) """第五步登录装饰器,要写在需要加装饰器的视图函数的前面""" def login_decorator(func): def inner(request): username = request.session.get('username') if username: return func(request) else: return redirect('/seller/login/') return inner """第三步主页""" import datetime #过滤器不能直接过滤时间戳 #给主页加一个装饰器 # @login_decorator def index(request): # 1.获取当前登录时间 times = datetime.datetime.now() # 2. 
获取头像 seller_id = request.session.get('seller_id') seller_obj = models.Seller.objects.get(id=seller_id) # models.Seller.objects.get(name=request.session.get('username'))这种方法获取头像也行,要不就整个id出来,即116行 # print(seller_obj.picture.name) # print(type(seller_obj.picture.name))#是一个字符串 return render(request, 'seller/index.html', {'times': times, 'seller_obj': seller_obj}) """第四步,登出,也需要加装饰器""" # @login_decorator def logout(request): # print('--------')用打印来判断装饰器是否失效,没失效登录logout页面不会打印出这句话 # 1.清除session request.session.clear()#clear是删除内容的功能 # 2. 重定向到登录界面 return redirect('/seller/login/') #登出之后直接在地址栏输入主页index的地址,应该不让访问,方法是在def index里面第一行家判断,即下面 # username = request.session.get('username') # if not username: # return redirect('/seller/login/') #但是这样写太麻烦,登入登出即每个视图函数都得在开始写这个判断语句,所以改用装饰器 """第六步中间件实现功能,退出登陆后,不能在地址栏输入主页地址就访问进去""" #第一版注释掉 #第一版type_add注释掉 # def type_add(request): # msg='' # if request.method == 'POST': # # 1. 获取表单提交过来的数据 # type_name = request.POST.get('type_name')#获取的是type_add下输入框的name # if type_name:#有去数据库查询,没有就保存到数据库 # ret=models.GoodsType.objects.filter(name=type_name) # if not ret:#若没有not,意思就是有,查出来了,即数据库中有你输入的这个名字 # """如果数据库中没有此商品类型,则保存到数据库""" # # 2. 保存到数据库 # models.GoodsType.objects.create(name = type_name) # # 3. 重定向到类型列表展示页面 # return redirect('/seller/type_list/') # else: # msg='此商品类型已经存在' # #如果是空就不判断,直接执行下一句话 # return render(request,'seller/type_add.html') #第一版的ajax,一块注释掉 # from django.http import JsonResponse # def type_add_ajax(request): # dic={'status':'false'}#默认是false,表示没有 # #获取ajax提交过来的内容 # name = request.GET.get('name') # #在数据库中查询 # ret = models.GoodsType.objects.filter(name=name) # if ret:#表示数据库中有这个类型 # dic['status']='true' # return JsonResponse(dic) #第二版,增加按钮,用ajax的post提交,即验证了,又保存了, def type_add(request): return render(request,'seller/type_add.html') def type_add1_ajax(request): dic = {'status': 'false'} if request.method == 'POST': # 1. 获取ajax 提交过来的内容 type_name = request.POST.get('name') if type_name: # 2. 
去数据库中查询 ret = models.GoodsType.objects.filter(name=type_name) if ret: """数据库中存在""" dic['status'] = 'true' else: # 3. 返回 """数据库中不存在""" models.GoodsType.objects.create(name=type_name) return JsonResponse(dic) from django.http import JsonResponse def type_add_ajax(request):#ajax类型添加和校验 return JsonResponse({'name':'xxx'}) """商品类型展示页面,即list页面""" def type_list(request): # 1. 查询数据库 goods_type_obj_list = models.GoodsType.objects.all().order_by('-id') return render(request,'seller/type_list.html',{'goods_type_obj_list':goods_type_obj_list}) """删除类型页面""" def type_delete(request): # 1. 获取id id = request.GET.get('id') # 2. 查询数据库并且删除 models.GoodsType.objects.filter(id=id).delete() # 3. 重定向到列表页面 return redirect('/seller/type_list/') #post请求查完之后返回页面 """编辑页面""" # def type_change(request): # if request.method == 'GET': # # 1.获取id # type_id = request.GET.get('id') # # 2. 查询数据库 # goods_type_obj=models.GoodsType.objects.get(id=type_id) # # 3. 返回页面 # return render(request,'seller/type_change.html',{'goods_type_obj':goods_type_obj}) # else: # """post请求""" # # 1. 获取表单提交过来的内容(id和商品类型名称) # id = request.POST.get('id') # type_name = request.POST.get('type_name') # #判断要改成的名字,是否已经存在了,做一个判断 # #查询数据库,存在就提示,不存在就修改和保存 # queryset_obj = models.GoodsType.objects.filter(name=type_name) # if not queryset_obj:#意思是要改成的名字可以用,数据库中没有这个名字 # goods_type_obj = models.GoodsType.objects.get(id=id) # goods_type_obj.name = type_name # goods_type_obj.save() # #重定向到类型列表页面 # return redirect('/seller/type_list/') # else: # #如果存在 :提示 # return render(request,'seller/type_change.html',{'error':'此类型已经存在'}) # # 2. 查询数据库并且修改 # goods_type_obj = models.GoodsType.objects.get(id=id) # # 3. 重定向到类型列表展示页面 # return redirect('/seller/type_list/') def goods_add(request): if request.method == 'GET': # 1. 
查询数据库中的商品类型 goods_type_obj_list=models.GoodsType.objects.all() return render(request,'seller/goods_add.html',{'goods_type_obj_list':goods_type_obj_list}) else: #post请求 #1.获取表单提交过来的数据 goods_num = request.POST.get('goods_num')#编号 goods_name = request.POST.get('goods_name')#名称 goods_oprice = request.POST.get('goods_oprice')#原价 goods_xprice = request.POST.get('goods_xprice')#现价 goods_count = request.POST.get('goods_count')#库存 goods_type_id = request.POST.get('goods_type')#商品类型id goods_content = request.POST.get('goods_content')#商品详情 goods_description = request.POST.get('goods_description')#商品描述 userfiles=request.FILES.getlist('userfiles')#获取多张图片 #2.保存数据库 goods_obj = models.Goods.objects.create( goods_num=goods_num, goods_name=goods_name, goods_oprice=goods_oprice, goods_cprice=goods_xprice, goods_kucun=goods_count, type_id=goods_type_id, goods_detail=goods_content, goods_desc=goods_description, seller_id=request.session.get('seller_id') ) #3.保存图片 import time,datetime for userfile in userfiles: #时间戳要写在for里面,循环一个生成一个时间戳 time_temp = str(time.time())#时间戳 path = 'static/goodsimage/'+time_temp+ '_' + userfile.name with open(path,mode='wb') as f: for con in userfile.chunks(): f.write(con) #接下来将图片路径保存到数据库 models.GoodsImage.objects.create( image_address='goodimage/'+time_temp+'-'+userfile.name, goods=goods_obj ) #4.重定向到商品列表 return redirect('/seler/goods_list/') """商品列表界面""" def goods_list(request): # 1. 获取当前用户的所有商品 seller_id = request.session.get('seller_id') queryset_obj = models.Goods.objects.filter(seller_id = seller_id)#使用用户的id return render(request,'seller/goods_list.html',{'queryset_obj':queryset_obj}) """删除""" def goods_delete(request): # 1. 
获取 商品id goods_id = request.GET.get('id') queryset_obj = models.GoodsImage.objects.filter(good_id=goods_id) for goods_image_obj in queryset_obj: path = goods_image_obj.image_address#图片路径 path = 'static/'+path os.remove(path) models.Goods.objects.get(id=goods_id).delete()#删除商品了 对应的图片应该也一块删除 #数据库里不用管删除图片,数据库路径自动删除,但是本地还有图片 #应该先删图片,再删数据库的路径,应该先查找商品对应的图片路径删除本地图片,在查询数据库删除路径 # 3. 重定向到商品列表界面 return redirect('/seller/goods_list/')
conditional_block
pan2mqtt.py
#!/usr/bin/python # -*- coding:utf-8 -*- #dependence: paho-mqtt (pip install paho-mqtt) # XBee (pip install XBee) # PyYAML (pip install PyYaml) # pyserial (pip install pyserial) import os import sys import time import logging import yaml from serial import Serial from factory import * from pan import * from filters import * from plugins import * from paho.mqtt import client from daemon import Daemon import sqlite3 as database class PAN2MQTT(Daemon): """ PAN network to MQTT bridge Supported PAN radio: XBee, Mesh Bee(from seeedstudio) To port a new radio driver, two method must be implemented: on_message, send_message """ def __init__ (self, logger, cfg): """ """ Daemon.__init__(self,cfg['general']['pidfile']) self.logger = logger self.config = cfg self.mqtt_connected = False self.mqtt_subcriptions = {} self.downlink_topics = {} self.uplink_topics = {} self.pan = Factory(self.config['pan']['driver_class']) if not self.pan: self.__log(logging.ERROR, "Can't instant pan driver") sys.exit(2) self.pan.logger = logger self.pan.on_message = self.on_message_from_pan self.stdout = self.config['general']['stdout'] self.stderr = self.config['general']['stdout'] self.host = self.config['mqtt']['host'] self.client_id = self.config['mqtt']['client_id'] self.mqtt_qos = self.config['mqtt']['qos'] self.mqtt_retain = self.config['mqtt']['retain'] self.status_topic = self.config['mqtt']['status_topic'] self.mqtt_client = client.Client(self.client_id, self.config['mqtt']['clean_session']) if self.__try_get_config(self.config['mqtt'], "username", None): self.mqtt_client.username_pw_set(self.config['mqtt']['username'], self.config['mqtt']['password']) if self.config['mqtt']['set_will']: self.mqtt_client.will_set(self.status_topic.format(client_id=self.client_id), "0", self.mqtt_qos, self.mqtt_retain) self.mqtt_client.on_connect = self.on_mqtt_connect self.mqtt_client.on_disconnect = self.on_mqtt_disconnect self.mqtt_client.on_message = self.on_message_from_mqtt 
self.mqtt_client.on_subscribe = self.on_mqtt_subscribe self.mqtt_client.on_log = self.on_mqtt_log self.plugins = self.__try_get_config(self.config, 'plugin', None) if not isinstance(self.plugins, dict): self.plugins = {self.plugins} self.plugins_ins = {} ### private method def __log(self, level, message): if self.logger: self.logger.log(level, message) @staticmethod def __try_get_config (parent, key, default): try: return parent[key] except: return default def __parse_nodes (self): self.downlink_topics = {} self.uplink_topics = {} if self.config['pan']['nodes']: for mac,mac_obj in self.config['pan']['nodes'].items(): for topic,topic_content in mac_obj.items(): topic = topic.format(client_id=self.client_id) if topic_content['dir'] == "uplink": self.uplink_topics[(mac, topic_content['match_key'])] = (topic,self.__try_get_config(topic_content,'filter',None)) elif topic_content['dir'] == "downlink": self.downlink_topics[topic] = (mac, topic_content) else: self.__log(logging.ERROR, "Unknown 'dir'") def __sub_downlink_topics (self): if not self.mqtt_connected: return for t in self.downlink_topics: rc, mid = self.mqtt_client.subscribe(t, self.mqtt_qos) self.mqtt_subcriptions[mid] = t self.__log(logging.INFO, "Sent subscription request to topic %s" % t) def __filter (self, input, filter_config): try: filter = Factory(filter_config['type']) if filter: filter.configure(filter_config['parameters']) if filter.validate(): return filter.process(input) except: pass return input #response topic list to client which requires this def __resp_topic_list(self, dst_topic): ''' Broadcast gateway information when the gateway thread is starting ''' str_topic_holder = '' if self.config['pan']['nodes']:
print "topic list:" + str_topic_holder self.mqtt_client.publish(dst_topic, str_topic_holder, 2) ### def on_mqtt_connect (self, client, userdata, flags, rc): if rc == 0: self.__log(logging.INFO, "Connected to MQTT broker: %s" % self.host) self.mqtt_client.publish(self.status_topic.format(client_id=self.client_id), "1") self.mqtt_connected = True self.__sub_downlink_topics() else: self.__log(logging.ERROR, "Could not connect to MQTT broker: %s" % self.host) self.__log(logging.ERROR, "Error code: %d" % rc) self.mqtt_connected = False def on_mqtt_disconnect (self, client, userdata, rc): self.mqtt_connected = False self.__log(logging.INFO, "Disconnected from MQTT broker: %s"%self.host) self.__log(logging.INFO, "Return code: %d"%rc) if rc!=0: self.__log(logging.ERROR, "Unexpected disconnect, waiting reconnect...") def on_mqtt_subscribe (self,client, userdata, mid, granted_qos): topic = self.mqtt_subcriptions.get(mid, "Unknown") self.__log(logging.INFO, "Sub to topic %s confirmed"%topic) def on_mqtt_log (self, client, userdata, level, buf): self.__log(logging.DEBUG, buf) def on_message_from_pan (self, mac, key, value, type): self.__log(logging.INFO, "Received message from PAN: %s, %s:%s" % (mac, key, value)) #walk over plugins and determin whether to drop ''' there are two callback in each plugin 1.on_message_from_pan abstract function in base description: do something when receives pan event 2.pre_publish description: do something before publish to broker ''' for name,p in self.plugins_ins.items(): if not p.on_message_from_pan(mac, key, value, type): return False #search the topic try: if self.uplink_topics[(mac,key)]: topic, filter = self.uplink_topics[(mac,key)] #apply the filter value_f = value if filter: value_f = self.__filter(value, filter) #walk over plugins and call the callback which watches on the publishment for name,p in self.plugins_ins.items(): if p.pre_publish: p.pre_publish(topic, value_f, value) #publish the topic self.__log(logging.INFO, "Publishing to 
topic: %s"%topic) self.mqtt_client.publish(topic, value_f, self.mqtt_qos, self.mqtt_retain) except KeyError, e: self.__log(logging.WARNING, "Received message unrecognized: %s" % e) def on_message_from_mqtt (self,client, userdata, message): self.__log(logging.INFO, "Received message from MQTT: %s: %s, qos %d" % (message.topic,message.payload,message.qos)) #walk over plugins and determin whether to drop for name,p in self.plugins_ins.items(): if not p.on_message_from_mqtt(message.topic, message.payload, message.qos): return False #search the topic if self.downlink_topics[message.topic]: mac, topic = self.downlink_topics[message.topic] #apply the filters if self.__try_get_config(topic, 'filter', None): value = self.__filter(message.payload, topic['filter']) else: value = message.payload #handle the topic types if topic['type'] == 'dio': self.pan.send_message('dio', mac, value, port = topic['dio_num']) #self.__log(logging.DEBUG, "sent dio message") elif topic['type'] == 'data': self.pan.send_message('data', mac, value) elif topic['type'] == 'rpc': pass elif topic['type'] == 'listening': #to specified client self.__resp_topic_list(str(value)) else: self.__log(logging.ERROR, "Unknown downlink handler type: %s" % topic['type']) return else: self.__log(logging.ERROR,"Received an unknown topic '%s' from mqtt" % message.topic) return def do_reload (self): self.__log(logging.DEBUG, "Reload not implemented now") def run (self): self.__log(logging.INFO, "Starting Pan2Mqtt %s" % self.config['general']['version']) #parse nodes, up/down-link channels self.__parse_nodes() #connect mqtt self.mqtt_client.connect(self.host, self.config['mqtt']['port'], self.config['mqtt']['keepalive']) sec=0 while True: if self.mqtt_connected: break else: self.mqtt_client.loop() sec=sec+1 if sec > 60: self.stop() sys.exit(2) #connect pan radio try: serial = Serial(self.config['pan']['port'], self.config['pan']['baudrate']) except Exception,e: self.__log(logging.ERROR, "Can't open serial: %s" % e) 
sys.exit(2) self.pan.serial = serial if not self.pan.connect(): self.stop() #start the plugins for p in self.plugins: ins = Factory(p) if ins: self.plugins_ins[p] = ins if self.__try_get_config(self.config['plugin'], p, None): self.plugins_ins[p].config = self.config['plugin'][p] self.plugins_ins[p].global_config = self.config self.plugins_ins[p].send_to_pan = self.pan.send_message self.plugins_ins[p].send_to_mqtt = self.mqtt_client.publish self.plugins_ins[p].start() else: self.__log(logging.ERROR, "Can not make the instance of %s from factory"%p) #blocking loop try: self.mqtt_client.loop_forever() except KeyboardInterrupt: self.__log(logging.ERROR, "Terminated by user") self.cleanup() def cleanup (self): self.pan.disconnect() self.__log(logging.INFO, "Cleaning up...") self.mqtt_client.disconnect() if os.path.exists(self.pidfile): os.remove(self.pidfile) for name, p in self.plugins_ins.items(): p.cleanup() sys.exit() def resolve_path(path): return path if path[0] == '/' else os.path.join(os.path.dirname(os.path.realpath(__file__)), path) if __name__ == '__main__': config_file = './pan2mqtt.yaml' fh = file(resolve_path(config_file), 'r') config = yaml.load(fh) fh.close() handler = logging.StreamHandler() formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') handler.setFormatter(formatter) logger = logging.getLogger() logger.setLevel(config['general']['log_level']) logger.addHandler(handler) gw = PAN2MQTT(logger, config) if len(sys.argv) == 2: if 'start' == sys.argv[1]: gw.start() elif 'stop' == sys.argv[1]: gw.stop() elif 'restart' == sys.argv[1]: gw.restart() elif 'reload' == sys.argv[1]: gw.reload() elif 'foreground' == sys.argv[1]: gw.run() else: print "Unknown command" sys.exit(2) sys.exit(0) else: print "usage: %s start|stop|restart|foreground" % sys.argv[0] sys.exit(2)
for mac,mac_obj in self.config['pan']['nodes'].items(): for topic,topic_content in mac_obj.items(): topic = topic.format(client_id=self.client_id) if topic_content['dir'] == "uplink" and topic_content['type'] != "listening": str_topic_holder = str_topic_holder + topic + "@"
conditional_block
pan2mqtt.py
#!/usr/bin/python # -*- coding:utf-8 -*- #dependence: paho-mqtt (pip install paho-mqtt) # XBee (pip install XBee) # PyYAML (pip install PyYaml) # pyserial (pip install pyserial) import os import sys import time import logging import yaml from serial import Serial from factory import * from pan import * from filters import * from plugins import * from paho.mqtt import client from daemon import Daemon import sqlite3 as database class PAN2MQTT(Daemon): """ PAN network to MQTT bridge Supported PAN radio: XBee, Mesh Bee(from seeedstudio) To port a new radio driver, two method must be implemented: on_message, send_message """ def __init__ (self, logger, cfg): """ """ Daemon.__init__(self,cfg['general']['pidfile']) self.logger = logger self.config = cfg self.mqtt_connected = False self.mqtt_subcriptions = {} self.downlink_topics = {} self.uplink_topics = {} self.pan = Factory(self.config['pan']['driver_class']) if not self.pan: self.__log(logging.ERROR, "Can't instant pan driver") sys.exit(2) self.pan.logger = logger self.pan.on_message = self.on_message_from_pan self.stdout = self.config['general']['stdout'] self.stderr = self.config['general']['stdout'] self.host = self.config['mqtt']['host'] self.client_id = self.config['mqtt']['client_id'] self.mqtt_qos = self.config['mqtt']['qos'] self.mqtt_retain = self.config['mqtt']['retain'] self.status_topic = self.config['mqtt']['status_topic'] self.mqtt_client = client.Client(self.client_id, self.config['mqtt']['clean_session']) if self.__try_get_config(self.config['mqtt'], "username", None): self.mqtt_client.username_pw_set(self.config['mqtt']['username'], self.config['mqtt']['password']) if self.config['mqtt']['set_will']: self.mqtt_client.will_set(self.status_topic.format(client_id=self.client_id), "0", self.mqtt_qos, self.mqtt_retain) self.mqtt_client.on_connect = self.on_mqtt_connect self.mqtt_client.on_disconnect = self.on_mqtt_disconnect self.mqtt_client.on_message = self.on_message_from_mqtt 
self.mqtt_client.on_subscribe = self.on_mqtt_subscribe self.mqtt_client.on_log = self.on_mqtt_log self.plugins = self.__try_get_config(self.config, 'plugin', None) if not isinstance(self.plugins, dict): self.plugins = {self.plugins} self.plugins_ins = {} ### private method def __log(self, level, message): if self.logger: self.logger.log(level, message) @staticmethod def __try_get_config (parent, key, default): try: return parent[key] except: return default def
(self): self.downlink_topics = {} self.uplink_topics = {} if self.config['pan']['nodes']: for mac,mac_obj in self.config['pan']['nodes'].items(): for topic,topic_content in mac_obj.items(): topic = topic.format(client_id=self.client_id) if topic_content['dir'] == "uplink": self.uplink_topics[(mac, topic_content['match_key'])] = (topic,self.__try_get_config(topic_content,'filter',None)) elif topic_content['dir'] == "downlink": self.downlink_topics[topic] = (mac, topic_content) else: self.__log(logging.ERROR, "Unknown 'dir'") def __sub_downlink_topics (self): if not self.mqtt_connected: return for t in self.downlink_topics: rc, mid = self.mqtt_client.subscribe(t, self.mqtt_qos) self.mqtt_subcriptions[mid] = t self.__log(logging.INFO, "Sent subscription request to topic %s" % t) def __filter (self, input, filter_config): try: filter = Factory(filter_config['type']) if filter: filter.configure(filter_config['parameters']) if filter.validate(): return filter.process(input) except: pass return input #response topic list to client which requires this def __resp_topic_list(self, dst_topic): ''' Broadcast gateway information when the gateway thread is starting ''' str_topic_holder = '' if self.config['pan']['nodes']: for mac,mac_obj in self.config['pan']['nodes'].items(): for topic,topic_content in mac_obj.items(): topic = topic.format(client_id=self.client_id) if topic_content['dir'] == "uplink" and topic_content['type'] != "listening": str_topic_holder = str_topic_holder + topic + "@" print "topic list:" + str_topic_holder self.mqtt_client.publish(dst_topic, str_topic_holder, 2) ### def on_mqtt_connect (self, client, userdata, flags, rc): if rc == 0: self.__log(logging.INFO, "Connected to MQTT broker: %s" % self.host) self.mqtt_client.publish(self.status_topic.format(client_id=self.client_id), "1") self.mqtt_connected = True self.__sub_downlink_topics() else: self.__log(logging.ERROR, "Could not connect to MQTT broker: %s" % self.host) self.__log(logging.ERROR, "Error 
code: %d" % rc) self.mqtt_connected = False def on_mqtt_disconnect (self, client, userdata, rc): self.mqtt_connected = False self.__log(logging.INFO, "Disconnected from MQTT broker: %s"%self.host) self.__log(logging.INFO, "Return code: %d"%rc) if rc!=0: self.__log(logging.ERROR, "Unexpected disconnect, waiting reconnect...") def on_mqtt_subscribe (self,client, userdata, mid, granted_qos): topic = self.mqtt_subcriptions.get(mid, "Unknown") self.__log(logging.INFO, "Sub to topic %s confirmed"%topic) def on_mqtt_log (self, client, userdata, level, buf): self.__log(logging.DEBUG, buf) def on_message_from_pan (self, mac, key, value, type): self.__log(logging.INFO, "Received message from PAN: %s, %s:%s" % (mac, key, value)) #walk over plugins and determin whether to drop ''' there are two callback in each plugin 1.on_message_from_pan abstract function in base description: do something when receives pan event 2.pre_publish description: do something before publish to broker ''' for name,p in self.plugins_ins.items(): if not p.on_message_from_pan(mac, key, value, type): return False #search the topic try: if self.uplink_topics[(mac,key)]: topic, filter = self.uplink_topics[(mac,key)] #apply the filter value_f = value if filter: value_f = self.__filter(value, filter) #walk over plugins and call the callback which watches on the publishment for name,p in self.plugins_ins.items(): if p.pre_publish: p.pre_publish(topic, value_f, value) #publish the topic self.__log(logging.INFO, "Publishing to topic: %s"%topic) self.mqtt_client.publish(topic, value_f, self.mqtt_qos, self.mqtt_retain) except KeyError, e: self.__log(logging.WARNING, "Received message unrecognized: %s" % e) def on_message_from_mqtt (self,client, userdata, message): self.__log(logging.INFO, "Received message from MQTT: %s: %s, qos %d" % (message.topic,message.payload,message.qos)) #walk over plugins and determin whether to drop for name,p in self.plugins_ins.items(): if not p.on_message_from_mqtt(message.topic, 
message.payload, message.qos): return False #search the topic if self.downlink_topics[message.topic]: mac, topic = self.downlink_topics[message.topic] #apply the filters if self.__try_get_config(topic, 'filter', None): value = self.__filter(message.payload, topic['filter']) else: value = message.payload #handle the topic types if topic['type'] == 'dio': self.pan.send_message('dio', mac, value, port = topic['dio_num']) #self.__log(logging.DEBUG, "sent dio message") elif topic['type'] == 'data': self.pan.send_message('data', mac, value) elif topic['type'] == 'rpc': pass elif topic['type'] == 'listening': #to specified client self.__resp_topic_list(str(value)) else: self.__log(logging.ERROR, "Unknown downlink handler type: %s" % topic['type']) return else: self.__log(logging.ERROR,"Received an unknown topic '%s' from mqtt" % message.topic) return def do_reload (self): self.__log(logging.DEBUG, "Reload not implemented now") def run (self): self.__log(logging.INFO, "Starting Pan2Mqtt %s" % self.config['general']['version']) #parse nodes, up/down-link channels self.__parse_nodes() #connect mqtt self.mqtt_client.connect(self.host, self.config['mqtt']['port'], self.config['mqtt']['keepalive']) sec=0 while True: if self.mqtt_connected: break else: self.mqtt_client.loop() sec=sec+1 if sec > 60: self.stop() sys.exit(2) #connect pan radio try: serial = Serial(self.config['pan']['port'], self.config['pan']['baudrate']) except Exception,e: self.__log(logging.ERROR, "Can't open serial: %s" % e) sys.exit(2) self.pan.serial = serial if not self.pan.connect(): self.stop() #start the plugins for p in self.plugins: ins = Factory(p) if ins: self.plugins_ins[p] = ins if self.__try_get_config(self.config['plugin'], p, None): self.plugins_ins[p].config = self.config['plugin'][p] self.plugins_ins[p].global_config = self.config self.plugins_ins[p].send_to_pan = self.pan.send_message self.plugins_ins[p].send_to_mqtt = self.mqtt_client.publish self.plugins_ins[p].start() else: 
self.__log(logging.ERROR, "Can not make the instance of %s from factory"%p) #blocking loop try: self.mqtt_client.loop_forever() except KeyboardInterrupt: self.__log(logging.ERROR, "Terminated by user") self.cleanup() def cleanup (self): self.pan.disconnect() self.__log(logging.INFO, "Cleaning up...") self.mqtt_client.disconnect() if os.path.exists(self.pidfile): os.remove(self.pidfile) for name, p in self.plugins_ins.items(): p.cleanup() sys.exit() def resolve_path(path): return path if path[0] == '/' else os.path.join(os.path.dirname(os.path.realpath(__file__)), path) if __name__ == '__main__': config_file = './pan2mqtt.yaml' fh = file(resolve_path(config_file), 'r') config = yaml.load(fh) fh.close() handler = logging.StreamHandler() formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') handler.setFormatter(formatter) logger = logging.getLogger() logger.setLevel(config['general']['log_level']) logger.addHandler(handler) gw = PAN2MQTT(logger, config) if len(sys.argv) == 2: if 'start' == sys.argv[1]: gw.start() elif 'stop' == sys.argv[1]: gw.stop() elif 'restart' == sys.argv[1]: gw.restart() elif 'reload' == sys.argv[1]: gw.reload() elif 'foreground' == sys.argv[1]: gw.run() else: print "Unknown command" sys.exit(2) sys.exit(0) else: print "usage: %s start|stop|restart|foreground" % sys.argv[0] sys.exit(2)
__parse_nodes
identifier_name
pan2mqtt.py
#!/usr/bin/python # -*- coding:utf-8 -*- #dependence: paho-mqtt (pip install paho-mqtt) # XBee (pip install XBee) # PyYAML (pip install PyYaml) # pyserial (pip install pyserial) import os import sys import time import logging import yaml from serial import Serial from factory import * from pan import * from filters import * from plugins import * from paho.mqtt import client from daemon import Daemon import sqlite3 as database class PAN2MQTT(Daemon): """ PAN network to MQTT bridge Supported PAN radio: XBee, Mesh Bee(from seeedstudio) To port a new radio driver, two method must be implemented: on_message, send_message """ def __init__ (self, logger, cfg): """ """ Daemon.__init__(self,cfg['general']['pidfile']) self.logger = logger self.config = cfg self.mqtt_connected = False self.mqtt_subcriptions = {} self.downlink_topics = {} self.uplink_topics = {} self.pan = Factory(self.config['pan']['driver_class']) if not self.pan: self.__log(logging.ERROR, "Can't instant pan driver") sys.exit(2) self.pan.logger = logger self.pan.on_message = self.on_message_from_pan self.stdout = self.config['general']['stdout'] self.stderr = self.config['general']['stdout'] self.host = self.config['mqtt']['host'] self.client_id = self.config['mqtt']['client_id'] self.mqtt_qos = self.config['mqtt']['qos'] self.mqtt_retain = self.config['mqtt']['retain'] self.status_topic = self.config['mqtt']['status_topic'] self.mqtt_client = client.Client(self.client_id, self.config['mqtt']['clean_session']) if self.__try_get_config(self.config['mqtt'], "username", None): self.mqtt_client.username_pw_set(self.config['mqtt']['username'], self.config['mqtt']['password']) if self.config['mqtt']['set_will']: self.mqtt_client.will_set(self.status_topic.format(client_id=self.client_id), "0", self.mqtt_qos, self.mqtt_retain) self.mqtt_client.on_connect = self.on_mqtt_connect self.mqtt_client.on_disconnect = self.on_mqtt_disconnect self.mqtt_client.on_message = self.on_message_from_mqtt 
self.mqtt_client.on_subscribe = self.on_mqtt_subscribe self.mqtt_client.on_log = self.on_mqtt_log self.plugins = self.__try_get_config(self.config, 'plugin', None) if not isinstance(self.plugins, dict): self.plugins = {self.plugins} self.plugins_ins = {} ### private method def __log(self, level, message): if self.logger: self.logger.log(level, message) @staticmethod def __try_get_config (parent, key, default): try: return parent[key] except: return default def __parse_nodes (self): self.downlink_topics = {} self.uplink_topics = {} if self.config['pan']['nodes']: for mac,mac_obj in self.config['pan']['nodes'].items(): for topic,topic_content in mac_obj.items(): topic = topic.format(client_id=self.client_id) if topic_content['dir'] == "uplink": self.uplink_topics[(mac, topic_content['match_key'])] = (topic,self.__try_get_config(topic_content,'filter',None)) elif topic_content['dir'] == "downlink": self.downlink_topics[topic] = (mac, topic_content) else: self.__log(logging.ERROR, "Unknown 'dir'") def __sub_downlink_topics (self): if not self.mqtt_connected: return for t in self.downlink_topics: rc, mid = self.mqtt_client.subscribe(t, self.mqtt_qos) self.mqtt_subcriptions[mid] = t self.__log(logging.INFO, "Sent subscription request to topic %s" % t) def __filter (self, input, filter_config): try: filter = Factory(filter_config['type']) if filter: filter.configure(filter_config['parameters']) if filter.validate(): return filter.process(input) except: pass return input #response topic list to client which requires this def __resp_topic_list(self, dst_topic): ''' Broadcast gateway information when the gateway thread is starting ''' str_topic_holder = '' if self.config['pan']['nodes']: for mac,mac_obj in self.config['pan']['nodes'].items(): for topic,topic_content in mac_obj.items(): topic = topic.format(client_id=self.client_id) if topic_content['dir'] == "uplink" and topic_content['type'] != "listening": str_topic_holder = str_topic_holder + topic + "@" print "topic 
list:" + str_topic_holder self.mqtt_client.publish(dst_topic, str_topic_holder, 2) ### def on_mqtt_connect (self, client, userdata, flags, rc): if rc == 0: self.__log(logging.INFO, "Connected to MQTT broker: %s" % self.host) self.mqtt_client.publish(self.status_topic.format(client_id=self.client_id), "1") self.mqtt_connected = True self.__sub_downlink_topics() else: self.__log(logging.ERROR, "Could not connect to MQTT broker: %s" % self.host) self.__log(logging.ERROR, "Error code: %d" % rc) self.mqtt_connected = False def on_mqtt_disconnect (self, client, userdata, rc): self.mqtt_connected = False self.__log(logging.INFO, "Disconnected from MQTT broker: %s"%self.host) self.__log(logging.INFO, "Return code: %d"%rc) if rc!=0: self.__log(logging.ERROR, "Unexpected disconnect, waiting reconnect...") def on_mqtt_subscribe (self,client, userdata, mid, granted_qos):
def on_mqtt_log (self, client, userdata, level, buf): self.__log(logging.DEBUG, buf) def on_message_from_pan (self, mac, key, value, type): self.__log(logging.INFO, "Received message from PAN: %s, %s:%s" % (mac, key, value)) #walk over plugins and determin whether to drop ''' there are two callback in each plugin 1.on_message_from_pan abstract function in base description: do something when receives pan event 2.pre_publish description: do something before publish to broker ''' for name,p in self.plugins_ins.items(): if not p.on_message_from_pan(mac, key, value, type): return False #search the topic try: if self.uplink_topics[(mac,key)]: topic, filter = self.uplink_topics[(mac,key)] #apply the filter value_f = value if filter: value_f = self.__filter(value, filter) #walk over plugins and call the callback which watches on the publishment for name,p in self.plugins_ins.items(): if p.pre_publish: p.pre_publish(topic, value_f, value) #publish the topic self.__log(logging.INFO, "Publishing to topic: %s"%topic) self.mqtt_client.publish(topic, value_f, self.mqtt_qos, self.mqtt_retain) except KeyError, e: self.__log(logging.WARNING, "Received message unrecognized: %s" % e) def on_message_from_mqtt (self,client, userdata, message): self.__log(logging.INFO, "Received message from MQTT: %s: %s, qos %d" % (message.topic,message.payload,message.qos)) #walk over plugins and determin whether to drop for name,p in self.plugins_ins.items(): if not p.on_message_from_mqtt(message.topic, message.payload, message.qos): return False #search the topic if self.downlink_topics[message.topic]: mac, topic = self.downlink_topics[message.topic] #apply the filters if self.__try_get_config(topic, 'filter', None): value = self.__filter(message.payload, topic['filter']) else: value = message.payload #handle the topic types if topic['type'] == 'dio': self.pan.send_message('dio', mac, value, port = topic['dio_num']) #self.__log(logging.DEBUG, "sent dio message") elif topic['type'] == 'data': 
self.pan.send_message('data', mac, value) elif topic['type'] == 'rpc': pass elif topic['type'] == 'listening': #to specified client self.__resp_topic_list(str(value)) else: self.__log(logging.ERROR, "Unknown downlink handler type: %s" % topic['type']) return else: self.__log(logging.ERROR,"Received an unknown topic '%s' from mqtt" % message.topic) return def do_reload (self): self.__log(logging.DEBUG, "Reload not implemented now") def run (self): self.__log(logging.INFO, "Starting Pan2Mqtt %s" % self.config['general']['version']) #parse nodes, up/down-link channels self.__parse_nodes() #connect mqtt self.mqtt_client.connect(self.host, self.config['mqtt']['port'], self.config['mqtt']['keepalive']) sec=0 while True: if self.mqtt_connected: break else: self.mqtt_client.loop() sec=sec+1 if sec > 60: self.stop() sys.exit(2) #connect pan radio try: serial = Serial(self.config['pan']['port'], self.config['pan']['baudrate']) except Exception,e: self.__log(logging.ERROR, "Can't open serial: %s" % e) sys.exit(2) self.pan.serial = serial if not self.pan.connect(): self.stop() #start the plugins for p in self.plugins: ins = Factory(p) if ins: self.plugins_ins[p] = ins if self.__try_get_config(self.config['plugin'], p, None): self.plugins_ins[p].config = self.config['plugin'][p] self.plugins_ins[p].global_config = self.config self.plugins_ins[p].send_to_pan = self.pan.send_message self.plugins_ins[p].send_to_mqtt = self.mqtt_client.publish self.plugins_ins[p].start() else: self.__log(logging.ERROR, "Can not make the instance of %s from factory"%p) #blocking loop try: self.mqtt_client.loop_forever() except KeyboardInterrupt: self.__log(logging.ERROR, "Terminated by user") self.cleanup() def cleanup (self): self.pan.disconnect() self.__log(logging.INFO, "Cleaning up...") self.mqtt_client.disconnect() if os.path.exists(self.pidfile): os.remove(self.pidfile) for name, p in self.plugins_ins.items(): p.cleanup() sys.exit() def resolve_path(path): return path if path[0] == '/' else 
os.path.join(os.path.dirname(os.path.realpath(__file__)), path) if __name__ == '__main__': config_file = './pan2mqtt.yaml' fh = file(resolve_path(config_file), 'r') config = yaml.load(fh) fh.close() handler = logging.StreamHandler() formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') handler.setFormatter(formatter) logger = logging.getLogger() logger.setLevel(config['general']['log_level']) logger.addHandler(handler) gw = PAN2MQTT(logger, config) if len(sys.argv) == 2: if 'start' == sys.argv[1]: gw.start() elif 'stop' == sys.argv[1]: gw.stop() elif 'restart' == sys.argv[1]: gw.restart() elif 'reload' == sys.argv[1]: gw.reload() elif 'foreground' == sys.argv[1]: gw.run() else: print "Unknown command" sys.exit(2) sys.exit(0) else: print "usage: %s start|stop|restart|foreground" % sys.argv[0] sys.exit(2)
topic = self.mqtt_subcriptions.get(mid, "Unknown") self.__log(logging.INFO, "Sub to topic %s confirmed"%topic)
identifier_body
pan2mqtt.py
#!/usr/bin/python # -*- coding:utf-8 -*- #dependence: paho-mqtt (pip install paho-mqtt) # XBee (pip install XBee) # PyYAML (pip install PyYaml) # pyserial (pip install pyserial) import os import sys import time import logging import yaml from serial import Serial from factory import * from pan import * from filters import * from plugins import * from paho.mqtt import client from daemon import Daemon import sqlite3 as database class PAN2MQTT(Daemon): """ PAN network to MQTT bridge Supported PAN radio: XBee, Mesh Bee(from seeedstudio) To port a new radio driver, two method must be implemented: on_message, send_message """ def __init__ (self, logger, cfg): """ """ Daemon.__init__(self,cfg['general']['pidfile']) self.logger = logger self.config = cfg self.mqtt_connected = False self.mqtt_subcriptions = {} self.downlink_topics = {} self.uplink_topics = {} self.pan = Factory(self.config['pan']['driver_class']) if not self.pan: self.__log(logging.ERROR, "Can't instant pan driver") sys.exit(2) self.pan.logger = logger self.pan.on_message = self.on_message_from_pan self.stdout = self.config['general']['stdout'] self.stderr = self.config['general']['stdout'] self.host = self.config['mqtt']['host'] self.client_id = self.config['mqtt']['client_id'] self.mqtt_qos = self.config['mqtt']['qos'] self.mqtt_retain = self.config['mqtt']['retain'] self.status_topic = self.config['mqtt']['status_topic'] self.mqtt_client = client.Client(self.client_id, self.config['mqtt']['clean_session']) if self.__try_get_config(self.config['mqtt'], "username", None): self.mqtt_client.username_pw_set(self.config['mqtt']['username'], self.config['mqtt']['password']) if self.config['mqtt']['set_will']: self.mqtt_client.will_set(self.status_topic.format(client_id=self.client_id), "0", self.mqtt_qos, self.mqtt_retain) self.mqtt_client.on_connect = self.on_mqtt_connect self.mqtt_client.on_disconnect = self.on_mqtt_disconnect self.mqtt_client.on_message = self.on_message_from_mqtt 
self.mqtt_client.on_subscribe = self.on_mqtt_subscribe self.mqtt_client.on_log = self.on_mqtt_log self.plugins = self.__try_get_config(self.config, 'plugin', None) if not isinstance(self.plugins, dict): self.plugins = {self.plugins} self.plugins_ins = {} ### private method def __log(self, level, message): if self.logger: self.logger.log(level, message) @staticmethod def __try_get_config (parent, key, default): try: return parent[key] except: return default def __parse_nodes (self): self.downlink_topics = {} self.uplink_topics = {} if self.config['pan']['nodes']: for mac,mac_obj in self.config['pan']['nodes'].items(): for topic,topic_content in mac_obj.items(): topic = topic.format(client_id=self.client_id) if topic_content['dir'] == "uplink": self.uplink_topics[(mac, topic_content['match_key'])] = (topic,self.__try_get_config(topic_content,'filter',None)) elif topic_content['dir'] == "downlink": self.downlink_topics[topic] = (mac, topic_content) else: self.__log(logging.ERROR, "Unknown 'dir'") def __sub_downlink_topics (self): if not self.mqtt_connected: return for t in self.downlink_topics: rc, mid = self.mqtt_client.subscribe(t, self.mqtt_qos) self.mqtt_subcriptions[mid] = t self.__log(logging.INFO, "Sent subscription request to topic %s" % t) def __filter (self, input, filter_config): try: filter = Factory(filter_config['type']) if filter: filter.configure(filter_config['parameters']) if filter.validate(): return filter.process(input) except: pass return input #response topic list to client which requires this def __resp_topic_list(self, dst_topic): ''' Broadcast gateway information when the gateway thread is starting ''' str_topic_holder = '' if self.config['pan']['nodes']: for mac,mac_obj in self.config['pan']['nodes'].items(): for topic,topic_content in mac_obj.items(): topic = topic.format(client_id=self.client_id) if topic_content['dir'] == "uplink" and topic_content['type'] != "listening": str_topic_holder = str_topic_holder + topic + "@" print "topic 
list:" + str_topic_holder self.mqtt_client.publish(dst_topic, str_topic_holder, 2) ### def on_mqtt_connect (self, client, userdata, flags, rc): if rc == 0: self.__log(logging.INFO, "Connected to MQTT broker: %s" % self.host) self.mqtt_client.publish(self.status_topic.format(client_id=self.client_id), "1") self.mqtt_connected = True self.__sub_downlink_topics() else: self.__log(logging.ERROR, "Could not connect to MQTT broker: %s" % self.host) self.__log(logging.ERROR, "Error code: %d" % rc) self.mqtt_connected = False def on_mqtt_disconnect (self, client, userdata, rc): self.mqtt_connected = False self.__log(logging.INFO, "Disconnected from MQTT broker: %s"%self.host) self.__log(logging.INFO, "Return code: %d"%rc) if rc!=0: self.__log(logging.ERROR, "Unexpected disconnect, waiting reconnect...") def on_mqtt_subscribe (self,client, userdata, mid, granted_qos): topic = self.mqtt_subcriptions.get(mid, "Unknown")
self.__log(logging.INFO, "Sub to topic %s confirmed"%topic) def on_mqtt_log (self, client, userdata, level, buf): self.__log(logging.DEBUG, buf) def on_message_from_pan (self, mac, key, value, type): self.__log(logging.INFO, "Received message from PAN: %s, %s:%s" % (mac, key, value)) #walk over plugins and determin whether to drop ''' there are two callback in each plugin 1.on_message_from_pan abstract function in base description: do something when receives pan event 2.pre_publish description: do something before publish to broker ''' for name,p in self.plugins_ins.items(): if not p.on_message_from_pan(mac, key, value, type): return False #search the topic try: if self.uplink_topics[(mac,key)]: topic, filter = self.uplink_topics[(mac,key)] #apply the filter value_f = value if filter: value_f = self.__filter(value, filter) #walk over plugins and call the callback which watches on the publishment for name,p in self.plugins_ins.items(): if p.pre_publish: p.pre_publish(topic, value_f, value) #publish the topic self.__log(logging.INFO, "Publishing to topic: %s"%topic) self.mqtt_client.publish(topic, value_f, self.mqtt_qos, self.mqtt_retain) except KeyError, e: self.__log(logging.WARNING, "Received message unrecognized: %s" % e) def on_message_from_mqtt (self,client, userdata, message): self.__log(logging.INFO, "Received message from MQTT: %s: %s, qos %d" % (message.topic,message.payload,message.qos)) #walk over plugins and determin whether to drop for name,p in self.plugins_ins.items(): if not p.on_message_from_mqtt(message.topic, message.payload, message.qos): return False #search the topic if self.downlink_topics[message.topic]: mac, topic = self.downlink_topics[message.topic] #apply the filters if self.__try_get_config(topic, 'filter', None): value = self.__filter(message.payload, topic['filter']) else: value = message.payload #handle the topic types if topic['type'] == 'dio': self.pan.send_message('dio', mac, value, port = topic['dio_num']) 
#self.__log(logging.DEBUG, "sent dio message") elif topic['type'] == 'data': self.pan.send_message('data', mac, value) elif topic['type'] == 'rpc': pass elif topic['type'] == 'listening': #to specified client self.__resp_topic_list(str(value)) else: self.__log(logging.ERROR, "Unknown downlink handler type: %s" % topic['type']) return else: self.__log(logging.ERROR,"Received an unknown topic '%s' from mqtt" % message.topic) return def do_reload (self): self.__log(logging.DEBUG, "Reload not implemented now") def run (self): self.__log(logging.INFO, "Starting Pan2Mqtt %s" % self.config['general']['version']) #parse nodes, up/down-link channels self.__parse_nodes() #connect mqtt self.mqtt_client.connect(self.host, self.config['mqtt']['port'], self.config['mqtt']['keepalive']) sec=0 while True: if self.mqtt_connected: break else: self.mqtt_client.loop() sec=sec+1 if sec > 60: self.stop() sys.exit(2) #connect pan radio try: serial = Serial(self.config['pan']['port'], self.config['pan']['baudrate']) except Exception,e: self.__log(logging.ERROR, "Can't open serial: %s" % e) sys.exit(2) self.pan.serial = serial if not self.pan.connect(): self.stop() #start the plugins for p in self.plugins: ins = Factory(p) if ins: self.plugins_ins[p] = ins if self.__try_get_config(self.config['plugin'], p, None): self.plugins_ins[p].config = self.config['plugin'][p] self.plugins_ins[p].global_config = self.config self.plugins_ins[p].send_to_pan = self.pan.send_message self.plugins_ins[p].send_to_mqtt = self.mqtt_client.publish self.plugins_ins[p].start() else: self.__log(logging.ERROR, "Can not make the instance of %s from factory"%p) #blocking loop try: self.mqtt_client.loop_forever() except KeyboardInterrupt: self.__log(logging.ERROR, "Terminated by user") self.cleanup() def cleanup (self): self.pan.disconnect() self.__log(logging.INFO, "Cleaning up...") self.mqtt_client.disconnect() if os.path.exists(self.pidfile): os.remove(self.pidfile) for name, p in self.plugins_ins.items(): 
p.cleanup() sys.exit() def resolve_path(path): return path if path[0] == '/' else os.path.join(os.path.dirname(os.path.realpath(__file__)), path) if __name__ == '__main__': config_file = './pan2mqtt.yaml' fh = file(resolve_path(config_file), 'r') config = yaml.load(fh) fh.close() handler = logging.StreamHandler() formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s') handler.setFormatter(formatter) logger = logging.getLogger() logger.setLevel(config['general']['log_level']) logger.addHandler(handler) gw = PAN2MQTT(logger, config) if len(sys.argv) == 2: if 'start' == sys.argv[1]: gw.start() elif 'stop' == sys.argv[1]: gw.stop() elif 'restart' == sys.argv[1]: gw.restart() elif 'reload' == sys.argv[1]: gw.reload() elif 'foreground' == sys.argv[1]: gw.run() else: print "Unknown command" sys.exit(2) sys.exit(0) else: print "usage: %s start|stop|restart|foreground" % sys.argv[0] sys.exit(2)
random_line_split
install_dotfiles.py
#!/usr/bin/env python """ hopefully doesn't mess anything up too badly Significant inspiration was taken from: https://github.com/python-poetry/poetry/blob/c967a4a5abc6a0edd29c57eca307894f6e1c4f16/install-poetry.py Steps: - Ensure dependencies (git) - Download repository - Run dotdrop from the repo """ import os import sys from contextlib import asynccontextmanager from pathlib import Path from shutil import which from subprocess import ( PIPE, STDOUT, CalledProcessError, CompletedProcess, Popen, TimeoutExpired, run, ) from tempfile import TemporaryDirectory from typing import TYPE_CHECKING, Dict, List, Optional, Union from unittest.mock import patch from urllib.request import urlopen trio = None if TYPE_CHECKING: from io import BufferedWriter from typing import AsyncIterator, List, Tuple, Union import trio from trio import MemoryReceiveChannel, MemorySendChannel, Process WINDOWS = sys.platform.startswith(("win", "cygwin")) or ( sys.platform == "cli" and os.name == "nt" ) UNIX = sys.platform.startswith(("linux", "freebsd", "openbsd")) MACOS = sys.platform.startswith("darwin") if WINDOWS: import winreg def win_get_user_env(name: str) -> Optional[str]: if not WINDOWS: raise NotImplementedError( "can only update environment variables on Windows for now" ) with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as root: with winreg.OpenKey(root, "Environment", 0, winreg.KEY_ALL_ACCESS) as key: value, _ = winreg.QueryValueEx(key, name) return value # pylint: disable=too-many-instance-attributes,too-many-arguments class Expect: """ Manages running a process as a subprocess, and communicating with it, while echoing its output """ # From: # https://github.com/mawillcockson/dotfiles/blob/08e973f122b66ceadb009379dfed018a4b9e4eea/trio_watch_and_copy_demo.py # Which is inspired by: # https://github.com/python-trio/trio/blob/v0.19.0/trio/_subprocess.py#L587-L643 def __init__( self, process: "Process", printer_send_channel: "MemorySendChannel[bytes]", 
printer_receive_channel: "MemoryReceiveChannel[bytes]", notifier_send_channel: "MemorySendChannel[bytes]", opened_notifier_receive_channel: "MemoryReceiveChannel[bytes]", print_buffer: "BufferedWriter" = sys.stdout.buffer, # type: ignore ): self.process = process self.printer_send_channel = printer_send_channel self.printer_receive_channel = printer_receive_channel self.notifier_send_channel = notifier_send_channel self.opened_notifier_receive_channel = opened_notifier_receive_channel self.print_buffer = print_buffer self.stdout: bytes = b"" self.response_sent = False # NOTE: may be able to be combined with copier_recorder() async def printer( self, ) -> None: "echoes the process' output, dropping data if necessary" if not self.process: raise Exception("missing process; was this called inside a with statement?") async with self.printer_receive_channel: async for chunk in self.printer_receive_channel: try: self.print_buffer.write(chunk) except BlockingIOError: pass self.print_buffer.flush() async def copier_recorder( self, ) -> None: """ records the process' stdout, and mirrors it to printer() also sends notifications to expect() every time the process prints something """ if not self.process: raise Exception("missing process; was this called inside a with statement?") assert ( self.process.stdout is not None ), "process must be opened with stdout=PIPE and stderr=STDOUT" async with self.process.stdout, self.printer_send_channel, self.notifier_send_channel: async for chunk in self.process.stdout: # print(f"seen chunk: '{chunk!r}'", flush=True) # debug self.stdout += chunk await self.printer_send_channel.send(chunk) # send notification # if it's full, that's fine: if expect() is run, it'll see # there's a "pending" notification and check stdout, then wait # for another notification try: self.notifier_send_channel.send_nowait(b"") except trio.WouldBlock: pass except trio.BrokenResourceError as err: print(f"cause '{err.__cause__}'") raise err async def expect( self, 
watch_for: bytes, respond_with: bytes, ) -> None: """ called inside Expect.open_process()'s with block to watch for, and respond to, the process' output """ if not self.process: raise Exception("missing process; was this called inside a with statement?") assert self.process.stdin is not None, "process must be opened with stdin=PIPE" # NOTE: This could be improved to show which responses were sent, and which # weren't self.response_sent = False async with self.opened_notifier_receive_channel.clone() as notifier_receive_channel: # print("expect --> opened notifier channel", flush=True) # debug async for _ in notifier_receive_channel: # print("expect --> received chunk notification", flush=True) # debug if not self.response_sent and watch_for in self.stdout: # print("expect --> sending response...", flush=True) # debug await self.process.stdin.send_all(respond_with) self.response_sent = True # print("expect --> response sent", flush=True) # debug @classmethod @asynccontextmanager async def open_process( cls, args: "Union[str, List[str]]", env_additions: Dict[str, str] = {} ) -> "AsyncIterator[Expect]": """ entry point for using Expect() opens the process, opens a nursery, and starts the copier and printer this waits until the process is finished, so wrapping in a trio.move_on_after() is good to use as a timeout """ printer_channels: ( "Tuple[MemorySendChannel[bytes], MemoryReceiveChannel[bytes]]" ) = trio.open_memory_channel(1) printer_send_channel, printer_receive_channel = printer_channels notifier_channels: ( "Tuple[MemorySendChannel[bytes], MemoryReceiveChannel[bytes]]" ) = trio.open_memory_channel(0) notifier_send_channel, notifier_receive_channel = notifier_channels async with notifier_receive_channel: with patch.dict("os.environ", values=env_additions) as patched_env: async with await trio.open_process( args, stdin=PIPE, stdout=PIPE, stderr=STDOUT, env=patched_env ) as process: async with trio.open_nursery() as nursery: expect = cls( process=process, 
printer_send_channel=printer_send_channel, printer_receive_channel=printer_receive_channel, notifier_send_channel=notifier_send_channel, opened_notifier_receive_channel=notifier_receive_channel, ) nursery.start_soon(expect.copier_recorder) nursery.start_soon(expect.printer) yield expect # print("waiting for process") # debug await expect.process.wait() class Bootstrapper: UPDATED_ENVIRONMENT: Dict[str, str] = {} SHELL: Optional[Path] = None _SCOOP_INSTALLED = False _PIP_INSTALLED = False TEMP_DIR: Optional[Path] = None PIP_DIR: Optional[Path] = None VIRTUALENV_INSTALL_DIR: Optional[Path] = None VENV_DIR: Optional[Path] = None CACHE_DIR: Optional[Path] = None PYTHON_EXECUTABLE: str = sys.executable def __init__(self, temp_dir: Path) -> None: if WINDOWS: powershell_str = which("powershell") powershell_path = Path(powershell_str).resolve() if not (powershell_str and powershell_path.is_file()): raise FileNotFoundError( f"powershell not found at '{powershell_str}' or '{powershell_path}'" ) self.SHELL = powershell_path self.REPOSITORY_DIR = Path("~/projects/dotfiles/").expanduser().resolve() self.TEMP_DIR = temp_dir assert self.TEMP_DIR.is_dir() self.PIP_DIR = self.TEMP_DIR / "pip" self.PIP_DIR.mkdir(exist_ok=True) self.VIRTUALENV_INSTALL_DIR = self.TEMP_DIR / "virtualenv" self.VIRTUALENV_INSTALL_DIR.mkdir(exist_ok=True) self.VENV_DIR = self.TEMP_DIR / "venv" self.VENV_DIR.mkdir(exist_ok=True) self.CACHE_DIR = self.TEMP_DIR / "cache" self.CACHE_DIR.mkdir(exist_ok=True) self._PIP_INSTALLED = ( self.cmd([self.PYTHON_EXECUTABLE, "-m", "pip", "--version"]).returncode == 0 ) self._PIP_INSTALLED = False def cmd(self, args: List[str], stdin: str = "") -> CompletedProcess: print(f"running -> {args!r}") with patch.dict("os.environ", values=self.UPDATED_ENVIRONMENT) as patched_env: result = run( args, stdin=(stdin or PIPE), stderr=STDOUT, stdout=PIPE, check=False, env=patched_env, ) print(result.stdout.decode() or "") return result def shell(self, code: str) -> CompletedProcess: 
print(f'shell -> "{code}"') if self.UPDATED_ENVIRONMENT: with patch.dict( "os.environ", values=self.UPDATED_ENVIRONMENT ) as patched_env: result = run( code, text=True, capture_output=True, check=False, shell=True, executable=str(self.SHELL) or None, env=patched_env, ) else: result = run( code, text=True, capture_output=True, check=False, shell=True, executable=str(self.SHELL) or None, ) if result.stdout: print(result.stdout) if result.stderr: print(result.stderr) return result def main(self) -> None: try: import virtualenv except ImportError: self.bootstrap_virtualenv() import virtualenv # isort:skip session = virtualenv.cli_run([str(self.VENV_DIR), "--clear", "--download"]) if WINDOWS: venv_python = self.VENV_DIR / "Scripts" / "python.exe" venv_modules = self.VENV_DIR / "Lib" / "site-packages" else: raise NotImplementedError("only Windows supported right now") if not (venv_python and venv_python.is_file()): raise Exception( f"could not find a virtual environment python at '{venv_python}'" ) assert venv_modules.is_dir(), f"missing directory '{venv_modules}'" self.PYTHON_EXECUTABLE = str(venv_python) sys.path.insert(0, str(venv_modules)) # Install trio self.pip(["install", "trio"]) import trio as trio_module # isort:skip global trio trio = trio_module installer = Installer( temp_dir=self.TEMP_DIR, repository_dir=self.REPOSITORY_DIR, shell=self.SHELL, venv_dir=self.VENV_DIR, cache_dir=self.CACHE_DIR, python_executable=self.PYTHON_EXECUTABLE, updated_environment=self.UPDATED_ENVIRONMENT, ) trio.run(installer.main) def bootstrap_virtualenv(self) -> None: if not self._PIP_INSTALLED: self.bootstrap_pip() self.VIRTUALENV_INSTALL_DIR.mkdir(exist_ok=True) self.pip( ["install", "virtualenv", "--target", str(self.VIRTUALENV_INSTALL_DIR)] ) sys.path.insert(0, str(self.VIRTUALENV_INSTALL_DIR)) import virtualenv # isort:skip def bootstrap_pip(self) -> None: if self._PIP_INSTALLED: return # NOTE: On Windows, the SSL certificates for some reason aren't # available until a web 
request is made that absolutely requires # them # If it's a truly fresh install, then any urlopen() call to an # https:// url will fail with an SSL context error:
get_pip_file = self.CACHE_DIR / "get_pip.py" get_pip_file.touch() with get_pip_file.open(mode="wb") as file: with urlopen("https://bootstrap.pypa.io/get-pip.py") as request: while request.peek(1): file.write(request.read(8192)) # NOTE: pip forces the --user flag on Microsoft Store Pythons: # https://stackoverflow.com/q/63783587 self.cmd( [ self.PYTHON_EXECUTABLE, str(get_pip_file), "--target", str(self.PIP_DIR), "--no-user", ] ) sys.path.insert(0, str(self.PIP_DIR)) # Causes Python to find the downloaded pip module self.UPDATED_ENVIRONMENT["PYTHONPATH"] = str(self.PIP_DIR) self._PIP_INSTALLED = True def pip(self, args: List[str]) -> None: if not self._PIP_INSTALLED: self.bootstrap_pip() # NOTE: pip forces the --user flag on Microsoft Store Pythons: # https://stackoverflow.com/q/63783587 self.cmd([self.PYTHON_EXECUTABLE, "-m", "pip", *args, "--no-user"]) class Installer: SHELL: Optional[Path] = None PYTHON_EXECUTABLE: str = sys.executable UPDATED_ENVIRONMENT: Dict[str, str] = {} _SCOOP_INSTALLED: bool = False PROCESS_TYPES: Dict[str, str] = { "cmd": "{0!r}", "shell": '"{0}"', "pip": "{0}", "scoop": "{0}", } REPO_URL = "https://github.com/mawillcockson/dotfiles.git" def __init__( self, temp_dir: Path, repository_dir: Path, shell: Optional[Path] = None, venv_dir: Optional[Path] = None, cache_dir: Optional[Path] = None, python_executable: str = sys.executable, updated_environment: Dict[str, str] = {}, ) -> None: if WINDOWS: if not shell: powershell_str = which("powershell") powershell_path = Path(powershell_str).resolve() if not (powershell_str and powershell_path.is_file()): raise FileNotFoundError( f"powershell not found at '{powershell_str}' or '{powershell_path}'" ) self.SHELL = powershell_path else: self.SHELL = shell self.REPOSITORY_DIR = repository_dir self.TEMP_DIR = temp_dir assert self.TEMP_DIR.is_dir() self.VENV_DIR = venv_dir or (self.TEMP_DIR / "venv") self.VENV_DIR.mkdir(exist_ok=True) self.CACHE_DIR = cache_dir or (self.TEMP_DIR / "cache") 
self.CACHE_DIR.mkdir(exist_ok=True) self.PYTHON_EXECUTABLE = python_executable self.UPDATED_ENVIRONMENT.update(updated_environment) async def cmd( self, args: List[str], check: bool = True, process_type: str = "cmd", ) -> "Expect": args_str = self.PROCESS_TYPES.get( process_type, self.PROCESS_TYPES["cmd"] ).format(args) cmd_str = f"{process_type} -> {args_str}" print(cmd_str) async with Expect.open_process( args, env_additions=self.UPDATED_ENVIRONMENT, ) as expect: pass if check and expect.process.returncode != 0: raise CalledProcessError("returncode is not 0") return expect async def pip(self, args: List[str]) -> "Expect": return await self.cmd( [self.PYTHON_EXECUTABLE, "-m", "pip", *args, "--no-user"], process_type="pip", ) async def shell( self, code: str, check: bool = True, process_type: str = "shell" ) -> "Expect": # NOTE: "{shell} -c {script}" works with powershell, sh (bash, dash, etc), not sure about other platforms return await self.cmd( [str(self.SHELL), "-c", code], check=check, process_type=process_type ) async def scoop(self, args: str) -> "Expect": if not (WINDOWS and self._SCOOP_INSTALLED): raise Exception( "not running scoop when not on Windows or scoop not installed" ) return await self.shell(f"scoop {args}", check=True, process_type="scoop") async def install_scoop(self) -> None: if not WINDOWS: raise Exception("not installing scoop when not on Windows") # Check if scoop is already installed self.UPDATED_ENVIRONMENT["PATH"] = win_get_user_env("PATH") expect = await self.shell("scoop which scoop", check=False) self._SCOOP_INSTALLED = ( "is not recognized as the name of" not in expect.stdout.decode() and expect.process.returncode == 0 ) if not self._SCOOP_INSTALLED: # Set PowerShell's Execution Policy args = [ str(self.SHELL), "-c", "& {Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser}", ] print(f"running -> {args!r}") with trio.move_on_after(7): async with Expect.open_process( args, env_additions=self.UPDATED_ENVIRONMENT ) as 
expect: with trio.move_on_after(2): await expect.expect( watch_for=b'(default is "N"):', respond_with=b"A", ) # NOTE: don't have to check if the response was sent, because # sometimes the execution policy is set without ever sending a # response (i.e. if the execution policy was already set). # Instead, just check if the policy is set correctly. result = await self.cmd( [str(self.SHELL), "-c", "& {Get-ExecutionPolicy}"], check=False ) if not "RemoteSigned" in result.stdout.decode(): raise Exception("could not set PowerShell Execution Policy") # Install Scoop result = await self.cmd( [str(self.SHELL), "-c", "& {iwr -useb https://get.scoop.sh | iex}"] ) stdout = result.stdout.decode().lower() if not ( "scoop was installed successfully!" in stdout or "scoop is already installed" in stdout ): raise Exception("scoop was not installed") self.UPDATED_ENVIRONMENT["PATH"] = win_get_user_env("PATH") self._SCOOP_INSTALLED = True installed_apps = (await self.scoop("list")).stdout.decode() for requirement in ["aria2", "git", "python"]: if requirement in installed_apps: continue await self.scoop(f"install {requirement}") wanted_buckets = ["extras"] added_buckets = (await self.scoop("bucket list")).stdout.decode() for bucket in wanted_buckets: if bucket in added_buckets: continue await self.scoop(f"bucket add {bucket}") async def main(self) -> None: # Install rest of dependencies if MACOS or UNIX: raise NotImplementedError("only Windows supported currently") if WINDOWS: # implicitly installs git as well await self.install_scoop() for dependency_check in (["git", "--version"], ["python", "--version"]): try: await self.cmd(dependency_check, check=True) except CalledProcessError as err: raise Exception( f"dependency '{dependency_check!r}' was not found" ) from err ## Clone dotfiles repository self.REPOSITORY_DIR.mkdir(parents=True, exist_ok=True) # Check if there's an existing repository, and if that repository is clean # NOTE::FUTURE dulwich does not support submodules # 
https://github.com/dulwich/dulwich/issues/506 repo_status = await self.cmd(["git", "-C", str(self.REPOSITORY_DIR), "status", "--porcelain"], check=False) if "not a git repository" in repo_status.stdout.decode().lower(): await self.cmd( [ "git", "clone", "--recurse-submodules", self.REPO_URL, str(self.REPOSITORY_DIR), ] ) # Three scenarios: # - Repo exists and is completely clean and up to date # - Repo exists and there are uncommitted changes # - Repo exists and there are un-pushed changes # # The last one can be helped with dulwich if issue 506 is resolved, or # complex git commands, like: # https://stackoverflow.com/a/6133968 # # For now I'm saying "deal with it manually" # - Repo exists and there are changes # NOTE: optimistically try to pull in new upstream changes; could fail in numerous ways await self.cmd(["git", "-C", str(self.REPOSITORY_DIR), "pull", "--ff-only"]) # Run dotdrop raise NotImplementedError("setup dotfiles") if __name__ == "__main__": with TemporaryDirectory() as temp_dir: temp_dir_path = Path(temp_dir).resolve(strict=True) bootstrapper = Bootstrapper(temp_dir_path) bootstrapper.main() # import trio # isort:skip # installer = Installer( # temp_dir=bootstrapper.TEMP_DIR, # repository_dir=bootstrapper.REPOSITORY_DIR, # shell=bootstrapper.SHELL, # venv_dir=bootstrapper.VENV_DIR, # cache_dir=bootstrapper.CACHE_DIR, # python_executable=bootstrapper.PYTHON_EXECUTABLE, # updated_environment=bootstrapper.UPDATED_ENVIRONMENT, # ) # trio.run(installer.main)
# >> ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate self.shell("iwr -useb https://bootstrap.pypa.io") # https://pip.pypa.io/en/stable/installation/#get-pip-py
random_line_split
install_dotfiles.py
#!/usr/bin/env python """ hopefully doesn't mess anything up too badly Significant inspiration was taken from: https://github.com/python-poetry/poetry/blob/c967a4a5abc6a0edd29c57eca307894f6e1c4f16/install-poetry.py Steps: - Ensure dependencies (git) - Download repository - Run dotdrop from the repo """ import os import sys from contextlib import asynccontextmanager from pathlib import Path from shutil import which from subprocess import ( PIPE, STDOUT, CalledProcessError, CompletedProcess, Popen, TimeoutExpired, run, ) from tempfile import TemporaryDirectory from typing import TYPE_CHECKING, Dict, List, Optional, Union from unittest.mock import patch from urllib.request import urlopen trio = None if TYPE_CHECKING: from io import BufferedWriter from typing import AsyncIterator, List, Tuple, Union import trio from trio import MemoryReceiveChannel, MemorySendChannel, Process WINDOWS = sys.platform.startswith(("win", "cygwin")) or ( sys.platform == "cli" and os.name == "nt" ) UNIX = sys.platform.startswith(("linux", "freebsd", "openbsd")) MACOS = sys.platform.startswith("darwin") if WINDOWS: import winreg def win_get_user_env(name: str) -> Optional[str]: if not WINDOWS: raise NotImplementedError( "can only update environment variables on Windows for now" ) with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as root: with winreg.OpenKey(root, "Environment", 0, winreg.KEY_ALL_ACCESS) as key: value, _ = winreg.QueryValueEx(key, name) return value # pylint: disable=too-many-instance-attributes,too-many-arguments class Expect: """ Manages running a process as a subprocess, and communicating with it, while echoing its output """ # From: # https://github.com/mawillcockson/dotfiles/blob/08e973f122b66ceadb009379dfed018a4b9e4eea/trio_watch_and_copy_demo.py # Which is inspired by: # https://github.com/python-trio/trio/blob/v0.19.0/trio/_subprocess.py#L587-L643 def __init__( self, process: "Process", printer_send_channel: "MemorySendChannel[bytes]", 
printer_receive_channel: "MemoryReceiveChannel[bytes]", notifier_send_channel: "MemorySendChannel[bytes]", opened_notifier_receive_channel: "MemoryReceiveChannel[bytes]", print_buffer: "BufferedWriter" = sys.stdout.buffer, # type: ignore ): self.process = process self.printer_send_channel = printer_send_channel self.printer_receive_channel = printer_receive_channel self.notifier_send_channel = notifier_send_channel self.opened_notifier_receive_channel = opened_notifier_receive_channel self.print_buffer = print_buffer self.stdout: bytes = b"" self.response_sent = False # NOTE: may be able to be combined with copier_recorder() async def printer( self, ) -> None: "echoes the process' output, dropping data if necessary" if not self.process: raise Exception("missing process; was this called inside a with statement?") async with self.printer_receive_channel: async for chunk in self.printer_receive_channel: try: self.print_buffer.write(chunk) except BlockingIOError: pass self.print_buffer.flush() async def copier_recorder( self, ) -> None: """ records the process' stdout, and mirrors it to printer() also sends notifications to expect() every time the process prints something """ if not self.process: raise Exception("missing process; was this called inside a with statement?") assert ( self.process.stdout is not None ), "process must be opened with stdout=PIPE and stderr=STDOUT" async with self.process.stdout, self.printer_send_channel, self.notifier_send_channel: async for chunk in self.process.stdout: # print(f"seen chunk: '{chunk!r}'", flush=True) # debug self.stdout += chunk await self.printer_send_channel.send(chunk) # send notification # if it's full, that's fine: if expect() is run, it'll see # there's a "pending" notification and check stdout, then wait # for another notification try: self.notifier_send_channel.send_nowait(b"") except trio.WouldBlock: pass except trio.BrokenResourceError as err: print(f"cause '{err.__cause__}'") raise err async def expect( self, 
watch_for: bytes, respond_with: bytes, ) -> None: """ called inside Expect.open_process()'s with block to watch for, and respond to, the process' output """ if not self.process: raise Exception("missing process; was this called inside a with statement?") assert self.process.stdin is not None, "process must be opened with stdin=PIPE" # NOTE: This could be improved to show which responses were sent, and which # weren't self.response_sent = False async with self.opened_notifier_receive_channel.clone() as notifier_receive_channel: # print("expect --> opened notifier channel", flush=True) # debug async for _ in notifier_receive_channel: # print("expect --> received chunk notification", flush=True) # debug if not self.response_sent and watch_for in self.stdout: # print("expect --> sending response...", flush=True) # debug await self.process.stdin.send_all(respond_with) self.response_sent = True # print("expect --> response sent", flush=True) # debug @classmethod @asynccontextmanager async def open_process( cls, args: "Union[str, List[str]]", env_additions: Dict[str, str] = {} ) -> "AsyncIterator[Expect]": """ entry point for using Expect() opens the process, opens a nursery, and starts the copier and printer this waits until the process is finished, so wrapping in a trio.move_on_after() is good to use as a timeout """ printer_channels: ( "Tuple[MemorySendChannel[bytes], MemoryReceiveChannel[bytes]]" ) = trio.open_memory_channel(1) printer_send_channel, printer_receive_channel = printer_channels notifier_channels: ( "Tuple[MemorySendChannel[bytes], MemoryReceiveChannel[bytes]]" ) = trio.open_memory_channel(0) notifier_send_channel, notifier_receive_channel = notifier_channels async with notifier_receive_channel: with patch.dict("os.environ", values=env_additions) as patched_env: async with await trio.open_process( args, stdin=PIPE, stdout=PIPE, stderr=STDOUT, env=patched_env ) as process: async with trio.open_nursery() as nursery: expect = cls( process=process, 
printer_send_channel=printer_send_channel, printer_receive_channel=printer_receive_channel, notifier_send_channel=notifier_send_channel, opened_notifier_receive_channel=notifier_receive_channel, ) nursery.start_soon(expect.copier_recorder) nursery.start_soon(expect.printer) yield expect # print("waiting for process") # debug await expect.process.wait() class Bootstrapper: UPDATED_ENVIRONMENT: Dict[str, str] = {} SHELL: Optional[Path] = None _SCOOP_INSTALLED = False _PIP_INSTALLED = False TEMP_DIR: Optional[Path] = None PIP_DIR: Optional[Path] = None VIRTUALENV_INSTALL_DIR: Optional[Path] = None VENV_DIR: Optional[Path] = None CACHE_DIR: Optional[Path] = None PYTHON_EXECUTABLE: str = sys.executable def __init__(self, temp_dir: Path) -> None: if WINDOWS: powershell_str = which("powershell") powershell_path = Path(powershell_str).resolve() if not (powershell_str and powershell_path.is_file()): raise FileNotFoundError( f"powershell not found at '{powershell_str}' or '{powershell_path}'" ) self.SHELL = powershell_path self.REPOSITORY_DIR = Path("~/projects/dotfiles/").expanduser().resolve() self.TEMP_DIR = temp_dir assert self.TEMP_DIR.is_dir() self.PIP_DIR = self.TEMP_DIR / "pip" self.PIP_DIR.mkdir(exist_ok=True) self.VIRTUALENV_INSTALL_DIR = self.TEMP_DIR / "virtualenv" self.VIRTUALENV_INSTALL_DIR.mkdir(exist_ok=True) self.VENV_DIR = self.TEMP_DIR / "venv" self.VENV_DIR.mkdir(exist_ok=True) self.CACHE_DIR = self.TEMP_DIR / "cache" self.CACHE_DIR.mkdir(exist_ok=True) self._PIP_INSTALLED = ( self.cmd([self.PYTHON_EXECUTABLE, "-m", "pip", "--version"]).returncode == 0 ) self._PIP_INSTALLED = False def cmd(self, args: List[str], stdin: str = "") -> CompletedProcess: print(f"running -> {args!r}") with patch.dict("os.environ", values=self.UPDATED_ENVIRONMENT) as patched_env: result = run( args, stdin=(stdin or PIPE), stderr=STDOUT, stdout=PIPE, check=False, env=patched_env, ) print(result.stdout.decode() or "") return result def shell(self, code: str) -> CompletedProcess: 
print(f'shell -> "{code}"') if self.UPDATED_ENVIRONMENT: with patch.dict( "os.environ", values=self.UPDATED_ENVIRONMENT ) as patched_env: result = run( code, text=True, capture_output=True, check=False, shell=True, executable=str(self.SHELL) or None, env=patched_env, ) else: result = run( code, text=True, capture_output=True, check=False, shell=True, executable=str(self.SHELL) or None, ) if result.stdout: print(result.stdout) if result.stderr: print(result.stderr) return result def main(self) -> None: try: import virtualenv except ImportError: self.bootstrap_virtualenv() import virtualenv # isort:skip session = virtualenv.cli_run([str(self.VENV_DIR), "--clear", "--download"]) if WINDOWS: venv_python = self.VENV_DIR / "Scripts" / "python.exe" venv_modules = self.VENV_DIR / "Lib" / "site-packages" else: raise NotImplementedError("only Windows supported right now") if not (venv_python and venv_python.is_file()): raise Exception( f"could not find a virtual environment python at '{venv_python}'" ) assert venv_modules.is_dir(), f"missing directory '{venv_modules}'" self.PYTHON_EXECUTABLE = str(venv_python) sys.path.insert(0, str(venv_modules)) # Install trio self.pip(["install", "trio"]) import trio as trio_module # isort:skip global trio trio = trio_module installer = Installer( temp_dir=self.TEMP_DIR, repository_dir=self.REPOSITORY_DIR, shell=self.SHELL, venv_dir=self.VENV_DIR, cache_dir=self.CACHE_DIR, python_executable=self.PYTHON_EXECUTABLE, updated_environment=self.UPDATED_ENVIRONMENT, ) trio.run(installer.main) def bootstrap_virtualenv(self) -> None: if not self._PIP_INSTALLED: self.bootstrap_pip() self.VIRTUALENV_INSTALL_DIR.mkdir(exist_ok=True) self.pip( ["install", "virtualenv", "--target", str(self.VIRTUALENV_INSTALL_DIR)] ) sys.path.insert(0, str(self.VIRTUALENV_INSTALL_DIR)) import virtualenv # isort:skip def bootstrap_pip(self) -> None: if self._PIP_INSTALLED: return # NOTE: On Windows, the SSL certificates for some reason aren't # available until a web 
request is made that absolutely requires # them # If it's a truly fresh install, then any urlopen() call to an # https:// url will fail with an SSL context error: # >> ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate self.shell("iwr -useb https://bootstrap.pypa.io") # https://pip.pypa.io/en/stable/installation/#get-pip-py get_pip_file = self.CACHE_DIR / "get_pip.py" get_pip_file.touch() with get_pip_file.open(mode="wb") as file: with urlopen("https://bootstrap.pypa.io/get-pip.py") as request: while request.peek(1): file.write(request.read(8192)) # NOTE: pip forces the --user flag on Microsoft Store Pythons: # https://stackoverflow.com/q/63783587 self.cmd( [ self.PYTHON_EXECUTABLE, str(get_pip_file), "--target", str(self.PIP_DIR), "--no-user", ] ) sys.path.insert(0, str(self.PIP_DIR)) # Causes Python to find the downloaded pip module self.UPDATED_ENVIRONMENT["PYTHONPATH"] = str(self.PIP_DIR) self._PIP_INSTALLED = True def pip(self, args: List[str]) -> None:
class Installer: SHELL: Optional[Path] = None PYTHON_EXECUTABLE: str = sys.executable UPDATED_ENVIRONMENT: Dict[str, str] = {} _SCOOP_INSTALLED: bool = False PROCESS_TYPES: Dict[str, str] = { "cmd": "{0!r}", "shell": '"{0}"', "pip": "{0}", "scoop": "{0}", } REPO_URL = "https://github.com/mawillcockson/dotfiles.git" def __init__( self, temp_dir: Path, repository_dir: Path, shell: Optional[Path] = None, venv_dir: Optional[Path] = None, cache_dir: Optional[Path] = None, python_executable: str = sys.executable, updated_environment: Dict[str, str] = {}, ) -> None: if WINDOWS: if not shell: powershell_str = which("powershell") powershell_path = Path(powershell_str).resolve() if not (powershell_str and powershell_path.is_file()): raise FileNotFoundError( f"powershell not found at '{powershell_str}' or '{powershell_path}'" ) self.SHELL = powershell_path else: self.SHELL = shell self.REPOSITORY_DIR = repository_dir self.TEMP_DIR = temp_dir assert self.TEMP_DIR.is_dir() self.VENV_DIR = venv_dir or (self.TEMP_DIR / "venv") self.VENV_DIR.mkdir(exist_ok=True) self.CACHE_DIR = cache_dir or (self.TEMP_DIR / "cache") self.CACHE_DIR.mkdir(exist_ok=True) self.PYTHON_EXECUTABLE = python_executable self.UPDATED_ENVIRONMENT.update(updated_environment) async def cmd( self, args: List[str], check: bool = True, process_type: str = "cmd", ) -> "Expect": args_str = self.PROCESS_TYPES.get( process_type, self.PROCESS_TYPES["cmd"] ).format(args) cmd_str = f"{process_type} -> {args_str}" print(cmd_str) async with Expect.open_process( args, env_additions=self.UPDATED_ENVIRONMENT, ) as expect: pass if check and expect.process.returncode != 0: raise CalledProcessError("returncode is not 0") return expect async def pip(self, args: List[str]) -> "Expect": return await self.cmd( [self.PYTHON_EXECUTABLE, "-m", "pip", *args, "--no-user"], process_type="pip", ) async def shell( self, code: str, check: bool = True, process_type: str = "shell" ) -> "Expect": # NOTE: "{shell} -c {script}" works with 
powershell, sh (bash, dash, etc), not sure about other platforms return await self.cmd( [str(self.SHELL), "-c", code], check=check, process_type=process_type ) async def scoop(self, args: str) -> "Expect": if not (WINDOWS and self._SCOOP_INSTALLED): raise Exception( "not running scoop when not on Windows or scoop not installed" ) return await self.shell(f"scoop {args}", check=True, process_type="scoop") async def install_scoop(self) -> None: if not WINDOWS: raise Exception("not installing scoop when not on Windows") # Check if scoop is already installed self.UPDATED_ENVIRONMENT["PATH"] = win_get_user_env("PATH") expect = await self.shell("scoop which scoop", check=False) self._SCOOP_INSTALLED = ( "is not recognized as the name of" not in expect.stdout.decode() and expect.process.returncode == 0 ) if not self._SCOOP_INSTALLED: # Set PowerShell's Execution Policy args = [ str(self.SHELL), "-c", "& {Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser}", ] print(f"running -> {args!r}") with trio.move_on_after(7): async with Expect.open_process( args, env_additions=self.UPDATED_ENVIRONMENT ) as expect: with trio.move_on_after(2): await expect.expect( watch_for=b'(default is "N"):', respond_with=b"A", ) # NOTE: don't have to check if the response was sent, because # sometimes the execution policy is set without ever sending a # response (i.e. if the execution policy was already set). # Instead, just check if the policy is set correctly. result = await self.cmd( [str(self.SHELL), "-c", "& {Get-ExecutionPolicy}"], check=False ) if not "RemoteSigned" in result.stdout.decode(): raise Exception("could not set PowerShell Execution Policy") # Install Scoop result = await self.cmd( [str(self.SHELL), "-c", "& {iwr -useb https://get.scoop.sh | iex}"] ) stdout = result.stdout.decode().lower() if not ( "scoop was installed successfully!" 
in stdout or "scoop is already installed" in stdout ): raise Exception("scoop was not installed") self.UPDATED_ENVIRONMENT["PATH"] = win_get_user_env("PATH") self._SCOOP_INSTALLED = True installed_apps = (await self.scoop("list")).stdout.decode() for requirement in ["aria2", "git", "python"]: if requirement in installed_apps: continue await self.scoop(f"install {requirement}") wanted_buckets = ["extras"] added_buckets = (await self.scoop("bucket list")).stdout.decode() for bucket in wanted_buckets: if bucket in added_buckets: continue await self.scoop(f"bucket add {bucket}") async def main(self) -> None: # Install rest of dependencies if MACOS or UNIX: raise NotImplementedError("only Windows supported currently") if WINDOWS: # implicitly installs git as well await self.install_scoop() for dependency_check in (["git", "--version"], ["python", "--version"]): try: await self.cmd(dependency_check, check=True) except CalledProcessError as err: raise Exception( f"dependency '{dependency_check!r}' was not found" ) from err ## Clone dotfiles repository self.REPOSITORY_DIR.mkdir(parents=True, exist_ok=True) # Check if there's an existing repository, and if that repository is clean # NOTE::FUTURE dulwich does not support submodules # https://github.com/dulwich/dulwich/issues/506 repo_status = await self.cmd(["git", "-C", str(self.REPOSITORY_DIR), "status", "--porcelain"], check=False) if "not a git repository" in repo_status.stdout.decode().lower(): await self.cmd( [ "git", "clone", "--recurse-submodules", self.REPO_URL, str(self.REPOSITORY_DIR), ] ) # Three scenarios: # - Repo exists and is completely clean and up to date # - Repo exists and there are uncommitted changes # - Repo exists and there are un-pushed changes # # The last one can be helped with dulwich if issue 506 is resolved, or # complex git commands, like: # https://stackoverflow.com/a/6133968 # # For now I'm saying "deal with it manually" # - Repo exists and there are changes # NOTE: optimistically try to pull 
in new upstream changes; could fail in numerous ways await self.cmd(["git", "-C", str(self.REPOSITORY_DIR), "pull", "--ff-only"]) # Run dotdrop raise NotImplementedError("setup dotfiles") if __name__ == "__main__": with TemporaryDirectory() as temp_dir: temp_dir_path = Path(temp_dir).resolve(strict=True) bootstrapper = Bootstrapper(temp_dir_path) bootstrapper.main() # import trio # isort:skip # installer = Installer( # temp_dir=bootstrapper.TEMP_DIR, # repository_dir=bootstrapper.REPOSITORY_DIR, # shell=bootstrapper.SHELL, # venv_dir=bootstrapper.VENV_DIR, # cache_dir=bootstrapper.CACHE_DIR, # python_executable=bootstrapper.PYTHON_EXECUTABLE, # updated_environment=bootstrapper.UPDATED_ENVIRONMENT, # ) # trio.run(installer.main)
if not self._PIP_INSTALLED: self.bootstrap_pip() # NOTE: pip forces the --user flag on Microsoft Store Pythons: # https://stackoverflow.com/q/63783587 self.cmd([self.PYTHON_EXECUTABLE, "-m", "pip", *args, "--no-user"])
identifier_body
install_dotfiles.py
#!/usr/bin/env python """ hopefully doesn't mess anything up too badly Significant inspiration was taken from: https://github.com/python-poetry/poetry/blob/c967a4a5abc6a0edd29c57eca307894f6e1c4f16/install-poetry.py Steps: - Ensure dependencies (git) - Download repository - Run dotdrop from the repo """ import os import sys from contextlib import asynccontextmanager from pathlib import Path from shutil import which from subprocess import ( PIPE, STDOUT, CalledProcessError, CompletedProcess, Popen, TimeoutExpired, run, ) from tempfile import TemporaryDirectory from typing import TYPE_CHECKING, Dict, List, Optional, Union from unittest.mock import patch from urllib.request import urlopen trio = None if TYPE_CHECKING: from io import BufferedWriter from typing import AsyncIterator, List, Tuple, Union import trio from trio import MemoryReceiveChannel, MemorySendChannel, Process WINDOWS = sys.platform.startswith(("win", "cygwin")) or ( sys.platform == "cli" and os.name == "nt" ) UNIX = sys.platform.startswith(("linux", "freebsd", "openbsd")) MACOS = sys.platform.startswith("darwin") if WINDOWS: import winreg def win_get_user_env(name: str) -> Optional[str]: if not WINDOWS: raise NotImplementedError( "can only update environment variables on Windows for now" ) with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as root: with winreg.OpenKey(root, "Environment", 0, winreg.KEY_ALL_ACCESS) as key: value, _ = winreg.QueryValueEx(key, name) return value # pylint: disable=too-many-instance-attributes,too-many-arguments class Expect: """ Manages running a process as a subprocess, and communicating with it, while echoing its output """ # From: # https://github.com/mawillcockson/dotfiles/blob/08e973f122b66ceadb009379dfed018a4b9e4eea/trio_watch_and_copy_demo.py # Which is inspired by: # https://github.com/python-trio/trio/blob/v0.19.0/trio/_subprocess.py#L587-L643 def __init__( self, process: "Process", printer_send_channel: "MemorySendChannel[bytes]", 
printer_receive_channel: "MemoryReceiveChannel[bytes]", notifier_send_channel: "MemorySendChannel[bytes]", opened_notifier_receive_channel: "MemoryReceiveChannel[bytes]", print_buffer: "BufferedWriter" = sys.stdout.buffer, # type: ignore ): self.process = process self.printer_send_channel = printer_send_channel self.printer_receive_channel = printer_receive_channel self.notifier_send_channel = notifier_send_channel self.opened_notifier_receive_channel = opened_notifier_receive_channel self.print_buffer = print_buffer self.stdout: bytes = b"" self.response_sent = False # NOTE: may be able to be combined with copier_recorder() async def printer( self, ) -> None: "echoes the process' output, dropping data if necessary" if not self.process: raise Exception("missing process; was this called inside a with statement?") async with self.printer_receive_channel: async for chunk in self.printer_receive_channel: try: self.print_buffer.write(chunk) except BlockingIOError: pass self.print_buffer.flush() async def copier_recorder( self, ) -> None: """ records the process' stdout, and mirrors it to printer() also sends notifications to expect() every time the process prints something """ if not self.process: raise Exception("missing process; was this called inside a with statement?") assert ( self.process.stdout is not None ), "process must be opened with stdout=PIPE and stderr=STDOUT" async with self.process.stdout, self.printer_send_channel, self.notifier_send_channel: async for chunk in self.process.stdout: # print(f"seen chunk: '{chunk!r}'", flush=True) # debug self.stdout += chunk await self.printer_send_channel.send(chunk) # send notification # if it's full, that's fine: if expect() is run, it'll see # there's a "pending" notification and check stdout, then wait # for another notification try: self.notifier_send_channel.send_nowait(b"") except trio.WouldBlock: pass except trio.BrokenResourceError as err: print(f"cause '{err.__cause__}'") raise err async def expect( self, 
watch_for: bytes, respond_with: bytes, ) -> None: """ called inside Expect.open_process()'s with block to watch for, and respond to, the process' output """ if not self.process: raise Exception("missing process; was this called inside a with statement?") assert self.process.stdin is not None, "process must be opened with stdin=PIPE" # NOTE: This could be improved to show which responses were sent, and which # weren't self.response_sent = False async with self.opened_notifier_receive_channel.clone() as notifier_receive_channel: # print("expect --> opened notifier channel", flush=True) # debug async for _ in notifier_receive_channel: # print("expect --> received chunk notification", flush=True) # debug if not self.response_sent and watch_for in self.stdout: # print("expect --> sending response...", flush=True) # debug await self.process.stdin.send_all(respond_with) self.response_sent = True # print("expect --> response sent", flush=True) # debug @classmethod @asynccontextmanager async def open_process( cls, args: "Union[str, List[str]]", env_additions: Dict[str, str] = {} ) -> "AsyncIterator[Expect]": """ entry point for using Expect() opens the process, opens a nursery, and starts the copier and printer this waits until the process is finished, so wrapping in a trio.move_on_after() is good to use as a timeout """ printer_channels: ( "Tuple[MemorySendChannel[bytes], MemoryReceiveChannel[bytes]]" ) = trio.open_memory_channel(1) printer_send_channel, printer_receive_channel = printer_channels notifier_channels: ( "Tuple[MemorySendChannel[bytes], MemoryReceiveChannel[bytes]]" ) = trio.open_memory_channel(0) notifier_send_channel, notifier_receive_channel = notifier_channels async with notifier_receive_channel: with patch.dict("os.environ", values=env_additions) as patched_env: async with await trio.open_process( args, stdin=PIPE, stdout=PIPE, stderr=STDOUT, env=patched_env ) as process: async with trio.open_nursery() as nursery: expect = cls( process=process, 
printer_send_channel=printer_send_channel, printer_receive_channel=printer_receive_channel, notifier_send_channel=notifier_send_channel, opened_notifier_receive_channel=notifier_receive_channel, ) nursery.start_soon(expect.copier_recorder) nursery.start_soon(expect.printer) yield expect # print("waiting for process") # debug await expect.process.wait() class Bootstrapper: UPDATED_ENVIRONMENT: Dict[str, str] = {} SHELL: Optional[Path] = None _SCOOP_INSTALLED = False _PIP_INSTALLED = False TEMP_DIR: Optional[Path] = None PIP_DIR: Optional[Path] = None VIRTUALENV_INSTALL_DIR: Optional[Path] = None VENV_DIR: Optional[Path] = None CACHE_DIR: Optional[Path] = None PYTHON_EXECUTABLE: str = sys.executable def __init__(self, temp_dir: Path) -> None: if WINDOWS: powershell_str = which("powershell") powershell_path = Path(powershell_str).resolve() if not (powershell_str and powershell_path.is_file()): raise FileNotFoundError( f"powershell not found at '{powershell_str}' or '{powershell_path}'" ) self.SHELL = powershell_path self.REPOSITORY_DIR = Path("~/projects/dotfiles/").expanduser().resolve() self.TEMP_DIR = temp_dir assert self.TEMP_DIR.is_dir() self.PIP_DIR = self.TEMP_DIR / "pip" self.PIP_DIR.mkdir(exist_ok=True) self.VIRTUALENV_INSTALL_DIR = self.TEMP_DIR / "virtualenv" self.VIRTUALENV_INSTALL_DIR.mkdir(exist_ok=True) self.VENV_DIR = self.TEMP_DIR / "venv" self.VENV_DIR.mkdir(exist_ok=True) self.CACHE_DIR = self.TEMP_DIR / "cache" self.CACHE_DIR.mkdir(exist_ok=True) self._PIP_INSTALLED = ( self.cmd([self.PYTHON_EXECUTABLE, "-m", "pip", "--version"]).returncode == 0 ) self._PIP_INSTALLED = False def cmd(self, args: List[str], stdin: str = "") -> CompletedProcess: print(f"running -> {args!r}") with patch.dict("os.environ", values=self.UPDATED_ENVIRONMENT) as patched_env: result = run( args, stdin=(stdin or PIPE), stderr=STDOUT, stdout=PIPE, check=False, env=patched_env, ) print(result.stdout.decode() or "") return result def shell(self, code: str) -> CompletedProcess: 
print(f'shell -> "{code}"') if self.UPDATED_ENVIRONMENT: with patch.dict( "os.environ", values=self.UPDATED_ENVIRONMENT ) as patched_env: result = run( code, text=True, capture_output=True, check=False, shell=True, executable=str(self.SHELL) or None, env=patched_env, ) else: result = run( code, text=True, capture_output=True, check=False, shell=True, executable=str(self.SHELL) or None, ) if result.stdout: print(result.stdout) if result.stderr: print(result.stderr) return result def main(self) -> None: try: import virtualenv except ImportError: self.bootstrap_virtualenv() import virtualenv # isort:skip session = virtualenv.cli_run([str(self.VENV_DIR), "--clear", "--download"]) if WINDOWS: venv_python = self.VENV_DIR / "Scripts" / "python.exe" venv_modules = self.VENV_DIR / "Lib" / "site-packages" else: raise NotImplementedError("only Windows supported right now") if not (venv_python and venv_python.is_file()):
assert venv_modules.is_dir(), f"missing directory '{venv_modules}'" self.PYTHON_EXECUTABLE = str(venv_python) sys.path.insert(0, str(venv_modules)) # Install trio self.pip(["install", "trio"]) import trio as trio_module # isort:skip global trio trio = trio_module installer = Installer( temp_dir=self.TEMP_DIR, repository_dir=self.REPOSITORY_DIR, shell=self.SHELL, venv_dir=self.VENV_DIR, cache_dir=self.CACHE_DIR, python_executable=self.PYTHON_EXECUTABLE, updated_environment=self.UPDATED_ENVIRONMENT, ) trio.run(installer.main) def bootstrap_virtualenv(self) -> None: if not self._PIP_INSTALLED: self.bootstrap_pip() self.VIRTUALENV_INSTALL_DIR.mkdir(exist_ok=True) self.pip( ["install", "virtualenv", "--target", str(self.VIRTUALENV_INSTALL_DIR)] ) sys.path.insert(0, str(self.VIRTUALENV_INSTALL_DIR)) import virtualenv # isort:skip def bootstrap_pip(self) -> None: if self._PIP_INSTALLED: return # NOTE: On Windows, the SSL certificates for some reason aren't # available until a web request is made that absolutely requires # them # If it's a truly fresh install, then any urlopen() call to an # https:// url will fail with an SSL context error: # >> ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate self.shell("iwr -useb https://bootstrap.pypa.io") # https://pip.pypa.io/en/stable/installation/#get-pip-py get_pip_file = self.CACHE_DIR / "get_pip.py" get_pip_file.touch() with get_pip_file.open(mode="wb") as file: with urlopen("https://bootstrap.pypa.io/get-pip.py") as request: while request.peek(1): file.write(request.read(8192)) # NOTE: pip forces the --user flag on Microsoft Store Pythons: # https://stackoverflow.com/q/63783587 self.cmd( [ self.PYTHON_EXECUTABLE, str(get_pip_file), "--target", str(self.PIP_DIR), "--no-user", ] ) sys.path.insert(0, str(self.PIP_DIR)) # Causes Python to find the downloaded pip module self.UPDATED_ENVIRONMENT["PYTHONPATH"] = str(self.PIP_DIR) self._PIP_INSTALLED = True 
def pip(self, args: List[str]) -> None: if not self._PIP_INSTALLED: self.bootstrap_pip() # NOTE: pip forces the --user flag on Microsoft Store Pythons: # https://stackoverflow.com/q/63783587 self.cmd([self.PYTHON_EXECUTABLE, "-m", "pip", *args, "--no-user"]) class Installer: SHELL: Optional[Path] = None PYTHON_EXECUTABLE: str = sys.executable UPDATED_ENVIRONMENT: Dict[str, str] = {} _SCOOP_INSTALLED: bool = False PROCESS_TYPES: Dict[str, str] = { "cmd": "{0!r}", "shell": '"{0}"', "pip": "{0}", "scoop": "{0}", } REPO_URL = "https://github.com/mawillcockson/dotfiles.git" def __init__( self, temp_dir: Path, repository_dir: Path, shell: Optional[Path] = None, venv_dir: Optional[Path] = None, cache_dir: Optional[Path] = None, python_executable: str = sys.executable, updated_environment: Dict[str, str] = {}, ) -> None: if WINDOWS: if not shell: powershell_str = which("powershell") powershell_path = Path(powershell_str).resolve() if not (powershell_str and powershell_path.is_file()): raise FileNotFoundError( f"powershell not found at '{powershell_str}' or '{powershell_path}'" ) self.SHELL = powershell_path else: self.SHELL = shell self.REPOSITORY_DIR = repository_dir self.TEMP_DIR = temp_dir assert self.TEMP_DIR.is_dir() self.VENV_DIR = venv_dir or (self.TEMP_DIR / "venv") self.VENV_DIR.mkdir(exist_ok=True) self.CACHE_DIR = cache_dir or (self.TEMP_DIR / "cache") self.CACHE_DIR.mkdir(exist_ok=True) self.PYTHON_EXECUTABLE = python_executable self.UPDATED_ENVIRONMENT.update(updated_environment) async def cmd( self, args: List[str], check: bool = True, process_type: str = "cmd", ) -> "Expect": args_str = self.PROCESS_TYPES.get( process_type, self.PROCESS_TYPES["cmd"] ).format(args) cmd_str = f"{process_type} -> {args_str}" print(cmd_str) async with Expect.open_process( args, env_additions=self.UPDATED_ENVIRONMENT, ) as expect: pass if check and expect.process.returncode != 0: raise CalledProcessError("returncode is not 0") return expect async def pip(self, args: List[str]) -> 
"Expect": return await self.cmd( [self.PYTHON_EXECUTABLE, "-m", "pip", *args, "--no-user"], process_type="pip", ) async def shell( self, code: str, check: bool = True, process_type: str = "shell" ) -> "Expect": # NOTE: "{shell} -c {script}" works with powershell, sh (bash, dash, etc), not sure about other platforms return await self.cmd( [str(self.SHELL), "-c", code], check=check, process_type=process_type ) async def scoop(self, args: str) -> "Expect": if not (WINDOWS and self._SCOOP_INSTALLED): raise Exception( "not running scoop when not on Windows or scoop not installed" ) return await self.shell(f"scoop {args}", check=True, process_type="scoop") async def install_scoop(self) -> None: if not WINDOWS: raise Exception("not installing scoop when not on Windows") # Check if scoop is already installed self.UPDATED_ENVIRONMENT["PATH"] = win_get_user_env("PATH") expect = await self.shell("scoop which scoop", check=False) self._SCOOP_INSTALLED = ( "is not recognized as the name of" not in expect.stdout.decode() and expect.process.returncode == 0 ) if not self._SCOOP_INSTALLED: # Set PowerShell's Execution Policy args = [ str(self.SHELL), "-c", "& {Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser}", ] print(f"running -> {args!r}") with trio.move_on_after(7): async with Expect.open_process( args, env_additions=self.UPDATED_ENVIRONMENT ) as expect: with trio.move_on_after(2): await expect.expect( watch_for=b'(default is "N"):', respond_with=b"A", ) # NOTE: don't have to check if the response was sent, because # sometimes the execution policy is set without ever sending a # response (i.e. if the execution policy was already set). # Instead, just check if the policy is set correctly. 
result = await self.cmd( [str(self.SHELL), "-c", "& {Get-ExecutionPolicy}"], check=False ) if not "RemoteSigned" in result.stdout.decode(): raise Exception("could not set PowerShell Execution Policy") # Install Scoop result = await self.cmd( [str(self.SHELL), "-c", "& {iwr -useb https://get.scoop.sh | iex}"] ) stdout = result.stdout.decode().lower() if not ( "scoop was installed successfully!" in stdout or "scoop is already installed" in stdout ): raise Exception("scoop was not installed") self.UPDATED_ENVIRONMENT["PATH"] = win_get_user_env("PATH") self._SCOOP_INSTALLED = True installed_apps = (await self.scoop("list")).stdout.decode() for requirement in ["aria2", "git", "python"]: if requirement in installed_apps: continue await self.scoop(f"install {requirement}") wanted_buckets = ["extras"] added_buckets = (await self.scoop("bucket list")).stdout.decode() for bucket in wanted_buckets: if bucket in added_buckets: continue await self.scoop(f"bucket add {bucket}") async def main(self) -> None: # Install rest of dependencies if MACOS or UNIX: raise NotImplementedError("only Windows supported currently") if WINDOWS: # implicitly installs git as well await self.install_scoop() for dependency_check in (["git", "--version"], ["python", "--version"]): try: await self.cmd(dependency_check, check=True) except CalledProcessError as err: raise Exception( f"dependency '{dependency_check!r}' was not found" ) from err ## Clone dotfiles repository self.REPOSITORY_DIR.mkdir(parents=True, exist_ok=True) # Check if there's an existing repository, and if that repository is clean # NOTE::FUTURE dulwich does not support submodules # https://github.com/dulwich/dulwich/issues/506 repo_status = await self.cmd(["git", "-C", str(self.REPOSITORY_DIR), "status", "--porcelain"], check=False) if "not a git repository" in repo_status.stdout.decode().lower(): await self.cmd( [ "git", "clone", "--recurse-submodules", self.REPO_URL, str(self.REPOSITORY_DIR), ] ) # Three scenarios: # - Repo exists 
and is completely clean and up to date # - Repo exists and there are uncommitted changes # - Repo exists and there are un-pushed changes # # The last one can be helped with dulwich if issue 506 is resolved, or # complex git commands, like: # https://stackoverflow.com/a/6133968 # # For now I'm saying "deal with it manually" # - Repo exists and there are changes # NOTE: optimistically try to pull in new upstream changes; could fail in numerous ways await self.cmd(["git", "-C", str(self.REPOSITORY_DIR), "pull", "--ff-only"]) # Run dotdrop raise NotImplementedError("setup dotfiles") if __name__ == "__main__": with TemporaryDirectory() as temp_dir: temp_dir_path = Path(temp_dir).resolve(strict=True) bootstrapper = Bootstrapper(temp_dir_path) bootstrapper.main() # import trio # isort:skip # installer = Installer( # temp_dir=bootstrapper.TEMP_DIR, # repository_dir=bootstrapper.REPOSITORY_DIR, # shell=bootstrapper.SHELL, # venv_dir=bootstrapper.VENV_DIR, # cache_dir=bootstrapper.CACHE_DIR, # python_executable=bootstrapper.PYTHON_EXECUTABLE, # updated_environment=bootstrapper.UPDATED_ENVIRONMENT, # ) # trio.run(installer.main)
raise Exception( f"could not find a virtual environment python at '{venv_python}'" )
conditional_block
install_dotfiles.py
#!/usr/bin/env python """ hopefully doesn't mess anything up too badly Significant inspiration was taken from: https://github.com/python-poetry/poetry/blob/c967a4a5abc6a0edd29c57eca307894f6e1c4f16/install-poetry.py Steps: - Ensure dependencies (git) - Download repository - Run dotdrop from the repo """ import os import sys from contextlib import asynccontextmanager from pathlib import Path from shutil import which from subprocess import ( PIPE, STDOUT, CalledProcessError, CompletedProcess, Popen, TimeoutExpired, run, ) from tempfile import TemporaryDirectory from typing import TYPE_CHECKING, Dict, List, Optional, Union from unittest.mock import patch from urllib.request import urlopen trio = None if TYPE_CHECKING: from io import BufferedWriter from typing import AsyncIterator, List, Tuple, Union import trio from trio import MemoryReceiveChannel, MemorySendChannel, Process WINDOWS = sys.platform.startswith(("win", "cygwin")) or ( sys.platform == "cli" and os.name == "nt" ) UNIX = sys.platform.startswith(("linux", "freebsd", "openbsd")) MACOS = sys.platform.startswith("darwin") if WINDOWS: import winreg def
(name: str) -> Optional[str]: if not WINDOWS: raise NotImplementedError( "can only update environment variables on Windows for now" ) with winreg.ConnectRegistry(None, winreg.HKEY_CURRENT_USER) as root: with winreg.OpenKey(root, "Environment", 0, winreg.KEY_ALL_ACCESS) as key: value, _ = winreg.QueryValueEx(key, name) return value # pylint: disable=too-many-instance-attributes,too-many-arguments class Expect: """ Manages running a process as a subprocess, and communicating with it, while echoing its output """ # From: # https://github.com/mawillcockson/dotfiles/blob/08e973f122b66ceadb009379dfed018a4b9e4eea/trio_watch_and_copy_demo.py # Which is inspired by: # https://github.com/python-trio/trio/blob/v0.19.0/trio/_subprocess.py#L587-L643 def __init__( self, process: "Process", printer_send_channel: "MemorySendChannel[bytes]", printer_receive_channel: "MemoryReceiveChannel[bytes]", notifier_send_channel: "MemorySendChannel[bytes]", opened_notifier_receive_channel: "MemoryReceiveChannel[bytes]", print_buffer: "BufferedWriter" = sys.stdout.buffer, # type: ignore ): self.process = process self.printer_send_channel = printer_send_channel self.printer_receive_channel = printer_receive_channel self.notifier_send_channel = notifier_send_channel self.opened_notifier_receive_channel = opened_notifier_receive_channel self.print_buffer = print_buffer self.stdout: bytes = b"" self.response_sent = False # NOTE: may be able to be combined with copier_recorder() async def printer( self, ) -> None: "echoes the process' output, dropping data if necessary" if not self.process: raise Exception("missing process; was this called inside a with statement?") async with self.printer_receive_channel: async for chunk in self.printer_receive_channel: try: self.print_buffer.write(chunk) except BlockingIOError: pass self.print_buffer.flush() async def copier_recorder( self, ) -> None: """ records the process' stdout, and mirrors it to printer() also sends notifications to expect() every time the 
process prints something """ if not self.process: raise Exception("missing process; was this called inside a with statement?") assert ( self.process.stdout is not None ), "process must be opened with stdout=PIPE and stderr=STDOUT" async with self.process.stdout, self.printer_send_channel, self.notifier_send_channel: async for chunk in self.process.stdout: # print(f"seen chunk: '{chunk!r}'", flush=True) # debug self.stdout += chunk await self.printer_send_channel.send(chunk) # send notification # if it's full, that's fine: if expect() is run, it'll see # there's a "pending" notification and check stdout, then wait # for another notification try: self.notifier_send_channel.send_nowait(b"") except trio.WouldBlock: pass except trio.BrokenResourceError as err: print(f"cause '{err.__cause__}'") raise err async def expect( self, watch_for: bytes, respond_with: bytes, ) -> None: """ called inside Expect.open_process()'s with block to watch for, and respond to, the process' output """ if not self.process: raise Exception("missing process; was this called inside a with statement?") assert self.process.stdin is not None, "process must be opened with stdin=PIPE" # NOTE: This could be improved to show which responses were sent, and which # weren't self.response_sent = False async with self.opened_notifier_receive_channel.clone() as notifier_receive_channel: # print("expect --> opened notifier channel", flush=True) # debug async for _ in notifier_receive_channel: # print("expect --> received chunk notification", flush=True) # debug if not self.response_sent and watch_for in self.stdout: # print("expect --> sending response...", flush=True) # debug await self.process.stdin.send_all(respond_with) self.response_sent = True # print("expect --> response sent", flush=True) # debug @classmethod @asynccontextmanager async def open_process( cls, args: "Union[str, List[str]]", env_additions: Dict[str, str] = {} ) -> "AsyncIterator[Expect]": """ entry point for using Expect() opens the 
process, opens a nursery, and starts the copier and printer this waits until the process is finished, so wrapping in a trio.move_on_after() is good to use as a timeout """ printer_channels: ( "Tuple[MemorySendChannel[bytes], MemoryReceiveChannel[bytes]]" ) = trio.open_memory_channel(1) printer_send_channel, printer_receive_channel = printer_channels notifier_channels: ( "Tuple[MemorySendChannel[bytes], MemoryReceiveChannel[bytes]]" ) = trio.open_memory_channel(0) notifier_send_channel, notifier_receive_channel = notifier_channels async with notifier_receive_channel: with patch.dict("os.environ", values=env_additions) as patched_env: async with await trio.open_process( args, stdin=PIPE, stdout=PIPE, stderr=STDOUT, env=patched_env ) as process: async with trio.open_nursery() as nursery: expect = cls( process=process, printer_send_channel=printer_send_channel, printer_receive_channel=printer_receive_channel, notifier_send_channel=notifier_send_channel, opened_notifier_receive_channel=notifier_receive_channel, ) nursery.start_soon(expect.copier_recorder) nursery.start_soon(expect.printer) yield expect # print("waiting for process") # debug await expect.process.wait() class Bootstrapper: UPDATED_ENVIRONMENT: Dict[str, str] = {} SHELL: Optional[Path] = None _SCOOP_INSTALLED = False _PIP_INSTALLED = False TEMP_DIR: Optional[Path] = None PIP_DIR: Optional[Path] = None VIRTUALENV_INSTALL_DIR: Optional[Path] = None VENV_DIR: Optional[Path] = None CACHE_DIR: Optional[Path] = None PYTHON_EXECUTABLE: str = sys.executable def __init__(self, temp_dir: Path) -> None: if WINDOWS: powershell_str = which("powershell") powershell_path = Path(powershell_str).resolve() if not (powershell_str and powershell_path.is_file()): raise FileNotFoundError( f"powershell not found at '{powershell_str}' or '{powershell_path}'" ) self.SHELL = powershell_path self.REPOSITORY_DIR = Path("~/projects/dotfiles/").expanduser().resolve() self.TEMP_DIR = temp_dir assert self.TEMP_DIR.is_dir() self.PIP_DIR = 
self.TEMP_DIR / "pip" self.PIP_DIR.mkdir(exist_ok=True) self.VIRTUALENV_INSTALL_DIR = self.TEMP_DIR / "virtualenv" self.VIRTUALENV_INSTALL_DIR.mkdir(exist_ok=True) self.VENV_DIR = self.TEMP_DIR / "venv" self.VENV_DIR.mkdir(exist_ok=True) self.CACHE_DIR = self.TEMP_DIR / "cache" self.CACHE_DIR.mkdir(exist_ok=True) self._PIP_INSTALLED = ( self.cmd([self.PYTHON_EXECUTABLE, "-m", "pip", "--version"]).returncode == 0 ) self._PIP_INSTALLED = False def cmd(self, args: List[str], stdin: str = "") -> CompletedProcess: print(f"running -> {args!r}") with patch.dict("os.environ", values=self.UPDATED_ENVIRONMENT) as patched_env: result = run( args, stdin=(stdin or PIPE), stderr=STDOUT, stdout=PIPE, check=False, env=patched_env, ) print(result.stdout.decode() or "") return result def shell(self, code: str) -> CompletedProcess: print(f'shell -> "{code}"') if self.UPDATED_ENVIRONMENT: with patch.dict( "os.environ", values=self.UPDATED_ENVIRONMENT ) as patched_env: result = run( code, text=True, capture_output=True, check=False, shell=True, executable=str(self.SHELL) or None, env=patched_env, ) else: result = run( code, text=True, capture_output=True, check=False, shell=True, executable=str(self.SHELL) or None, ) if result.stdout: print(result.stdout) if result.stderr: print(result.stderr) return result def main(self) -> None: try: import virtualenv except ImportError: self.bootstrap_virtualenv() import virtualenv # isort:skip session = virtualenv.cli_run([str(self.VENV_DIR), "--clear", "--download"]) if WINDOWS: venv_python = self.VENV_DIR / "Scripts" / "python.exe" venv_modules = self.VENV_DIR / "Lib" / "site-packages" else: raise NotImplementedError("only Windows supported right now") if not (venv_python and venv_python.is_file()): raise Exception( f"could not find a virtual environment python at '{venv_python}'" ) assert venv_modules.is_dir(), f"missing directory '{venv_modules}'" self.PYTHON_EXECUTABLE = str(venv_python) sys.path.insert(0, str(venv_modules)) # Install trio 
self.pip(["install", "trio"]) import trio as trio_module # isort:skip global trio trio = trio_module installer = Installer( temp_dir=self.TEMP_DIR, repository_dir=self.REPOSITORY_DIR, shell=self.SHELL, venv_dir=self.VENV_DIR, cache_dir=self.CACHE_DIR, python_executable=self.PYTHON_EXECUTABLE, updated_environment=self.UPDATED_ENVIRONMENT, ) trio.run(installer.main) def bootstrap_virtualenv(self) -> None: if not self._PIP_INSTALLED: self.bootstrap_pip() self.VIRTUALENV_INSTALL_DIR.mkdir(exist_ok=True) self.pip( ["install", "virtualenv", "--target", str(self.VIRTUALENV_INSTALL_DIR)] ) sys.path.insert(0, str(self.VIRTUALENV_INSTALL_DIR)) import virtualenv # isort:skip def bootstrap_pip(self) -> None: if self._PIP_INSTALLED: return # NOTE: On Windows, the SSL certificates for some reason aren't # available until a web request is made that absolutely requires # them # If it's a truly fresh install, then any urlopen() call to an # https:// url will fail with an SSL context error: # >> ssl.SSLCertVerificationError: [SSL: CERTIFICATE_VERIFY_FAILED] certificate verify failed: unable to get local issuer certificate self.shell("iwr -useb https://bootstrap.pypa.io") # https://pip.pypa.io/en/stable/installation/#get-pip-py get_pip_file = self.CACHE_DIR / "get_pip.py" get_pip_file.touch() with get_pip_file.open(mode="wb") as file: with urlopen("https://bootstrap.pypa.io/get-pip.py") as request: while request.peek(1): file.write(request.read(8192)) # NOTE: pip forces the --user flag on Microsoft Store Pythons: # https://stackoverflow.com/q/63783587 self.cmd( [ self.PYTHON_EXECUTABLE, str(get_pip_file), "--target", str(self.PIP_DIR), "--no-user", ] ) sys.path.insert(0, str(self.PIP_DIR)) # Causes Python to find the downloaded pip module self.UPDATED_ENVIRONMENT["PYTHONPATH"] = str(self.PIP_DIR) self._PIP_INSTALLED = True def pip(self, args: List[str]) -> None: if not self._PIP_INSTALLED: self.bootstrap_pip() # NOTE: pip forces the --user flag on Microsoft Store Pythons: # 
https://stackoverflow.com/q/63783587 self.cmd([self.PYTHON_EXECUTABLE, "-m", "pip", *args, "--no-user"]) class Installer: SHELL: Optional[Path] = None PYTHON_EXECUTABLE: str = sys.executable UPDATED_ENVIRONMENT: Dict[str, str] = {} _SCOOP_INSTALLED: bool = False PROCESS_TYPES: Dict[str, str] = { "cmd": "{0!r}", "shell": '"{0}"', "pip": "{0}", "scoop": "{0}", } REPO_URL = "https://github.com/mawillcockson/dotfiles.git" def __init__( self, temp_dir: Path, repository_dir: Path, shell: Optional[Path] = None, venv_dir: Optional[Path] = None, cache_dir: Optional[Path] = None, python_executable: str = sys.executable, updated_environment: Dict[str, str] = {}, ) -> None: if WINDOWS: if not shell: powershell_str = which("powershell") powershell_path = Path(powershell_str).resolve() if not (powershell_str and powershell_path.is_file()): raise FileNotFoundError( f"powershell not found at '{powershell_str}' or '{powershell_path}'" ) self.SHELL = powershell_path else: self.SHELL = shell self.REPOSITORY_DIR = repository_dir self.TEMP_DIR = temp_dir assert self.TEMP_DIR.is_dir() self.VENV_DIR = venv_dir or (self.TEMP_DIR / "venv") self.VENV_DIR.mkdir(exist_ok=True) self.CACHE_DIR = cache_dir or (self.TEMP_DIR / "cache") self.CACHE_DIR.mkdir(exist_ok=True) self.PYTHON_EXECUTABLE = python_executable self.UPDATED_ENVIRONMENT.update(updated_environment) async def cmd( self, args: List[str], check: bool = True, process_type: str = "cmd", ) -> "Expect": args_str = self.PROCESS_TYPES.get( process_type, self.PROCESS_TYPES["cmd"] ).format(args) cmd_str = f"{process_type} -> {args_str}" print(cmd_str) async with Expect.open_process( args, env_additions=self.UPDATED_ENVIRONMENT, ) as expect: pass if check and expect.process.returncode != 0: raise CalledProcessError("returncode is not 0") return expect async def pip(self, args: List[str]) -> "Expect": return await self.cmd( [self.PYTHON_EXECUTABLE, "-m", "pip", *args, "--no-user"], process_type="pip", ) async def shell( self, code: str, 
check: bool = True, process_type: str = "shell" ) -> "Expect": # NOTE: "{shell} -c {script}" works with powershell, sh (bash, dash, etc), not sure about other platforms return await self.cmd( [str(self.SHELL), "-c", code], check=check, process_type=process_type ) async def scoop(self, args: str) -> "Expect": if not (WINDOWS and self._SCOOP_INSTALLED): raise Exception( "not running scoop when not on Windows or scoop not installed" ) return await self.shell(f"scoop {args}", check=True, process_type="scoop") async def install_scoop(self) -> None: if not WINDOWS: raise Exception("not installing scoop when not on Windows") # Check if scoop is already installed self.UPDATED_ENVIRONMENT["PATH"] = win_get_user_env("PATH") expect = await self.shell("scoop which scoop", check=False) self._SCOOP_INSTALLED = ( "is not recognized as the name of" not in expect.stdout.decode() and expect.process.returncode == 0 ) if not self._SCOOP_INSTALLED: # Set PowerShell's Execution Policy args = [ str(self.SHELL), "-c", "& {Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser}", ] print(f"running -> {args!r}") with trio.move_on_after(7): async with Expect.open_process( args, env_additions=self.UPDATED_ENVIRONMENT ) as expect: with trio.move_on_after(2): await expect.expect( watch_for=b'(default is "N"):', respond_with=b"A", ) # NOTE: don't have to check if the response was sent, because # sometimes the execution policy is set without ever sending a # response (i.e. if the execution policy was already set). # Instead, just check if the policy is set correctly. result = await self.cmd( [str(self.SHELL), "-c", "& {Get-ExecutionPolicy}"], check=False ) if not "RemoteSigned" in result.stdout.decode(): raise Exception("could not set PowerShell Execution Policy") # Install Scoop result = await self.cmd( [str(self.SHELL), "-c", "& {iwr -useb https://get.scoop.sh | iex}"] ) stdout = result.stdout.decode().lower() if not ( "scoop was installed successfully!" 
in stdout or "scoop is already installed" in stdout ): raise Exception("scoop was not installed") self.UPDATED_ENVIRONMENT["PATH"] = win_get_user_env("PATH") self._SCOOP_INSTALLED = True installed_apps = (await self.scoop("list")).stdout.decode() for requirement in ["aria2", "git", "python"]: if requirement in installed_apps: continue await self.scoop(f"install {requirement}") wanted_buckets = ["extras"] added_buckets = (await self.scoop("bucket list")).stdout.decode() for bucket in wanted_buckets: if bucket in added_buckets: continue await self.scoop(f"bucket add {bucket}") async def main(self) -> None: # Install rest of dependencies if MACOS or UNIX: raise NotImplementedError("only Windows supported currently") if WINDOWS: # implicitly installs git as well await self.install_scoop() for dependency_check in (["git", "--version"], ["python", "--version"]): try: await self.cmd(dependency_check, check=True) except CalledProcessError as err: raise Exception( f"dependency '{dependency_check!r}' was not found" ) from err ## Clone dotfiles repository self.REPOSITORY_DIR.mkdir(parents=True, exist_ok=True) # Check if there's an existing repository, and if that repository is clean # NOTE::FUTURE dulwich does not support submodules # https://github.com/dulwich/dulwich/issues/506 repo_status = await self.cmd(["git", "-C", str(self.REPOSITORY_DIR), "status", "--porcelain"], check=False) if "not a git repository" in repo_status.stdout.decode().lower(): await self.cmd( [ "git", "clone", "--recurse-submodules", self.REPO_URL, str(self.REPOSITORY_DIR), ] ) # Three scenarios: # - Repo exists and is completely clean and up to date # - Repo exists and there are uncommitted changes # - Repo exists and there are un-pushed changes # # The last one can be helped with dulwich if issue 506 is resolved, or # complex git commands, like: # https://stackoverflow.com/a/6133968 # # For now I'm saying "deal with it manually" # - Repo exists and there are changes # NOTE: optimistically try to pull 
in new upstream changes; could fail in numerous ways await self.cmd(["git", "-C", str(self.REPOSITORY_DIR), "pull", "--ff-only"]) # Run dotdrop raise NotImplementedError("setup dotfiles") if __name__ == "__main__": with TemporaryDirectory() as temp_dir: temp_dir_path = Path(temp_dir).resolve(strict=True) bootstrapper = Bootstrapper(temp_dir_path) bootstrapper.main() # import trio # isort:skip # installer = Installer( # temp_dir=bootstrapper.TEMP_DIR, # repository_dir=bootstrapper.REPOSITORY_DIR, # shell=bootstrapper.SHELL, # venv_dir=bootstrapper.VENV_DIR, # cache_dir=bootstrapper.CACHE_DIR, # python_executable=bootstrapper.PYTHON_EXECUTABLE, # updated_environment=bootstrapper.UPDATED_ENVIRONMENT, # ) # trio.run(installer.main)
win_get_user_env
identifier_name
64112.user.js
// ==UserScript== // @name Google Preferences Without Cookies // @version 1.1 // @date 2009-12-12 // @description Allows setting some Google preferences without having cookies enabled. // @namespace http://www.theworldofstuff.com/greasemonkey/ // @copyright Copyright 2009 Jordon Kalilich (http://www.theworldofstuff.com/) // @license GNU GPL version 3 or later; http://www.gnu.org/copyleft/gpl.html // @require http://usocheckup.dune.net/64112.js?maxage=3 // @include http*://*.google.tld/* // ==/UserScript== /* Interface Language (hl) default: probably depends on the domain 2-letter code, ex: en Search language (lr) default: all languages (english); local language (other domains) multiple languages possible (ex: lang_en|lang_eo|lang_fr) Safesearch (safe) off images (default) active Number of results per page (num) default: 10 (google says this provides the fastest results) Any number 1-100 */ var interfaceLanguage = GM_getValue('google.hl', ''); var searchLanguage = GM_getValue('google.lr', ''); var safeSearch = GM_getValue('google.safe', 'images'); var numberOfResults = GM_getValue('google.num', 10); if (top.location == location) { // get rid of google's stupid facebook-like ajax magic search query loading stuff that breaks the script really badly // (somehow managing to make firefox load itself inside itself, if you want to know) // this code and the following function are basically taken from my other script, Facebook URL Cleaner v6 (http://userscripts.org/scripts/show/29910) var reg = /^(https?:\/\/([a-z]+\.)*google((\.[a-z]{1,3}){1,2})\/)[^#]*#((.*&)?q=.+)/i; document.addEventListener('DOMNodeInserted', checkURL, true); } function checkURL() { if (reg.test(location.href))
} // if on a search page if (/[?#](.*&)?q=/i.test(location.href)) { var queryString = location.href.substring(location.href.indexOf('?')); var origQueryString = queryString; // change the query string if necessary // if hl (interface language) is specified, change it because it is only specified from the preferences // (or set by virtue of the domain name, e.g. google.fr) - not specified by advanced search. if (interfaceLanguage) { var hlSetRight = new RegExp('[?&]hl='+interfaceLanguage); if (!hlSetRight.test(queryString)) { // remove hl (if it exists) and add it back with the right value queryString = queryString.replace(/(.*?)[?&]hl=[^&]*(.*)/,'$1$2'); queryString += '&hl=' + interfaceLanguage; } } if (searchLanguage) { var lrSetRight = new RegExp('[?&]lr='+searchLanguage); if (!lrSetRight.test(queryString)) { // include case where lr equals nothing (i.e., Google's own default) queryString = queryString.replace(/(.*?)[?&]lr=[^&]*(.*)/,'$1$2'); queryString += '&lr=' + searchLanguage; } } if (!/[?&]safe=/.test(queryString)) { queryString += '&safe=' + safeSearch; } // including num in the URL messes with OptimizeGoogle's results streaming, so don't include it unless it's necessary if (!/[?&]num=/.test(queryString) && numberOfResults != 10) { queryString += '&num=' + numberOfResults; } // see if anything changed if (origQueryString != queryString) { //alert(queryString); if (queryString.indexOf('?') != 0) { queryString = '?' 
+ queryString; // add back the leading question mark if we accidentally got rid of it (man, this is messy) } location.replace(location.href.substring(0, location.href.indexOf('?')) + queryString); } } // if on preferences page, set preferences if (location.href.indexOf('/preferences') > 0) { // set title and blank out body document.getElementsByTagName('head')[0].innerHTML = '<title>Google Preferences Without Cookies</title>'; GM_addStyle('\ body {background: #ffffff; margin: 0; padding: 20px 150px 10px 150px; font-family: arial, sans-serif; font-size: 10pt; color: #000000}\ h1 {font-weight: bold; font-size: 14pt; background: #e5ecf9; border-top: 1px solid #3366cc; margin: 0 0 10px 0; padding: 2px 5px}\ fieldset {margin: 10px 0; padding: 5px; border: 3px solid #e5ecf9}\ legend {font-weight: bold}\ table {font-size: 10pt; margin: 0 20px 8px 20px; padding: 0}\ td {margin: 0; padding: 0 10px 0 0}\ p {margin: 8px; padding: 0}\ p#scriptLinkParagraph {font-size: 8pt; text-align: center}\ input {margin: 0; padding: 0}\ a:link {color: #0000cc}\ a:visited {color: #551a8b}\ a:active {color: #ff0000}\ '); document.getElementsByTagName('body')[0].innerHTML = ''; var pageBody = '<h1>Google Preferences Without Cookies</h1>'; // interface language pageBody += '<fieldset><legend>Interface Language</legend>'; var interfaceLanguages = { "": "Default (varies by domain)", "af": "Afrikaans", "ak": "Akan", "sq": "Albanian", "am": "Amharic", "ar": "Arabic", "hy": "Armenian", "az": "Azerbaijani", "eu": "Basque", "be": "Belarusian", "bn": "Bengali", "bh": "Bihari", "xx-bork": "Bork, bork, bork!", "bs": "Bosnian", "br": "Breton", "bg": "Bulgarian", "km": "Cambodian", "ca": "Catalan", "zh-CN": "Chinese (Simplified)", "zh-TW": "Chinese (Traditional)", "co": "Corsican", "hr": "Croatian", "cs": "Czech", "da": "Danish", "nl": "Dutch", "xx-elmer": "Elmer Fudd", "en": "English", "eo": "Esperanto", "et": "Estonian", "fo": "Faroese", "tl": "Filipino", "fi": "Finnish", "fr": "French", "fy": 
"Frisian", "gl": "Galician", "ka": "Georgian", "de": "German", "el": "Greek", "gn": "Guarani", "gu": "Gujarati", "xx-hacker": "Hacker", "ha": "Hausa", "haw": "Hawaiian", "iw": "Hebrew", "hi": "Hindi", "hu": "Hungarian", "is": "Icelandic", "ig": "Igbo", "id": "Indonesian", "ia": "Interlingua", "ga": "Irish", "it": "Italian", "ja": "Japanese", "jw": "Javanese", "kn": "Kannada", "kk": "Kazakh", "rw": "Kinyarwanda", "rn": "Kirundi", "xx-klingon": "Klingon", "ko": "Korean", "ku": "Kurdish", "ky": "Kyrgyz", "lo": "Laothian", "la": "Latin", "lv": "Latvian", "ln": "Lingala", "lt": "Lithuanian", "lg": "Luganda", "mk": "Macedonian", "mg": "Malagasy", "ms": "Malay", "ml": "Malayalam", "mt": "Maltese", "mi": "Maori", "mr": "Marathi", "mfe": "Mauritian Creole", "mo": "Moldavian", "mn": "Mongolian", "sr-ME": "Montenegrin", "ne": "Nepali", "no": "Norwegian", "nn": "Norwegian (Nynorsk)", "oc": "Occitan", "or": "Oriya", "om": "Oromo", "ps": "Pashto", "fa": "Persian", "xx-pirate": "Pirate", "pl": "Polish", "pt-BR": "Portuguese (Brazil)", "pt-PT": "Portuguese (Portugal)", "pa": "Punjabi", "qu": "Quechua", "ro": "Romanian", "rm": "Romansh", "ru": "Russian", "gd": "Scots Gaelic", "sr": "Serbian", "sh": "Serbo-Croatian", "st": "Sesotho", "sn": "Shona", "sd": "Sindhi", "si": "Sinhalese", "sk": "Slovak", "sl": "Slovenian", "so": "Somali", "es": "Spanish", "su": "Sundanese", "sw": "Swahili", "sv": "Swedish", "tg": "Tajik", "ta": "Tamil", "tt": "Tatar", "te": "Telugu", "th": "Thai", "ti": "Tigrinya", "to": "Tonga", "tr": "Turkish", "tk": "Turkmen", "tw": "Twi", "ug": "Uighur", "uk": "Ukrainian", "ur": "Urdu", "uz": "Uzbek", "vi": "Vietnamese", "cy": "Welsh", "xh": "Xhosa", "yi": "Yiddish", "yo": "Yoruba", "zu": "Zulu" } pageBody += '<p><label>Display Google tips and messages in: <select id="GPWOC_hl">'; for (var i in interfaceLanguages) { pageBody += '<option value="' + i + '"' + ((interfaceLanguage==i)?' 
selected="selected"':'') + '>' + interfaceLanguages[i] + '</option>'; } pageBody += '</select></label></p></fieldset>'; // search language pageBody += '<fieldset><legend>Search Languages</legend>'; pageBody += '<p><input type="radio" name="GPWOC_search" id="GPWOC_searchAll"'; pageBody += (searchLanguage) ? '' : ' checked="checked"'; pageBody += ' /> Search for pages written in any language (default)</p><p><input type="radio" name="GPWOC_search" id="GPWOC_searchSome"'; pageBody += (searchLanguage) ? ' checked="checked"' : ''; pageBody += ' /> Prefer pages written in these language(s):</p>'; var searchLanguages = { "af": "Afrikaans", "ar": "Arabic", "hy": "Armenian", "be": "Belarusian", "bg": "Bulgarian", "ca": "Catalan", "zh-CN": "Chinese (Simplified)", "zh-TW": "Chinese (Traditional)", "hr": "Croatian", "cs": "Czech", "da": "Danish", "nl": "Dutch", "en": "English", "eo": "Esperanto", "et": "Estonian", "tl": "Filipino", "fi": "Finnish", "fr": "French", "de": "German", "el": "Greek", "iw": "Hebrew", "hu": "Hungarian", "is": "Icelandic", "id": "Indonesian", "it": "Italian", "ja": "Japanese", "ko": "Korean", "lv": "Latvian", "lt": "Lithuanian", "no": "Norwegian", "fa": "Persian", "pl": "Polish", "pt": "Portuguese", "ro": "Romanian", "ru": "Russian", "sr": "Serbian", "sk": "Slovak", "sl": "Slovenian", "es": "Spanish", "sw": "Swahili", "sv": "Swedish", "th": "Thai", "tr": "Turkish", "uk": "Ukrainian", "vi": "Vietnamese" } var searchLanguagesLength = 0; for (var i in searchLanguages) { searchLanguagesLength++; } var colHeight = Math.ceil(searchLanguagesLength / 4); var searchLangCount = 0; var searchLangColCount = 0; pageBody += '<table><tr><td valign="top">'; for (var i in searchLanguages) { searchLangCount++; searchLangColCount++; pageBody += '<label><input type="checkbox" id="GPWOC_lr' + searchLangCount + '" value="lang_' + i + '"' + ((searchLanguage.indexOf("lang_"+i)>=0)?' 
checked="checked"':'') + '/> ' + searchLanguages[i] + '</label><br />'; if (searchLangColCount == colHeight) { pageBody += '</td><td valign="top">'; searchLangColCount = 0; } } pageBody += '</td></tr></table></fieldset>'; // SafeSearch pageBody += '<fieldset><legend>SafeSearch Filtering</legend>'; pageBody += "<p>Google's SafeSearch blocks web pages containing explicit sexual content from appearing in search results.</p><p>"; pageBody += '<label><input type="radio" name="GPWOC_safe" id="GPWOC_safe_active" value="active"' + ((safeSearch=='active')?' checked="checked"':'') + '> Use strict filtering (Filter both explicit text and explicit images)</label><br />'; pageBody += '<label><input type="radio" name="GPWOC_safe" id="GPWOC_safe_images" value="images"' + ((safeSearch != 'active' && safeSearch != 'off')?' checked="checked"':'') + '> Use moderate filtering (Filter explicit images only - default behavior)</label><br />'; pageBody += '<label><input type="radio" name="GPWOC_safe" id="GPWOC_safe_off" value="off"' + ((safeSearch=='off')?' checked="checked"':'') + '> Do not filter my search results</label></p>'; pageBody += '</fieldset>'; // number of results per page if (numberOfResults != parseInt(numberOfResults) || numberOfResults < 1 || numberOfResults > 100) { numberOfResults = 10; } pageBody += '<fieldset><legend>Number of Results</legend>'; pageBody += "<p>Google's default (10) provides the fastest results.</p>"; pageBody += '<p><label>Display <select id="GPWOC_num" name="num">'; for (i = 1; i <= 100; i++) { pageBody += '<option value="' + i + '"' + ((numberOfResults==i)?' selected="selected"':'') + '>' + i + '</option>'; } pageBody += '</select> results per page.</label></p></fieldset>'; // confirm button pageBody += '<p><input type="button" id="GPWOC_save" value="Save Preferences" /></p>'; // script link pageBody += '<p id="scriptLinkParagraph">This page is generated by <a href="http://userscripts.org/scripts/show/64112">Google Preferences Without Cookies</a>. 
Google is a trademark of Google Inc., which does not endorse this user script.</p>'; document.getElementsByTagName('body')[0].innerHTML = pageBody; // save values document.getElementById('GPWOC_save').addEventListener('click', function (event) { // interface language GM_setValue('google.hl', document.getElementById('GPWOC_hl').value); // search languages if (document.getElementById('GPWOC_searchAll').checked) { GM_setValue('google.lr', ''); } else if (document.getElementById('GPWOC_searchSome').checked) { var searchLanguages = ''; for (i = 1; i <= searchLanguagesLength; i++) { if (document.getElementById('GPWOC_lr' + i).checked) { searchLanguages += document.getElementById('GPWOC_lr' + i).value + '|'; } } if (searchLanguages.length > 0) { searchLanguages = searchLanguages.substring(0, searchLanguages.length - 1); // get rid of last '|' } GM_setValue('google.lr', searchLanguages); } // safesearch if (document.getElementById('GPWOC_safe_active').checked) { GM_setValue('google.safe', 'active'); } else if (document.getElementById('GPWOC_safe_images').checked) { GM_setValue('google.safe', 'images'); } else if (document.getElementById('GPWOC_safe_off').checked) { GM_setValue('google.safe', 'off'); } // results per page GM_setValue('google.num', document.getElementById('GPWOC_num').value); alert('Your preferences have been saved.'); }, true); } else { // if not on the preferences page, find a link to the preferences page var links = document.getElementsByTagName('a'); for (i = 0; i < links.length; i++) { if (links[i].href && links[i].href.indexOf('/preferences') >= 0) { links[i].innerHTML = 'Google Preferences Without Cookies'; break; } } }
{ document.removeEventListener('DOMNodeInserted', checkURL, true); // we need to remove the event listener or we might cause an infinite loop var newURL = location.href.replace(reg, '$1search?$5'); newURL = newURL.replace(/&fp=[^&]*/i, ''); // remove fp also, otherwise the new page will be blank location.replace(newURL); }
conditional_block
64112.user.js
// ==UserScript== // @name Google Preferences Without Cookies // @version 1.1 // @date 2009-12-12 // @description Allows setting some Google preferences without having cookies enabled. // @namespace http://www.theworldofstuff.com/greasemonkey/ // @copyright Copyright 2009 Jordon Kalilich (http://www.theworldofstuff.com/) // @license GNU GPL version 3 or later; http://www.gnu.org/copyleft/gpl.html // @require http://usocheckup.dune.net/64112.js?maxage=3 // @include http*://*.google.tld/* // ==/UserScript== /* Interface Language (hl) default: probably depends on the domain 2-letter code, ex: en Search language (lr) default: all languages (english); local language (other domains) multiple languages possible (ex: lang_en|lang_eo|lang_fr) Safesearch (safe) off images (default) active Number of results per page (num) default: 10 (google says this provides the fastest results) Any number 1-100 */ var interfaceLanguage = GM_getValue('google.hl', ''); var searchLanguage = GM_getValue('google.lr', ''); var safeSearch = GM_getValue('google.safe', 'images'); var numberOfResults = GM_getValue('google.num', 10); if (top.location == location) { // get rid of google's stupid facebook-like ajax magic search query loading stuff that breaks the script really badly // (somehow managing to make firefox load itself inside itself, if you want to know) // this code and the following function are basically taken from my other script, Facebook URL Cleaner v6 (http://userscripts.org/scripts/show/29910) var reg = /^(https?:\/\/([a-z]+\.)*google((\.[a-z]{1,3}){1,2})\/)[^#]*#((.*&)?q=.+)/i; document.addEventListener('DOMNodeInserted', checkURL, true); } function checkURL()
// if on a search page if (/[?#](.*&)?q=/i.test(location.href)) { var queryString = location.href.substring(location.href.indexOf('?')); var origQueryString = queryString; // change the query string if necessary // if hl (interface language) is specified, change it because it is only specified from the preferences // (or set by virtue of the domain name, e.g. google.fr) - not specified by advanced search. if (interfaceLanguage) { var hlSetRight = new RegExp('[?&]hl='+interfaceLanguage); if (!hlSetRight.test(queryString)) { // remove hl (if it exists) and add it back with the right value queryString = queryString.replace(/(.*?)[?&]hl=[^&]*(.*)/,'$1$2'); queryString += '&hl=' + interfaceLanguage; } } if (searchLanguage) { var lrSetRight = new RegExp('[?&]lr='+searchLanguage); if (!lrSetRight.test(queryString)) { // include case where lr equals nothing (i.e., Google's own default) queryString = queryString.replace(/(.*?)[?&]lr=[^&]*(.*)/,'$1$2'); queryString += '&lr=' + searchLanguage; } } if (!/[?&]safe=/.test(queryString)) { queryString += '&safe=' + safeSearch; } // including num in the URL messes with OptimizeGoogle's results streaming, so don't include it unless it's necessary if (!/[?&]num=/.test(queryString) && numberOfResults != 10) { queryString += '&num=' + numberOfResults; } // see if anything changed if (origQueryString != queryString) { //alert(queryString); if (queryString.indexOf('?') != 0) { queryString = '?' 
+ queryString; // add back the leading question mark if we accidentally got rid of it (man, this is messy) } location.replace(location.href.substring(0, location.href.indexOf('?')) + queryString); } } // if on preferences page, set preferences if (location.href.indexOf('/preferences') > 0) { // set title and blank out body document.getElementsByTagName('head')[0].innerHTML = '<title>Google Preferences Without Cookies</title>'; GM_addStyle('\ body {background: #ffffff; margin: 0; padding: 20px 150px 10px 150px; font-family: arial, sans-serif; font-size: 10pt; color: #000000}\ h1 {font-weight: bold; font-size: 14pt; background: #e5ecf9; border-top: 1px solid #3366cc; margin: 0 0 10px 0; padding: 2px 5px}\ fieldset {margin: 10px 0; padding: 5px; border: 3px solid #e5ecf9}\ legend {font-weight: bold}\ table {font-size: 10pt; margin: 0 20px 8px 20px; padding: 0}\ td {margin: 0; padding: 0 10px 0 0}\ p {margin: 8px; padding: 0}\ p#scriptLinkParagraph {font-size: 8pt; text-align: center}\ input {margin: 0; padding: 0}\ a:link {color: #0000cc}\ a:visited {color: #551a8b}\ a:active {color: #ff0000}\ '); document.getElementsByTagName('body')[0].innerHTML = ''; var pageBody = '<h1>Google Preferences Without Cookies</h1>'; // interface language pageBody += '<fieldset><legend>Interface Language</legend>'; var interfaceLanguages = { "": "Default (varies by domain)", "af": "Afrikaans", "ak": "Akan", "sq": "Albanian", "am": "Amharic", "ar": "Arabic", "hy": "Armenian", "az": "Azerbaijani", "eu": "Basque", "be": "Belarusian", "bn": "Bengali", "bh": "Bihari", "xx-bork": "Bork, bork, bork!", "bs": "Bosnian", "br": "Breton", "bg": "Bulgarian", "km": "Cambodian", "ca": "Catalan", "zh-CN": "Chinese (Simplified)", "zh-TW": "Chinese (Traditional)", "co": "Corsican", "hr": "Croatian", "cs": "Czech", "da": "Danish", "nl": "Dutch", "xx-elmer": "Elmer Fudd", "en": "English", "eo": "Esperanto", "et": "Estonian", "fo": "Faroese", "tl": "Filipino", "fi": "Finnish", "fr": "French", "fy": 
"Frisian", "gl": "Galician", "ka": "Georgian", "de": "German", "el": "Greek", "gn": "Guarani", "gu": "Gujarati", "xx-hacker": "Hacker", "ha": "Hausa", "haw": "Hawaiian", "iw": "Hebrew", "hi": "Hindi", "hu": "Hungarian", "is": "Icelandic", "ig": "Igbo", "id": "Indonesian", "ia": "Interlingua", "ga": "Irish", "it": "Italian", "ja": "Japanese", "jw": "Javanese", "kn": "Kannada", "kk": "Kazakh", "rw": "Kinyarwanda", "rn": "Kirundi", "xx-klingon": "Klingon", "ko": "Korean", "ku": "Kurdish", "ky": "Kyrgyz", "lo": "Laothian", "la": "Latin", "lv": "Latvian", "ln": "Lingala", "lt": "Lithuanian", "lg": "Luganda", "mk": "Macedonian", "mg": "Malagasy", "ms": "Malay", "ml": "Malayalam", "mt": "Maltese", "mi": "Maori", "mr": "Marathi", "mfe": "Mauritian Creole", "mo": "Moldavian", "mn": "Mongolian", "sr-ME": "Montenegrin", "ne": "Nepali", "no": "Norwegian", "nn": "Norwegian (Nynorsk)", "oc": "Occitan", "or": "Oriya", "om": "Oromo", "ps": "Pashto", "fa": "Persian", "xx-pirate": "Pirate", "pl": "Polish", "pt-BR": "Portuguese (Brazil)", "pt-PT": "Portuguese (Portugal)", "pa": "Punjabi", "qu": "Quechua", "ro": "Romanian", "rm": "Romansh", "ru": "Russian", "gd": "Scots Gaelic", "sr": "Serbian", "sh": "Serbo-Croatian", "st": "Sesotho", "sn": "Shona", "sd": "Sindhi", "si": "Sinhalese", "sk": "Slovak", "sl": "Slovenian", "so": "Somali", "es": "Spanish", "su": "Sundanese", "sw": "Swahili", "sv": "Swedish", "tg": "Tajik", "ta": "Tamil", "tt": "Tatar", "te": "Telugu", "th": "Thai", "ti": "Tigrinya", "to": "Tonga", "tr": "Turkish", "tk": "Turkmen", "tw": "Twi", "ug": "Uighur", "uk": "Ukrainian", "ur": "Urdu", "uz": "Uzbek", "vi": "Vietnamese", "cy": "Welsh", "xh": "Xhosa", "yi": "Yiddish", "yo": "Yoruba", "zu": "Zulu" } pageBody += '<p><label>Display Google tips and messages in: <select id="GPWOC_hl">'; for (var i in interfaceLanguages) { pageBody += '<option value="' + i + '"' + ((interfaceLanguage==i)?' 
selected="selected"':'') + '>' + interfaceLanguages[i] + '</option>'; } pageBody += '</select></label></p></fieldset>'; // search language pageBody += '<fieldset><legend>Search Languages</legend>'; pageBody += '<p><input type="radio" name="GPWOC_search" id="GPWOC_searchAll"'; pageBody += (searchLanguage) ? '' : ' checked="checked"'; pageBody += ' /> Search for pages written in any language (default)</p><p><input type="radio" name="GPWOC_search" id="GPWOC_searchSome"'; pageBody += (searchLanguage) ? ' checked="checked"' : ''; pageBody += ' /> Prefer pages written in these language(s):</p>'; var searchLanguages = { "af": "Afrikaans", "ar": "Arabic", "hy": "Armenian", "be": "Belarusian", "bg": "Bulgarian", "ca": "Catalan", "zh-CN": "Chinese (Simplified)", "zh-TW": "Chinese (Traditional)", "hr": "Croatian", "cs": "Czech", "da": "Danish", "nl": "Dutch", "en": "English", "eo": "Esperanto", "et": "Estonian", "tl": "Filipino", "fi": "Finnish", "fr": "French", "de": "German", "el": "Greek", "iw": "Hebrew", "hu": "Hungarian", "is": "Icelandic", "id": "Indonesian", "it": "Italian", "ja": "Japanese", "ko": "Korean", "lv": "Latvian", "lt": "Lithuanian", "no": "Norwegian", "fa": "Persian", "pl": "Polish", "pt": "Portuguese", "ro": "Romanian", "ru": "Russian", "sr": "Serbian", "sk": "Slovak", "sl": "Slovenian", "es": "Spanish", "sw": "Swahili", "sv": "Swedish", "th": "Thai", "tr": "Turkish", "uk": "Ukrainian", "vi": "Vietnamese" } var searchLanguagesLength = 0; for (var i in searchLanguages) { searchLanguagesLength++; } var colHeight = Math.ceil(searchLanguagesLength / 4); var searchLangCount = 0; var searchLangColCount = 0; pageBody += '<table><tr><td valign="top">'; for (var i in searchLanguages) { searchLangCount++; searchLangColCount++; pageBody += '<label><input type="checkbox" id="GPWOC_lr' + searchLangCount + '" value="lang_' + i + '"' + ((searchLanguage.indexOf("lang_"+i)>=0)?' 
checked="checked"':'') + '/> ' + searchLanguages[i] + '</label><br />'; if (searchLangColCount == colHeight) { pageBody += '</td><td valign="top">'; searchLangColCount = 0; } } pageBody += '</td></tr></table></fieldset>'; // SafeSearch pageBody += '<fieldset><legend>SafeSearch Filtering</legend>'; pageBody += "<p>Google's SafeSearch blocks web pages containing explicit sexual content from appearing in search results.</p><p>"; pageBody += '<label><input type="radio" name="GPWOC_safe" id="GPWOC_safe_active" value="active"' + ((safeSearch=='active')?' checked="checked"':'') + '> Use strict filtering (Filter both explicit text and explicit images)</label><br />'; pageBody += '<label><input type="radio" name="GPWOC_safe" id="GPWOC_safe_images" value="images"' + ((safeSearch != 'active' && safeSearch != 'off')?' checked="checked"':'') + '> Use moderate filtering (Filter explicit images only - default behavior)</label><br />'; pageBody += '<label><input type="radio" name="GPWOC_safe" id="GPWOC_safe_off" value="off"' + ((safeSearch=='off')?' checked="checked"':'') + '> Do not filter my search results</label></p>'; pageBody += '</fieldset>'; // number of results per page if (numberOfResults != parseInt(numberOfResults) || numberOfResults < 1 || numberOfResults > 100) { numberOfResults = 10; } pageBody += '<fieldset><legend>Number of Results</legend>'; pageBody += "<p>Google's default (10) provides the fastest results.</p>"; pageBody += '<p><label>Display <select id="GPWOC_num" name="num">'; for (i = 1; i <= 100; i++) { pageBody += '<option value="' + i + '"' + ((numberOfResults==i)?' selected="selected"':'') + '>' + i + '</option>'; } pageBody += '</select> results per page.</label></p></fieldset>'; // confirm button pageBody += '<p><input type="button" id="GPWOC_save" value="Save Preferences" /></p>'; // script link pageBody += '<p id="scriptLinkParagraph">This page is generated by <a href="http://userscripts.org/scripts/show/64112">Google Preferences Without Cookies</a>. 
Google is a trademark of Google Inc., which does not endorse this user script.</p>'; document.getElementsByTagName('body')[0].innerHTML = pageBody; // save values document.getElementById('GPWOC_save').addEventListener('click', function (event) { // interface language GM_setValue('google.hl', document.getElementById('GPWOC_hl').value); // search languages if (document.getElementById('GPWOC_searchAll').checked) { GM_setValue('google.lr', ''); } else if (document.getElementById('GPWOC_searchSome').checked) { var searchLanguages = ''; for (i = 1; i <= searchLanguagesLength; i++) { if (document.getElementById('GPWOC_lr' + i).checked) { searchLanguages += document.getElementById('GPWOC_lr' + i).value + '|'; } } if (searchLanguages.length > 0) { searchLanguages = searchLanguages.substring(0, searchLanguages.length - 1); // get rid of last '|' } GM_setValue('google.lr', searchLanguages); } // safesearch if (document.getElementById('GPWOC_safe_active').checked) { GM_setValue('google.safe', 'active'); } else if (document.getElementById('GPWOC_safe_images').checked) { GM_setValue('google.safe', 'images'); } else if (document.getElementById('GPWOC_safe_off').checked) { GM_setValue('google.safe', 'off'); } // results per page GM_setValue('google.num', document.getElementById('GPWOC_num').value); alert('Your preferences have been saved.'); }, true); } else { // if not on the preferences page, find a link to the preferences page var links = document.getElementsByTagName('a'); for (i = 0; i < links.length; i++) { if (links[i].href && links[i].href.indexOf('/preferences') >= 0) { links[i].innerHTML = 'Google Preferences Without Cookies'; break; } } }
{ if (reg.test(location.href)) { document.removeEventListener('DOMNodeInserted', checkURL, true); // we need to remove the event listener or we might cause an infinite loop var newURL = location.href.replace(reg, '$1search?$5'); newURL = newURL.replace(/&fp=[^&]*/i, ''); // remove fp also, otherwise the new page will be blank location.replace(newURL); } }
identifier_body
64112.user.js
// ==UserScript== // @name Google Preferences Without Cookies // @version 1.1 // @date 2009-12-12 // @description Allows setting some Google preferences without having cookies enabled. // @namespace http://www.theworldofstuff.com/greasemonkey/ // @copyright Copyright 2009 Jordon Kalilich (http://www.theworldofstuff.com/) // @license GNU GPL version 3 or later; http://www.gnu.org/copyleft/gpl.html // @require http://usocheckup.dune.net/64112.js?maxage=3 // @include http*://*.google.tld/* // ==/UserScript== /* Interface Language (hl) default: probably depends on the domain 2-letter code, ex: en Search language (lr) default: all languages (english); local language (other domains) multiple languages possible (ex: lang_en|lang_eo|lang_fr) Safesearch (safe) off images (default) active Number of results per page (num) default: 10 (google says this provides the fastest results) Any number 1-100 */ var interfaceLanguage = GM_getValue('google.hl', ''); var searchLanguage = GM_getValue('google.lr', ''); var safeSearch = GM_getValue('google.safe', 'images'); var numberOfResults = GM_getValue('google.num', 10); if (top.location == location) { // get rid of google's stupid facebook-like ajax magic search query loading stuff that breaks the script really badly // (somehow managing to make firefox load itself inside itself, if you want to know) // this code and the following function are basically taken from my other script, Facebook URL Cleaner v6 (http://userscripts.org/scripts/show/29910) var reg = /^(https?:\/\/([a-z]+\.)*google((\.[a-z]{1,3}){1,2})\/)[^#]*#((.*&)?q=.+)/i; document.addEventListener('DOMNodeInserted', checkURL, true); } function checkURL() { if (reg.test(location.href)) { document.removeEventListener('DOMNodeInserted', checkURL, true); // we need to remove the event listener or we might cause an infinite loop var newURL = location.href.replace(reg, '$1search?$5'); newURL = newURL.replace(/&fp=[^&]*/i, ''); // remove fp also, otherwise the new page will be blank 
location.replace(newURL); } } // if on a search page if (/[?#](.*&)?q=/i.test(location.href)) { var queryString = location.href.substring(location.href.indexOf('?')); var origQueryString = queryString; // change the query string if necessary // if hl (interface language) is specified, change it because it is only specified from the preferences // (or set by virtue of the domain name, e.g. google.fr) - not specified by advanced search. if (interfaceLanguage) { var hlSetRight = new RegExp('[?&]hl='+interfaceLanguage); if (!hlSetRight.test(queryString)) { // remove hl (if it exists) and add it back with the right value queryString = queryString.replace(/(.*?)[?&]hl=[^&]*(.*)/,'$1$2'); queryString += '&hl=' + interfaceLanguage; } } if (searchLanguage) { var lrSetRight = new RegExp('[?&]lr='+searchLanguage); if (!lrSetRight.test(queryString)) { // include case where lr equals nothing (i.e., Google's own default) queryString = queryString.replace(/(.*?)[?&]lr=[^&]*(.*)/,'$1$2'); queryString += '&lr=' + searchLanguage; } } if (!/[?&]safe=/.test(queryString)) { queryString += '&safe=' + safeSearch; } // including num in the URL messes with OptimizeGoogle's results streaming, so don't include it unless it's necessary if (!/[?&]num=/.test(queryString) && numberOfResults != 10) { queryString += '&num=' + numberOfResults; } // see if anything changed if (origQueryString != queryString) { //alert(queryString); if (queryString.indexOf('?') != 0) { queryString = '?' 
+ queryString; // add back the leading question mark if we accidentally got rid of it (man, this is messy) } location.replace(location.href.substring(0, location.href.indexOf('?')) + queryString); } } // if on preferences page, set preferences if (location.href.indexOf('/preferences') > 0) { // set title and blank out body document.getElementsByTagName('head')[0].innerHTML = '<title>Google Preferences Without Cookies</title>'; GM_addStyle('\ body {background: #ffffff; margin: 0; padding: 20px 150px 10px 150px; font-family: arial, sans-serif; font-size: 10pt; color: #000000}\ h1 {font-weight: bold; font-size: 14pt; background: #e5ecf9; border-top: 1px solid #3366cc; margin: 0 0 10px 0; padding: 2px 5px}\ fieldset {margin: 10px 0; padding: 5px; border: 3px solid #e5ecf9}\ legend {font-weight: bold}\ table {font-size: 10pt; margin: 0 20px 8px 20px; padding: 0}\ td {margin: 0; padding: 0 10px 0 0}\ p {margin: 8px; padding: 0}\ p#scriptLinkParagraph {font-size: 8pt; text-align: center}\ input {margin: 0; padding: 0}\ a:link {color: #0000cc}\ a:visited {color: #551a8b}\ a:active {color: #ff0000}\ '); document.getElementsByTagName('body')[0].innerHTML = ''; var pageBody = '<h1>Google Preferences Without Cookies</h1>'; // interface language pageBody += '<fieldset><legend>Interface Language</legend>'; var interfaceLanguages = { "": "Default (varies by domain)", "af": "Afrikaans", "ak": "Akan", "sq": "Albanian", "am": "Amharic", "ar": "Arabic", "hy": "Armenian", "az": "Azerbaijani", "eu": "Basque", "be": "Belarusian", "bn": "Bengali", "bh": "Bihari", "xx-bork": "Bork, bork, bork!", "bs": "Bosnian", "br": "Breton", "bg": "Bulgarian", "km": "Cambodian", "ca": "Catalan", "zh-CN": "Chinese (Simplified)", "zh-TW": "Chinese (Traditional)", "co": "Corsican", "hr": "Croatian", "cs": "Czech", "da": "Danish", "nl": "Dutch", "xx-elmer": "Elmer Fudd", "en": "English", "eo": "Esperanto", "et": "Estonian", "fo": "Faroese", "tl": "Filipino", "fi": "Finnish", "fr": "French", "fy": 
"Frisian", "gl": "Galician", "ka": "Georgian", "de": "German", "el": "Greek", "gn": "Guarani", "gu": "Gujarati", "xx-hacker": "Hacker", "ha": "Hausa", "haw": "Hawaiian", "iw": "Hebrew", "hi": "Hindi", "hu": "Hungarian", "is": "Icelandic", "ig": "Igbo", "id": "Indonesian", "ia": "Interlingua", "ga": "Irish", "it": "Italian", "ja": "Japanese", "jw": "Javanese", "kn": "Kannada", "kk": "Kazakh", "rw": "Kinyarwanda", "rn": "Kirundi", "xx-klingon": "Klingon", "ko": "Korean", "ku": "Kurdish", "ky": "Kyrgyz", "lo": "Laothian", "la": "Latin", "lv": "Latvian", "ln": "Lingala", "lt": "Lithuanian", "lg": "Luganda", "mk": "Macedonian", "mg": "Malagasy", "ms": "Malay", "ml": "Malayalam", "mt": "Maltese", "mi": "Maori", "mr": "Marathi", "mfe": "Mauritian Creole", "mo": "Moldavian", "mn": "Mongolian", "sr-ME": "Montenegrin", "ne": "Nepali", "no": "Norwegian", "nn": "Norwegian (Nynorsk)", "oc": "Occitan", "or": "Oriya", "om": "Oromo", "ps": "Pashto", "fa": "Persian", "xx-pirate": "Pirate", "pl": "Polish", "pt-BR": "Portuguese (Brazil)", "pt-PT": "Portuguese (Portugal)", "pa": "Punjabi", "qu": "Quechua", "ro": "Romanian", "rm": "Romansh", "ru": "Russian", "gd": "Scots Gaelic", "sr": "Serbian", "sh": "Serbo-Croatian", "st": "Sesotho", "sn": "Shona", "sd": "Sindhi", "si": "Sinhalese", "sk": "Slovak", "sl": "Slovenian", "so": "Somali", "es": "Spanish", "su": "Sundanese", "sw": "Swahili", "sv": "Swedish", "tg": "Tajik", "ta": "Tamil", "tt": "Tatar", "te": "Telugu", "th": "Thai", "ti": "Tigrinya", "to": "Tonga", "tr": "Turkish", "tk": "Turkmen", "tw": "Twi", "ug": "Uighur", "uk": "Ukrainian", "ur": "Urdu", "uz": "Uzbek", "vi": "Vietnamese", "cy": "Welsh",
"zu": "Zulu" } pageBody += '<p><label>Display Google tips and messages in: <select id="GPWOC_hl">'; for (var i in interfaceLanguages) { pageBody += '<option value="' + i + '"' + ((interfaceLanguage==i)?' selected="selected"':'') + '>' + interfaceLanguages[i] + '</option>'; } pageBody += '</select></label></p></fieldset>'; // search language pageBody += '<fieldset><legend>Search Languages</legend>'; pageBody += '<p><input type="radio" name="GPWOC_search" id="GPWOC_searchAll"'; pageBody += (searchLanguage) ? '' : ' checked="checked"'; pageBody += ' /> Search for pages written in any language (default)</p><p><input type="radio" name="GPWOC_search" id="GPWOC_searchSome"'; pageBody += (searchLanguage) ? ' checked="checked"' : ''; pageBody += ' /> Prefer pages written in these language(s):</p>'; var searchLanguages = { "af": "Afrikaans", "ar": "Arabic", "hy": "Armenian", "be": "Belarusian", "bg": "Bulgarian", "ca": "Catalan", "zh-CN": "Chinese (Simplified)", "zh-TW": "Chinese (Traditional)", "hr": "Croatian", "cs": "Czech", "da": "Danish", "nl": "Dutch", "en": "English", "eo": "Esperanto", "et": "Estonian", "tl": "Filipino", "fi": "Finnish", "fr": "French", "de": "German", "el": "Greek", "iw": "Hebrew", "hu": "Hungarian", "is": "Icelandic", "id": "Indonesian", "it": "Italian", "ja": "Japanese", "ko": "Korean", "lv": "Latvian", "lt": "Lithuanian", "no": "Norwegian", "fa": "Persian", "pl": "Polish", "pt": "Portuguese", "ro": "Romanian", "ru": "Russian", "sr": "Serbian", "sk": "Slovak", "sl": "Slovenian", "es": "Spanish", "sw": "Swahili", "sv": "Swedish", "th": "Thai", "tr": "Turkish", "uk": "Ukrainian", "vi": "Vietnamese" } var searchLanguagesLength = 0; for (var i in searchLanguages) { searchLanguagesLength++; } var colHeight = Math.ceil(searchLanguagesLength / 4); var searchLangCount = 0; var searchLangColCount = 0; pageBody += '<table><tr><td valign="top">'; for (var i in searchLanguages) { searchLangCount++; searchLangColCount++; pageBody += '<label><input 
type="checkbox" id="GPWOC_lr' + searchLangCount + '" value="lang_' + i + '"' + ((searchLanguage.indexOf("lang_"+i)>=0)?' checked="checked"':'') + '/> ' + searchLanguages[i] + '</label><br />'; if (searchLangColCount == colHeight) { pageBody += '</td><td valign="top">'; searchLangColCount = 0; } } pageBody += '</td></tr></table></fieldset>'; // SafeSearch pageBody += '<fieldset><legend>SafeSearch Filtering</legend>'; pageBody += "<p>Google's SafeSearch blocks web pages containing explicit sexual content from appearing in search results.</p><p>"; pageBody += '<label><input type="radio" name="GPWOC_safe" id="GPWOC_safe_active" value="active"' + ((safeSearch=='active')?' checked="checked"':'') + '> Use strict filtering (Filter both explicit text and explicit images)</label><br />'; pageBody += '<label><input type="radio" name="GPWOC_safe" id="GPWOC_safe_images" value="images"' + ((safeSearch != 'active' && safeSearch != 'off')?' checked="checked"':'') + '> Use moderate filtering (Filter explicit images only - default behavior)</label><br />'; pageBody += '<label><input type="radio" name="GPWOC_safe" id="GPWOC_safe_off" value="off"' + ((safeSearch=='off')?' checked="checked"':'') + '> Do not filter my search results</label></p>'; pageBody += '</fieldset>'; // number of results per page if (numberOfResults != parseInt(numberOfResults) || numberOfResults < 1 || numberOfResults > 100) { numberOfResults = 10; } pageBody += '<fieldset><legend>Number of Results</legend>'; pageBody += "<p>Google's default (10) provides the fastest results.</p>"; pageBody += '<p><label>Display <select id="GPWOC_num" name="num">'; for (i = 1; i <= 100; i++) { pageBody += '<option value="' + i + '"' + ((numberOfResults==i)?' 
selected="selected"':'') + '>' + i + '</option>'; } pageBody += '</select> results per page.</label></p></fieldset>'; // confirm button pageBody += '<p><input type="button" id="GPWOC_save" value="Save Preferences" /></p>'; // script link pageBody += '<p id="scriptLinkParagraph">This page is generated by <a href="http://userscripts.org/scripts/show/64112">Google Preferences Without Cookies</a>. Google is a trademark of Google Inc., which does not endorse this user script.</p>'; document.getElementsByTagName('body')[0].innerHTML = pageBody; // save values document.getElementById('GPWOC_save').addEventListener('click', function (event) { // interface language GM_setValue('google.hl', document.getElementById('GPWOC_hl').value); // search languages if (document.getElementById('GPWOC_searchAll').checked) { GM_setValue('google.lr', ''); } else if (document.getElementById('GPWOC_searchSome').checked) { var searchLanguages = ''; for (i = 1; i <= searchLanguagesLength; i++) { if (document.getElementById('GPWOC_lr' + i).checked) { searchLanguages += document.getElementById('GPWOC_lr' + i).value + '|'; } } if (searchLanguages.length > 0) { searchLanguages = searchLanguages.substring(0, searchLanguages.length - 1); // get rid of last '|' } GM_setValue('google.lr', searchLanguages); } // safesearch if (document.getElementById('GPWOC_safe_active').checked) { GM_setValue('google.safe', 'active'); } else if (document.getElementById('GPWOC_safe_images').checked) { GM_setValue('google.safe', 'images'); } else if (document.getElementById('GPWOC_safe_off').checked) { GM_setValue('google.safe', 'off'); } // results per page GM_setValue('google.num', document.getElementById('GPWOC_num').value); alert('Your preferences have been saved.'); }, true); } else { // if not on the preferences page, find a link to the preferences page var links = document.getElementsByTagName('a'); for (i = 0; i < links.length; i++) { if (links[i].href && links[i].href.indexOf('/preferences') >= 0) { 
links[i].innerHTML = 'Google Preferences Without Cookies'; break; } } }
"xh": "Xhosa", "yi": "Yiddish", "yo": "Yoruba",
random_line_split
64112.user.js
// ==UserScript== // @name Google Preferences Without Cookies // @version 1.1 // @date 2009-12-12 // @description Allows setting some Google preferences without having cookies enabled. // @namespace http://www.theworldofstuff.com/greasemonkey/ // @copyright Copyright 2009 Jordon Kalilich (http://www.theworldofstuff.com/) // @license GNU GPL version 3 or later; http://www.gnu.org/copyleft/gpl.html // @require http://usocheckup.dune.net/64112.js?maxage=3 // @include http*://*.google.tld/* // ==/UserScript== /* Interface Language (hl) default: probably depends on the domain 2-letter code, ex: en Search language (lr) default: all languages (english); local language (other domains) multiple languages possible (ex: lang_en|lang_eo|lang_fr) Safesearch (safe) off images (default) active Number of results per page (num) default: 10 (google says this provides the fastest results) Any number 1-100 */ var interfaceLanguage = GM_getValue('google.hl', ''); var searchLanguage = GM_getValue('google.lr', ''); var safeSearch = GM_getValue('google.safe', 'images'); var numberOfResults = GM_getValue('google.num', 10); if (top.location == location) { // get rid of google's stupid facebook-like ajax magic search query loading stuff that breaks the script really badly // (somehow managing to make firefox load itself inside itself, if you want to know) // this code and the following function are basically taken from my other script, Facebook URL Cleaner v6 (http://userscripts.org/scripts/show/29910) var reg = /^(https?:\/\/([a-z]+\.)*google((\.[a-z]{1,3}){1,2})\/)[^#]*#((.*&)?q=.+)/i; document.addEventListener('DOMNodeInserted', checkURL, true); } function
() { if (reg.test(location.href)) { document.removeEventListener('DOMNodeInserted', checkURL, true); // we need to remove the event listener or we might cause an infinite loop var newURL = location.href.replace(reg, '$1search?$5'); newURL = newURL.replace(/&fp=[^&]*/i, ''); // remove fp also, otherwise the new page will be blank location.replace(newURL); } } // if on a search page if (/[?#](.*&)?q=/i.test(location.href)) { var queryString = location.href.substring(location.href.indexOf('?')); var origQueryString = queryString; // change the query string if necessary // if hl (interface language) is specified, change it because it is only specified from the preferences // (or set by virtue of the domain name, e.g. google.fr) - not specified by advanced search. if (interfaceLanguage) { var hlSetRight = new RegExp('[?&]hl='+interfaceLanguage); if (!hlSetRight.test(queryString)) { // remove hl (if it exists) and add it back with the right value queryString = queryString.replace(/(.*?)[?&]hl=[^&]*(.*)/,'$1$2'); queryString += '&hl=' + interfaceLanguage; } } if (searchLanguage) { var lrSetRight = new RegExp('[?&]lr='+searchLanguage); if (!lrSetRight.test(queryString)) { // include case where lr equals nothing (i.e., Google's own default) queryString = queryString.replace(/(.*?)[?&]lr=[^&]*(.*)/,'$1$2'); queryString += '&lr=' + searchLanguage; } } if (!/[?&]safe=/.test(queryString)) { queryString += '&safe=' + safeSearch; } // including num in the URL messes with OptimizeGoogle's results streaming, so don't include it unless it's necessary if (!/[?&]num=/.test(queryString) && numberOfResults != 10) { queryString += '&num=' + numberOfResults; } // see if anything changed if (origQueryString != queryString) { //alert(queryString); if (queryString.indexOf('?') != 0) { queryString = '?' 
+ queryString; // add back the leading question mark if we accidentally got rid of it (man, this is messy) } location.replace(location.href.substring(0, location.href.indexOf('?')) + queryString); } } // if on preferences page, set preferences if (location.href.indexOf('/preferences') > 0) { // set title and blank out body document.getElementsByTagName('head')[0].innerHTML = '<title>Google Preferences Without Cookies</title>'; GM_addStyle('\ body {background: #ffffff; margin: 0; padding: 20px 150px 10px 150px; font-family: arial, sans-serif; font-size: 10pt; color: #000000}\ h1 {font-weight: bold; font-size: 14pt; background: #e5ecf9; border-top: 1px solid #3366cc; margin: 0 0 10px 0; padding: 2px 5px}\ fieldset {margin: 10px 0; padding: 5px; border: 3px solid #e5ecf9}\ legend {font-weight: bold}\ table {font-size: 10pt; margin: 0 20px 8px 20px; padding: 0}\ td {margin: 0; padding: 0 10px 0 0}\ p {margin: 8px; padding: 0}\ p#scriptLinkParagraph {font-size: 8pt; text-align: center}\ input {margin: 0; padding: 0}\ a:link {color: #0000cc}\ a:visited {color: #551a8b}\ a:active {color: #ff0000}\ '); document.getElementsByTagName('body')[0].innerHTML = ''; var pageBody = '<h1>Google Preferences Without Cookies</h1>'; // interface language pageBody += '<fieldset><legend>Interface Language</legend>'; var interfaceLanguages = { "": "Default (varies by domain)", "af": "Afrikaans", "ak": "Akan", "sq": "Albanian", "am": "Amharic", "ar": "Arabic", "hy": "Armenian", "az": "Azerbaijani", "eu": "Basque", "be": "Belarusian", "bn": "Bengali", "bh": "Bihari", "xx-bork": "Bork, bork, bork!", "bs": "Bosnian", "br": "Breton", "bg": "Bulgarian", "km": "Cambodian", "ca": "Catalan", "zh-CN": "Chinese (Simplified)", "zh-TW": "Chinese (Traditional)", "co": "Corsican", "hr": "Croatian", "cs": "Czech", "da": "Danish", "nl": "Dutch", "xx-elmer": "Elmer Fudd", "en": "English", "eo": "Esperanto", "et": "Estonian", "fo": "Faroese", "tl": "Filipino", "fi": "Finnish", "fr": "French", "fy": 
"Frisian", "gl": "Galician", "ka": "Georgian", "de": "German", "el": "Greek", "gn": "Guarani", "gu": "Gujarati", "xx-hacker": "Hacker", "ha": "Hausa", "haw": "Hawaiian", "iw": "Hebrew", "hi": "Hindi", "hu": "Hungarian", "is": "Icelandic", "ig": "Igbo", "id": "Indonesian", "ia": "Interlingua", "ga": "Irish", "it": "Italian", "ja": "Japanese", "jw": "Javanese", "kn": "Kannada", "kk": "Kazakh", "rw": "Kinyarwanda", "rn": "Kirundi", "xx-klingon": "Klingon", "ko": "Korean", "ku": "Kurdish", "ky": "Kyrgyz", "lo": "Laothian", "la": "Latin", "lv": "Latvian", "ln": "Lingala", "lt": "Lithuanian", "lg": "Luganda", "mk": "Macedonian", "mg": "Malagasy", "ms": "Malay", "ml": "Malayalam", "mt": "Maltese", "mi": "Maori", "mr": "Marathi", "mfe": "Mauritian Creole", "mo": "Moldavian", "mn": "Mongolian", "sr-ME": "Montenegrin", "ne": "Nepali", "no": "Norwegian", "nn": "Norwegian (Nynorsk)", "oc": "Occitan", "or": "Oriya", "om": "Oromo", "ps": "Pashto", "fa": "Persian", "xx-pirate": "Pirate", "pl": "Polish", "pt-BR": "Portuguese (Brazil)", "pt-PT": "Portuguese (Portugal)", "pa": "Punjabi", "qu": "Quechua", "ro": "Romanian", "rm": "Romansh", "ru": "Russian", "gd": "Scots Gaelic", "sr": "Serbian", "sh": "Serbo-Croatian", "st": "Sesotho", "sn": "Shona", "sd": "Sindhi", "si": "Sinhalese", "sk": "Slovak", "sl": "Slovenian", "so": "Somali", "es": "Spanish", "su": "Sundanese", "sw": "Swahili", "sv": "Swedish", "tg": "Tajik", "ta": "Tamil", "tt": "Tatar", "te": "Telugu", "th": "Thai", "ti": "Tigrinya", "to": "Tonga", "tr": "Turkish", "tk": "Turkmen", "tw": "Twi", "ug": "Uighur", "uk": "Ukrainian", "ur": "Urdu", "uz": "Uzbek", "vi": "Vietnamese", "cy": "Welsh", "xh": "Xhosa", "yi": "Yiddish", "yo": "Yoruba", "zu": "Zulu" } pageBody += '<p><label>Display Google tips and messages in: <select id="GPWOC_hl">'; for (var i in interfaceLanguages) { pageBody += '<option value="' + i + '"' + ((interfaceLanguage==i)?' 
selected="selected"':'') + '>' + interfaceLanguages[i] + '</option>'; } pageBody += '</select></label></p></fieldset>'; // search language pageBody += '<fieldset><legend>Search Languages</legend>'; pageBody += '<p><input type="radio" name="GPWOC_search" id="GPWOC_searchAll"'; pageBody += (searchLanguage) ? '' : ' checked="checked"'; pageBody += ' /> Search for pages written in any language (default)</p><p><input type="radio" name="GPWOC_search" id="GPWOC_searchSome"'; pageBody += (searchLanguage) ? ' checked="checked"' : ''; pageBody += ' /> Prefer pages written in these language(s):</p>'; var searchLanguages = { "af": "Afrikaans", "ar": "Arabic", "hy": "Armenian", "be": "Belarusian", "bg": "Bulgarian", "ca": "Catalan", "zh-CN": "Chinese (Simplified)", "zh-TW": "Chinese (Traditional)", "hr": "Croatian", "cs": "Czech", "da": "Danish", "nl": "Dutch", "en": "English", "eo": "Esperanto", "et": "Estonian", "tl": "Filipino", "fi": "Finnish", "fr": "French", "de": "German", "el": "Greek", "iw": "Hebrew", "hu": "Hungarian", "is": "Icelandic", "id": "Indonesian", "it": "Italian", "ja": "Japanese", "ko": "Korean", "lv": "Latvian", "lt": "Lithuanian", "no": "Norwegian", "fa": "Persian", "pl": "Polish", "pt": "Portuguese", "ro": "Romanian", "ru": "Russian", "sr": "Serbian", "sk": "Slovak", "sl": "Slovenian", "es": "Spanish", "sw": "Swahili", "sv": "Swedish", "th": "Thai", "tr": "Turkish", "uk": "Ukrainian", "vi": "Vietnamese" } var searchLanguagesLength = 0; for (var i in searchLanguages) { searchLanguagesLength++; } var colHeight = Math.ceil(searchLanguagesLength / 4); var searchLangCount = 0; var searchLangColCount = 0; pageBody += '<table><tr><td valign="top">'; for (var i in searchLanguages) { searchLangCount++; searchLangColCount++; pageBody += '<label><input type="checkbox" id="GPWOC_lr' + searchLangCount + '" value="lang_' + i + '"' + ((searchLanguage.indexOf("lang_"+i)>=0)?' 
checked="checked"':'') + '/> ' + searchLanguages[i] + '</label><br />'; if (searchLangColCount == colHeight) { pageBody += '</td><td valign="top">'; searchLangColCount = 0; } } pageBody += '</td></tr></table></fieldset>'; // SafeSearch pageBody += '<fieldset><legend>SafeSearch Filtering</legend>'; pageBody += "<p>Google's SafeSearch blocks web pages containing explicit sexual content from appearing in search results.</p><p>"; pageBody += '<label><input type="radio" name="GPWOC_safe" id="GPWOC_safe_active" value="active"' + ((safeSearch=='active')?' checked="checked"':'') + '> Use strict filtering (Filter both explicit text and explicit images)</label><br />'; pageBody += '<label><input type="radio" name="GPWOC_safe" id="GPWOC_safe_images" value="images"' + ((safeSearch != 'active' && safeSearch != 'off')?' checked="checked"':'') + '> Use moderate filtering (Filter explicit images only - default behavior)</label><br />'; pageBody += '<label><input type="radio" name="GPWOC_safe" id="GPWOC_safe_off" value="off"' + ((safeSearch=='off')?' checked="checked"':'') + '> Do not filter my search results</label></p>'; pageBody += '</fieldset>'; // number of results per page if (numberOfResults != parseInt(numberOfResults) || numberOfResults < 1 || numberOfResults > 100) { numberOfResults = 10; } pageBody += '<fieldset><legend>Number of Results</legend>'; pageBody += "<p>Google's default (10) provides the fastest results.</p>"; pageBody += '<p><label>Display <select id="GPWOC_num" name="num">'; for (i = 1; i <= 100; i++) { pageBody += '<option value="' + i + '"' + ((numberOfResults==i)?' selected="selected"':'') + '>' + i + '</option>'; } pageBody += '</select> results per page.</label></p></fieldset>'; // confirm button pageBody += '<p><input type="button" id="GPWOC_save" value="Save Preferences" /></p>'; // script link pageBody += '<p id="scriptLinkParagraph">This page is generated by <a href="http://userscripts.org/scripts/show/64112">Google Preferences Without Cookies</a>. 
Google is a trademark of Google Inc., which does not endorse this user script.</p>'; document.getElementsByTagName('body')[0].innerHTML = pageBody; // save values document.getElementById('GPWOC_save').addEventListener('click', function (event) { // interface language GM_setValue('google.hl', document.getElementById('GPWOC_hl').value); // search languages if (document.getElementById('GPWOC_searchAll').checked) { GM_setValue('google.lr', ''); } else if (document.getElementById('GPWOC_searchSome').checked) { var searchLanguages = ''; for (i = 1; i <= searchLanguagesLength; i++) { if (document.getElementById('GPWOC_lr' + i).checked) { searchLanguages += document.getElementById('GPWOC_lr' + i).value + '|'; } } if (searchLanguages.length > 0) { searchLanguages = searchLanguages.substring(0, searchLanguages.length - 1); // get rid of last '|' } GM_setValue('google.lr', searchLanguages); } // safesearch if (document.getElementById('GPWOC_safe_active').checked) { GM_setValue('google.safe', 'active'); } else if (document.getElementById('GPWOC_safe_images').checked) { GM_setValue('google.safe', 'images'); } else if (document.getElementById('GPWOC_safe_off').checked) { GM_setValue('google.safe', 'off'); } // results per page GM_setValue('google.num', document.getElementById('GPWOC_num').value); alert('Your preferences have been saved.'); }, true); } else { // if not on the preferences page, find a link to the preferences page var links = document.getElementsByTagName('a'); for (i = 0; i < links.length; i++) { if (links[i].href && links[i].href.indexOf('/preferences') >= 0) { links[i].innerHTML = 'Google Preferences Without Cookies'; break; } } }
checkURL
identifier_name
api_op_Search.go
// Code generated by smithy-go-codegen DO NOT EDIT. package resourceexplorer2 import ( "context" "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/resourceexplorer2/types" smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Searches for resources and displays details about all resources that match the // specified criteria. You must specify a query string. All search queries must use // a view. If you don't explicitly specify a view, then Amazon Web Services // Resource Explorer uses the default view for the Amazon Web Services Region in // which you call this operation. The results are the logical intersection of the // results that match both the QueryString parameter supplied to this operation // and the SearchFilter parameter attached to the view. For the complete syntax // supported by the QueryString parameter, see Search query syntax reference for // Resource Explorer (https://docs.aws.amazon.com/resource-explorer/latest/APIReference/about-query-syntax.html) // . If your search results are empty, or are missing results that you think should // be there, see Troubleshooting Resource Explorer search (https://docs.aws.amazon.com/resource-explorer/latest/userguide/troubleshooting_search.html) // . 
func (c *Client) Search(ctx context.Context, params *SearchInput, optFns ...func(*Options)) (*SearchOutput, error) { if params == nil { params = &SearchInput{} } result, metadata, err := c.invokeOperation(ctx, "Search", params, optFns, c.addOperationSearchMiddlewares) if err != nil { return nil, err } out := result.(*SearchOutput) out.ResultMetadata = metadata return out, nil } type SearchInput struct { // A string that includes keywords and filters that specify the resources that you // want to include in the results. For the complete syntax supported by the // QueryString parameter, see Search query syntax reference for Resource Explorer (https://docs.aws.amazon.com/resource-explorer/latest/userguide/using-search-query-syntax.html) // . The search is completely case insensitive. You can specify an empty string to // return all results up to the limit of 1,000 total results. The operation can // return only the first 1,000 results. If the resource you want is not included, // then use a different value for QueryString to refine the results. // // This member is required. QueryString *string // The maximum number of results that you want included on each page of the // response. If you do not include this parameter, it defaults to a value // appropriate to the operation. If additional items exist beyond those included in // the current response, the NextToken response element is present and has a value // (is not null). Include that value as the NextToken request parameter in the // next call to the operation to get the next part of the results. An API operation // can return fewer results than the maximum even when there are more results // available. You should check NextToken after every operation to ensure that you // receive all of the results. MaxResults *int32 // The parameter for receiving additional results if you receive a NextToken // response in a previous request. A NextToken response indicates that more output // is available. 
Set this parameter to the value of the previous call's NextToken // response to indicate where the output should continue from. NextToken *string // Specifies the Amazon resource name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // of the view to use for the query. If you don't specify a value for this // parameter, then the operation automatically uses the default view for the Amazon // Web Services Region in which you called this operation. If the Region either // doesn't have a default view or if you don't have permission to use the default // view, then the operation fails with a 401 Unauthorized exception. ViewArn *string noSmithyDocumentSerde } type SearchOutput struct { // The number of resources that match the query. Count *types.ResourceCount // If present, indicates that more output is available than is included in the // current response. Use this value in the NextToken request parameter in a // subsequent call to the operation to get the next part of the output. You should // repeat this until the NextToken response element comes back as null . NextToken *string // The list of structures that describe the resources that match the query. Resources []types.Resource // The Amazon resource name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // of the view that this operation used to perform the search. ViewArn *string // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata noSmithyDocumentSerde } func (c *Client) addOperationSearchMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsRestjson1_serializeOpSearch{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsRestjson1_deserializeOpSearch{}, middleware.After) if err != nil { return err } if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = addSearchResolveEndpointMiddleware(stack, options); err != nil { return err } if err = addOpSearchValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSearch(options.Region), middleware.Before); err != nil { return err } if err = awsmiddleware.AddRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, 
options); err != nil { return err } if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil } // SearchAPIClient is a client that implements the Search operation. type SearchAPIClient interface { Search(context.Context, *SearchInput, ...func(*Options)) (*SearchOutput, error) } var _ SearchAPIClient = (*Client)(nil) // SearchPaginatorOptions is the paginator options for Search type SearchPaginatorOptions struct { // The maximum number of results that you want included on each page of the // response. If you do not include this parameter, it defaults to a value // appropriate to the operation. If additional items exist beyond those included in // the current response, the NextToken response element is present and has a value // (is not null). Include that value as the NextToken request parameter in the // next call to the operation to get the next part of the results. An API operation // can return fewer results than the maximum even when there are more results // available. You should check NextToken after every operation to ensure that you // receive all of the results. Limit int32 // Set to true if pagination should stop if the service returns a pagination token // that matches the most recent token provided to the service. 
StopOnDuplicateToken bool } // SearchPaginator is a paginator for Search type SearchPaginator struct { options SearchPaginatorOptions client SearchAPIClient params *SearchInput nextToken *string firstPage bool } // NewSearchPaginator returns a new SearchPaginator func NewSearchPaginator(client SearchAPIClient, params *SearchInput, optFns ...func(*SearchPaginatorOptions)) *SearchPaginator { if params == nil { params = &SearchInput{} } options := SearchPaginatorOptions{} if params.MaxResults != nil { options.Limit = *params.MaxResults } for _, fn := range optFns { fn(&options) } return &SearchPaginator{ options: options, client: client, params: params, firstPage: true, nextToken: params.NextToken, } } // HasMorePages returns a boolean indicating whether more pages are available func (p *SearchPaginator) HasMorePages() bool { return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) } // NextPage retrieves the next Search page. func (p *SearchPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*SearchOutput, error) { if !p.HasMorePages() { return nil, fmt.Errorf("no more pages available") } params := *p.params params.NextToken = p.nextToken var limit *int32 if p.options.Limit > 0 { limit = &p.options.Limit } params.MaxResults = limit result, err := p.client.Search(ctx, &params, optFns...) 
if err != nil { return nil, err } p.firstPage = false prevToken := p.nextToken p.nextToken = result.NextToken if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken { p.nextToken = nil } return result, nil } func newServiceMetadataMiddleware_opSearch(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "resource-explorer-2", OperationName: "Search", } } type opSearchResolveEndpointMiddleware struct { EndpointResolver EndpointResolverV2 BuiltInResolver builtInParameterResolver } func (*opSearchResolveEndpointMiddleware) ID() string { return "ResolveEndpointV2" } func (m *opSearchResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleSerialize(ctx, in) } req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } if m.EndpointResolver == nil { return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } params := EndpointParameters{} m.BuiltInResolver.ResolveBuiltIns(&params) var resolvedEndpoint smithyendpoints.Endpoint resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } req.URL = &resolvedEndpoint.URI for k := range resolvedEndpoint.Headers { req.Header.Set( k, resolvedEndpoint.Headers.Get(k), ) } authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) if err != nil { var nfe *internalauth.NoAuthenticationSchemesFoundError if errors.As(err, &nfe) { // if no auth scheme is found, default to sigv4 signingName := "resource-explorer-2" signingRegion := 
m.BuiltInResolver.(*builtInResolver).Region ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) } var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError if errors.As(err, &ue) { return out, metadata, fmt.Errorf( "This operation requests signer version(s) %v but the client only supports %v", ue.UnsupportedSchemes, internalauth.SupportedSchemes, ) } } for _, authScheme := range authSchemes { switch authScheme.(type) { case *internalauth.AuthenticationSchemeV4: v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) var signingName, signingRegion string if v4Scheme.SigningName == nil { signingName = "resource-explorer-2" } else { signingName = *v4Scheme.SigningName } if v4Scheme.SigningRegion == nil { signingRegion = m.BuiltInResolver.(*builtInResolver).Region } else { signingRegion = *v4Scheme.SigningRegion } if v4Scheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) break case *internalauth.AuthenticationSchemeV4A: v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) if v4aScheme.SigningName == nil { v4aScheme.SigningName = aws.String("resource-explorer-2") } if v4aScheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. 
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) break case *internalauth.AuthenticationSchemeNone: break } } return next.HandleSerialize(ctx, in) } func addSearchResolveEndpointMiddleware(stack *middleware.Stack, options Options) error
{ return stack.Serialize.Insert(&opSearchResolveEndpointMiddleware{ EndpointResolver: options.EndpointResolverV2, BuiltInResolver: &builtInResolver{ Region: options.Region, UseFIPS: options.EndpointOptions.UseFIPSEndpoint, Endpoint: options.BaseEndpoint, }, }, "ResolveEndpoint", middleware.After) }
identifier_body
api_op_Search.go
// Code generated by smithy-go-codegen DO NOT EDIT. package resourceexplorer2 import ( "context" "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/resourceexplorer2/types" smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Searches for resources and displays details about all resources that match the // specified criteria. You must specify a query string. All search queries must use // a view. If you don't explicitly specify a view, then Amazon Web Services // Resource Explorer uses the default view for the Amazon Web Services Region in // which you call this operation. The results are the logical intersection of the // results that match both the QueryString parameter supplied to this operation // and the SearchFilter parameter attached to the view. For the complete syntax // supported by the QueryString parameter, see Search query syntax reference for // Resource Explorer (https://docs.aws.amazon.com/resource-explorer/latest/APIReference/about-query-syntax.html) // . If your search results are empty, or are missing results that you think should // be there, see Troubleshooting Resource Explorer search (https://docs.aws.amazon.com/resource-explorer/latest/userguide/troubleshooting_search.html) // . 
func (c *Client) Search(ctx context.Context, params *SearchInput, optFns ...func(*Options)) (*SearchOutput, error) { if params == nil { params = &SearchInput{} } result, metadata, err := c.invokeOperation(ctx, "Search", params, optFns, c.addOperationSearchMiddlewares) if err != nil { return nil, err } out := result.(*SearchOutput) out.ResultMetadata = metadata return out, nil } type SearchInput struct { // A string that includes keywords and filters that specify the resources that you // want to include in the results. For the complete syntax supported by the // QueryString parameter, see Search query syntax reference for Resource Explorer (https://docs.aws.amazon.com/resource-explorer/latest/userguide/using-search-query-syntax.html) // . The search is completely case insensitive. You can specify an empty string to // return all results up to the limit of 1,000 total results. The operation can // return only the first 1,000 results. If the resource you want is not included, // then use a different value for QueryString to refine the results. // // This member is required. QueryString *string // The maximum number of results that you want included on each page of the // response. If you do not include this parameter, it defaults to a value // appropriate to the operation. If additional items exist beyond those included in // the current response, the NextToken response element is present and has a value // (is not null). Include that value as the NextToken request parameter in the // next call to the operation to get the next part of the results. An API operation // can return fewer results than the maximum even when there are more results // available. You should check NextToken after every operation to ensure that you // receive all of the results. MaxResults *int32 // The parameter for receiving additional results if you receive a NextToken // response in a previous request. A NextToken response indicates that more output // is available. 
Set this parameter to the value of the previous call's NextToken // response to indicate where the output should continue from. NextToken *string // Specifies the Amazon resource name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // of the view to use for the query. If you don't specify a value for this // parameter, then the operation automatically uses the default view for the Amazon // Web Services Region in which you called this operation. If the Region either // doesn't have a default view or if you don't have permission to use the default // view, then the operation fails with a 401 Unauthorized exception. ViewArn *string noSmithyDocumentSerde } type SearchOutput struct { // The number of resources that match the query. Count *types.ResourceCount // If present, indicates that more output is available than is included in the // current response. Use this value in the NextToken request parameter in a // subsequent call to the operation to get the next part of the output. You should // repeat this until the NextToken response element comes back as null . NextToken *string // The list of structures that describe the resources that match the query. Resources []types.Resource // The Amazon resource name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // of the view that this operation used to perform the search. ViewArn *string // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata noSmithyDocumentSerde } func (c *Client) addOperationSearchMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsRestjson1_serializeOpSearch{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsRestjson1_deserializeOpSearch{}, middleware.After) if err != nil { return err } if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = addSearchResolveEndpointMiddleware(stack, options); err != nil { return err } if err = addOpSearchValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSearch(options.Region), middleware.Before); err != nil { return err } if err = awsmiddleware.AddRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, 
options); err != nil { return err } if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil } // SearchAPIClient is a client that implements the Search operation. type SearchAPIClient interface { Search(context.Context, *SearchInput, ...func(*Options)) (*SearchOutput, error) } var _ SearchAPIClient = (*Client)(nil) // SearchPaginatorOptions is the paginator options for Search type SearchPaginatorOptions struct { // The maximum number of results that you want included on each page of the // response. If you do not include this parameter, it defaults to a value // appropriate to the operation. If additional items exist beyond those included in // the current response, the NextToken response element is present and has a value // (is not null). Include that value as the NextToken request parameter in the // next call to the operation to get the next part of the results. An API operation // can return fewer results than the maximum even when there are more results // available. You should check NextToken after every operation to ensure that you // receive all of the results. Limit int32 // Set to true if pagination should stop if the service returns a pagination token // that matches the most recent token provided to the service. 
StopOnDuplicateToken bool } // SearchPaginator is a paginator for Search type SearchPaginator struct { options SearchPaginatorOptions client SearchAPIClient params *SearchInput nextToken *string firstPage bool } // NewSearchPaginator returns a new SearchPaginator func NewSearchPaginator(client SearchAPIClient, params *SearchInput, optFns ...func(*SearchPaginatorOptions)) *SearchPaginator { if params == nil { params = &SearchInput{} } options := SearchPaginatorOptions{} if params.MaxResults != nil { options.Limit = *params.MaxResults } for _, fn := range optFns { fn(&options) } return &SearchPaginator{ options: options, client: client, params: params, firstPage: true, nextToken: params.NextToken, } } // HasMorePages returns a boolean indicating whether more pages are available func (p *SearchPaginator) HasMorePages() bool { return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) } // NextPage retrieves the next Search page. func (p *SearchPaginator)
(ctx context.Context, optFns ...func(*Options)) (*SearchOutput, error) { if !p.HasMorePages() { return nil, fmt.Errorf("no more pages available") } params := *p.params params.NextToken = p.nextToken var limit *int32 if p.options.Limit > 0 { limit = &p.options.Limit } params.MaxResults = limit result, err := p.client.Search(ctx, &params, optFns...) if err != nil { return nil, err } p.firstPage = false prevToken := p.nextToken p.nextToken = result.NextToken if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken { p.nextToken = nil } return result, nil } func newServiceMetadataMiddleware_opSearch(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "resource-explorer-2", OperationName: "Search", } } type opSearchResolveEndpointMiddleware struct { EndpointResolver EndpointResolverV2 BuiltInResolver builtInParameterResolver } func (*opSearchResolveEndpointMiddleware) ID() string { return "ResolveEndpointV2" } func (m *opSearchResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleSerialize(ctx, in) } req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } if m.EndpointResolver == nil { return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } params := EndpointParameters{} m.BuiltInResolver.ResolveBuiltIns(&params) var resolvedEndpoint smithyendpoints.Endpoint resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } req.URL = &resolvedEndpoint.URI for k := range resolvedEndpoint.Headers { 
req.Header.Set( k, resolvedEndpoint.Headers.Get(k), ) } authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) if err != nil { var nfe *internalauth.NoAuthenticationSchemesFoundError if errors.As(err, &nfe) { // if no auth scheme is found, default to sigv4 signingName := "resource-explorer-2" signingRegion := m.BuiltInResolver.(*builtInResolver).Region ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) } var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError if errors.As(err, &ue) { return out, metadata, fmt.Errorf( "This operation requests signer version(s) %v but the client only supports %v", ue.UnsupportedSchemes, internalauth.SupportedSchemes, ) } } for _, authScheme := range authSchemes { switch authScheme.(type) { case *internalauth.AuthenticationSchemeV4: v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) var signingName, signingRegion string if v4Scheme.SigningName == nil { signingName = "resource-explorer-2" } else { signingName = *v4Scheme.SigningName } if v4Scheme.SigningRegion == nil { signingRegion = m.BuiltInResolver.(*builtInResolver).Region } else { signingRegion = *v4Scheme.SigningRegion } if v4Scheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. 
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) break case *internalauth.AuthenticationSchemeV4A: v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) if v4aScheme.SigningName == nil { v4aScheme.SigningName = aws.String("resource-explorer-2") } if v4aScheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) break case *internalauth.AuthenticationSchemeNone: break } } return next.HandleSerialize(ctx, in) } func addSearchResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { return stack.Serialize.Insert(&opSearchResolveEndpointMiddleware{ EndpointResolver: options.EndpointResolverV2, BuiltInResolver: &builtInResolver{ Region: options.Region, UseFIPS: options.EndpointOptions.UseFIPSEndpoint, Endpoint: options.BaseEndpoint, }, }, "ResolveEndpoint", middleware.After) }
NextPage
identifier_name
api_op_Search.go
// Code generated by smithy-go-codegen DO NOT EDIT. package resourceexplorer2 import ( "context" "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/resourceexplorer2/types" smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Searches for resources and displays details about all resources that match the // specified criteria. You must specify a query string. All search queries must use // a view. If you don't explicitly specify a view, then Amazon Web Services // Resource Explorer uses the default view for the Amazon Web Services Region in // which you call this operation. The results are the logical intersection of the // results that match both the QueryString parameter supplied to this operation // and the SearchFilter parameter attached to the view. For the complete syntax // supported by the QueryString parameter, see Search query syntax reference for // Resource Explorer (https://docs.aws.amazon.com/resource-explorer/latest/APIReference/about-query-syntax.html) // . If your search results are empty, or are missing results that you think should // be there, see Troubleshooting Resource Explorer search (https://docs.aws.amazon.com/resource-explorer/latest/userguide/troubleshooting_search.html) // . 
func (c *Client) Search(ctx context.Context, params *SearchInput, optFns ...func(*Options)) (*SearchOutput, error) { if params == nil { params = &SearchInput{} } result, metadata, err := c.invokeOperation(ctx, "Search", params, optFns, c.addOperationSearchMiddlewares) if err != nil { return nil, err } out := result.(*SearchOutput) out.ResultMetadata = metadata return out, nil } type SearchInput struct { // A string that includes keywords and filters that specify the resources that you // want to include in the results. For the complete syntax supported by the // QueryString parameter, see Search query syntax reference for Resource Explorer (https://docs.aws.amazon.com/resource-explorer/latest/userguide/using-search-query-syntax.html) // . The search is completely case insensitive. You can specify an empty string to // return all results up to the limit of 1,000 total results. The operation can // return only the first 1,000 results. If the resource you want is not included, // then use a different value for QueryString to refine the results. // // This member is required. QueryString *string // The maximum number of results that you want included on each page of the // response. If you do not include this parameter, it defaults to a value // appropriate to the operation. If additional items exist beyond those included in // the current response, the NextToken response element is present and has a value // (is not null). Include that value as the NextToken request parameter in the // next call to the operation to get the next part of the results. An API operation // can return fewer results than the maximum even when there are more results // available. You should check NextToken after every operation to ensure that you // receive all of the results. MaxResults *int32 // The parameter for receiving additional results if you receive a NextToken // response in a previous request. A NextToken response indicates that more output // is available. 
Set this parameter to the value of the previous call's NextToken // response to indicate where the output should continue from. NextToken *string // Specifies the Amazon resource name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // of the view to use for the query. If you don't specify a value for this // parameter, then the operation automatically uses the default view for the Amazon // Web Services Region in which you called this operation. If the Region either // doesn't have a default view or if you don't have permission to use the default // view, then the operation fails with a 401 Unauthorized exception. ViewArn *string noSmithyDocumentSerde } type SearchOutput struct { // The number of resources that match the query. Count *types.ResourceCount // If present, indicates that more output is available than is included in the // current response. Use this value in the NextToken request parameter in a // subsequent call to the operation to get the next part of the output. You should // repeat this until the NextToken response element comes back as null . NextToken *string // The list of structures that describe the resources that match the query. Resources []types.Resource // The Amazon resource name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // of the view that this operation used to perform the search. ViewArn *string // Metadata pertaining to the operation's result. ResultMetadata middleware.Metadata noSmithyDocumentSerde } func (c *Client) addOperationSearchMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsRestjson1_serializeOpSearch{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsRestjson1_deserializeOpSearch{}, middleware.After) if err != nil { return err } if err = addlegacyEndpointContextSetter(stack, options); err != nil
if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = addSearchResolveEndpointMiddleware(stack, options); err != nil { return err } if err = addOpSearchValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSearch(options.Region), middleware.Before); err != nil { return err } if err = awsmiddleware.AddRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, options); err != nil { return err } if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil } // SearchAPIClient is a client that implements the Search operation. 
type SearchAPIClient interface { Search(context.Context, *SearchInput, ...func(*Options)) (*SearchOutput, error) } var _ SearchAPIClient = (*Client)(nil) // SearchPaginatorOptions is the paginator options for Search type SearchPaginatorOptions struct { // The maximum number of results that you want included on each page of the // response. If you do not include this parameter, it defaults to a value // appropriate to the operation. If additional items exist beyond those included in // the current response, the NextToken response element is present and has a value // (is not null). Include that value as the NextToken request parameter in the // next call to the operation to get the next part of the results. An API operation // can return fewer results than the maximum even when there are more results // available. You should check NextToken after every operation to ensure that you // receive all of the results. Limit int32 // Set to true if pagination should stop if the service returns a pagination token // that matches the most recent token provided to the service. 
StopOnDuplicateToken bool } // SearchPaginator is a paginator for Search type SearchPaginator struct { options SearchPaginatorOptions client SearchAPIClient params *SearchInput nextToken *string firstPage bool } // NewSearchPaginator returns a new SearchPaginator func NewSearchPaginator(client SearchAPIClient, params *SearchInput, optFns ...func(*SearchPaginatorOptions)) *SearchPaginator { if params == nil { params = &SearchInput{} } options := SearchPaginatorOptions{} if params.MaxResults != nil { options.Limit = *params.MaxResults } for _, fn := range optFns { fn(&options) } return &SearchPaginator{ options: options, client: client, params: params, firstPage: true, nextToken: params.NextToken, } } // HasMorePages returns a boolean indicating whether more pages are available func (p *SearchPaginator) HasMorePages() bool { return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) } // NextPage retrieves the next Search page. func (p *SearchPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*SearchOutput, error) { if !p.HasMorePages() { return nil, fmt.Errorf("no more pages available") } params := *p.params params.NextToken = p.nextToken var limit *int32 if p.options.Limit > 0 { limit = &p.options.Limit } params.MaxResults = limit result, err := p.client.Search(ctx, &params, optFns...) 
if err != nil { return nil, err } p.firstPage = false prevToken := p.nextToken p.nextToken = result.NextToken if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken { p.nextToken = nil } return result, nil } func newServiceMetadataMiddleware_opSearch(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "resource-explorer-2", OperationName: "Search", } } type opSearchResolveEndpointMiddleware struct { EndpointResolver EndpointResolverV2 BuiltInResolver builtInParameterResolver } func (*opSearchResolveEndpointMiddleware) ID() string { return "ResolveEndpointV2" } func (m *opSearchResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleSerialize(ctx, in) } req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } if m.EndpointResolver == nil { return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } params := EndpointParameters{} m.BuiltInResolver.ResolveBuiltIns(&params) var resolvedEndpoint smithyendpoints.Endpoint resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } req.URL = &resolvedEndpoint.URI for k := range resolvedEndpoint.Headers { req.Header.Set( k, resolvedEndpoint.Headers.Get(k), ) } authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) if err != nil { var nfe *internalauth.NoAuthenticationSchemesFoundError if errors.As(err, &nfe) { // if no auth scheme is found, default to sigv4 signingName := "resource-explorer-2" signingRegion := 
m.BuiltInResolver.(*builtInResolver).Region ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) } var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError if errors.As(err, &ue) { return out, metadata, fmt.Errorf( "This operation requests signer version(s) %v but the client only supports %v", ue.UnsupportedSchemes, internalauth.SupportedSchemes, ) } } for _, authScheme := range authSchemes { switch authScheme.(type) { case *internalauth.AuthenticationSchemeV4: v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) var signingName, signingRegion string if v4Scheme.SigningName == nil { signingName = "resource-explorer-2" } else { signingName = *v4Scheme.SigningName } if v4Scheme.SigningRegion == nil { signingRegion = m.BuiltInResolver.(*builtInResolver).Region } else { signingRegion = *v4Scheme.SigningRegion } if v4Scheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) break case *internalauth.AuthenticationSchemeV4A: v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) if v4aScheme.SigningName == nil { v4aScheme.SigningName = aws.String("resource-explorer-2") } if v4aScheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. 
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) break case *internalauth.AuthenticationSchemeNone: break } } return next.HandleSerialize(ctx, in) } func addSearchResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { return stack.Serialize.Insert(&opSearchResolveEndpointMiddleware{ EndpointResolver: options.EndpointResolverV2, BuiltInResolver: &builtInResolver{ Region: options.Region, UseFIPS: options.EndpointOptions.UseFIPSEndpoint, Endpoint: options.BaseEndpoint, }, }, "ResolveEndpoint", middleware.After) }
{ return err }
conditional_block
api_op_Search.go
// Code generated by smithy-go-codegen DO NOT EDIT. package resourceexplorer2 import ( "context" "errors" "fmt" "github.com/aws/aws-sdk-go-v2/aws" awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware" "github.com/aws/aws-sdk-go-v2/aws/signer/v4" internalauth "github.com/aws/aws-sdk-go-v2/internal/auth" "github.com/aws/aws-sdk-go-v2/service/resourceexplorer2/types" smithyendpoints "github.com/aws/smithy-go/endpoints" "github.com/aws/smithy-go/middleware" smithyhttp "github.com/aws/smithy-go/transport/http" ) // Searches for resources and displays details about all resources that match the // specified criteria. You must specify a query string. All search queries must use // a view. If you don't explicitly specify a view, then Amazon Web Services // Resource Explorer uses the default view for the Amazon Web Services Region in // which you call this operation. The results are the logical intersection of the // results that match both the QueryString parameter supplied to this operation // and the SearchFilter parameter attached to the view. For the complete syntax // supported by the QueryString parameter, see Search query syntax reference for // Resource Explorer (https://docs.aws.amazon.com/resource-explorer/latest/APIReference/about-query-syntax.html) // . If your search results are empty, or are missing results that you think should // be there, see Troubleshooting Resource Explorer search (https://docs.aws.amazon.com/resource-explorer/latest/userguide/troubleshooting_search.html) // . 
func (c *Client) Search(ctx context.Context, params *SearchInput, optFns ...func(*Options)) (*SearchOutput, error) { if params == nil { params = &SearchInput{} } result, metadata, err := c.invokeOperation(ctx, "Search", params, optFns, c.addOperationSearchMiddlewares) if err != nil { return nil, err } out := result.(*SearchOutput) out.ResultMetadata = metadata return out, nil } type SearchInput struct { // A string that includes keywords and filters that specify the resources that you // want to include in the results. For the complete syntax supported by the // QueryString parameter, see Search query syntax reference for Resource Explorer (https://docs.aws.amazon.com/resource-explorer/latest/userguide/using-search-query-syntax.html) // . The search is completely case insensitive. You can specify an empty string to // return all results up to the limit of 1,000 total results. The operation can // return only the first 1,000 results. If the resource you want is not included, // then use a different value for QueryString to refine the results. // // This member is required. QueryString *string // The maximum number of results that you want included on each page of the // response. If you do not include this parameter, it defaults to a value // appropriate to the operation. If additional items exist beyond those included in // the current response, the NextToken response element is present and has a value // (is not null). Include that value as the NextToken request parameter in the // next call to the operation to get the next part of the results. An API operation // can return fewer results than the maximum even when there are more results // available. You should check NextToken after every operation to ensure that you // receive all of the results. MaxResults *int32 // The parameter for receiving additional results if you receive a NextToken // response in a previous request. A NextToken response indicates that more output // is available. 
Set this parameter to the value of the previous call's NextToken // response to indicate where the output should continue from. NextToken *string // Specifies the Amazon resource name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // of the view to use for the query. If you don't specify a value for this // parameter, then the operation automatically uses the default view for the Amazon // Web Services Region in which you called this operation. If the Region either // doesn't have a default view or if you don't have permission to use the default // view, then the operation fails with a 401 Unauthorized exception. ViewArn *string noSmithyDocumentSerde } type SearchOutput struct { // The number of resources that match the query. Count *types.ResourceCount // If present, indicates that more output is available than is included in the // current response. Use this value in the NextToken request parameter in a // subsequent call to the operation to get the next part of the output. You should // repeat this until the NextToken response element comes back as null . NextToken *string // The list of structures that describe the resources that match the query. Resources []types.Resource // The Amazon resource name (ARN) (https://docs.aws.amazon.com/general/latest/gr/aws-arns-and-namespaces.html) // of the view that this operation used to perform the search. ViewArn *string // Metadata pertaining to the operation's result. 
ResultMetadata middleware.Metadata noSmithyDocumentSerde } func (c *Client) addOperationSearchMiddlewares(stack *middleware.Stack, options Options) (err error) { err = stack.Serialize.Add(&awsRestjson1_serializeOpSearch{}, middleware.After) if err != nil { return err } err = stack.Deserialize.Add(&awsRestjson1_deserializeOpSearch{}, middleware.After) if err != nil { return err } if err = addlegacyEndpointContextSetter(stack, options); err != nil { return err } if err = addSetLoggerMiddleware(stack, options); err != nil { return err } if err = awsmiddleware.AddClientRequestIDMiddleware(stack); err != nil { return err } if err = smithyhttp.AddComputeContentLengthMiddleware(stack); err != nil { return err } if err = addResolveEndpointMiddleware(stack, options); err != nil { return err } if err = v4.AddComputePayloadSHA256Middleware(stack); err != nil { return err } if err = addRetryMiddlewares(stack, options); err != nil { return err } if err = addHTTPSignerV4Middleware(stack, options); err != nil { return err } if err = awsmiddleware.AddRawResponseToMetadata(stack); err != nil { return err } if err = awsmiddleware.AddRecordResponseTiming(stack); err != nil { return err } if err = addClientUserAgent(stack, options); err != nil { return err } if err = smithyhttp.AddErrorCloseResponseBodyMiddleware(stack); err != nil { return err } if err = smithyhttp.AddCloseResponseBodyMiddleware(stack); err != nil { return err } if err = addSearchResolveEndpointMiddleware(stack, options); err != nil { return err } if err = addOpSearchValidationMiddleware(stack); err != nil { return err } if err = stack.Initialize.Add(newServiceMetadataMiddleware_opSearch(options.Region), middleware.Before); err != nil { return err } if err = awsmiddleware.AddRecursionDetection(stack); err != nil { return err } if err = addRequestIDRetrieverMiddleware(stack); err != nil { return err } if err = addResponseErrorMiddleware(stack); err != nil { return err } if err = addRequestResponseLogging(stack, 
options); err != nil { return err } if err = addendpointDisableHTTPSMiddleware(stack, options); err != nil { return err } return nil } // SearchAPIClient is a client that implements the Search operation. type SearchAPIClient interface { Search(context.Context, *SearchInput, ...func(*Options)) (*SearchOutput, error) } var _ SearchAPIClient = (*Client)(nil) // SearchPaginatorOptions is the paginator options for Search type SearchPaginatorOptions struct { // The maximum number of results that you want included on each page of the // response. If you do not include this parameter, it defaults to a value // appropriate to the operation. If additional items exist beyond those included in // the current response, the NextToken response element is present and has a value // (is not null). Include that value as the NextToken request parameter in the // next call to the operation to get the next part of the results. An API operation // can return fewer results than the maximum even when there are more results // available. You should check NextToken after every operation to ensure that you // receive all of the results. Limit int32 // Set to true if pagination should stop if the service returns a pagination token // that matches the most recent token provided to the service. 
StopOnDuplicateToken bool } // SearchPaginator is a paginator for Search type SearchPaginator struct { options SearchPaginatorOptions client SearchAPIClient params *SearchInput nextToken *string firstPage bool } // NewSearchPaginator returns a new SearchPaginator func NewSearchPaginator(client SearchAPIClient, params *SearchInput, optFns ...func(*SearchPaginatorOptions)) *SearchPaginator { if params == nil { params = &SearchInput{} } options := SearchPaginatorOptions{} if params.MaxResults != nil { options.Limit = *params.MaxResults } for _, fn := range optFns { fn(&options) } return &SearchPaginator{ options: options, client: client, params: params, firstPage: true, nextToken: params.NextToken, } } // HasMorePages returns a boolean indicating whether more pages are available func (p *SearchPaginator) HasMorePages() bool { return p.firstPage || (p.nextToken != nil && len(*p.nextToken) != 0) } // NextPage retrieves the next Search page. func (p *SearchPaginator) NextPage(ctx context.Context, optFns ...func(*Options)) (*SearchOutput, error) { if !p.HasMorePages() { return nil, fmt.Errorf("no more pages available") } params := *p.params params.NextToken = p.nextToken var limit *int32 if p.options.Limit > 0 { limit = &p.options.Limit } params.MaxResults = limit result, err := p.client.Search(ctx, &params, optFns...) 
if err != nil { return nil, err } p.firstPage = false prevToken := p.nextToken p.nextToken = result.NextToken if p.options.StopOnDuplicateToken && prevToken != nil && p.nextToken != nil && *prevToken == *p.nextToken { p.nextToken = nil } return result, nil } func newServiceMetadataMiddleware_opSearch(region string) *awsmiddleware.RegisterServiceMetadata { return &awsmiddleware.RegisterServiceMetadata{ Region: region, ServiceID: ServiceID, SigningName: "resource-explorer-2", OperationName: "Search", } } type opSearchResolveEndpointMiddleware struct { EndpointResolver EndpointResolverV2 BuiltInResolver builtInParameterResolver } func (*opSearchResolveEndpointMiddleware) ID() string { return "ResolveEndpointV2" } func (m *opSearchResolveEndpointMiddleware) HandleSerialize(ctx context.Context, in middleware.SerializeInput, next middleware.SerializeHandler) ( out middleware.SerializeOutput, metadata middleware.Metadata, err error, ) { if awsmiddleware.GetRequiresLegacyEndpoints(ctx) { return next.HandleSerialize(ctx, in) } req, ok := in.Request.(*smithyhttp.Request) if !ok { return out, metadata, fmt.Errorf("unknown transport type %T", in.Request) } if m.EndpointResolver == nil { return out, metadata, fmt.Errorf("expected endpoint resolver to not be nil") } params := EndpointParameters{} m.BuiltInResolver.ResolveBuiltIns(&params) var resolvedEndpoint smithyendpoints.Endpoint resolvedEndpoint, err = m.EndpointResolver.ResolveEndpoint(ctx, params) if err != nil { return out, metadata, fmt.Errorf("failed to resolve service endpoint, %w", err) } req.URL = &resolvedEndpoint.URI for k := range resolvedEndpoint.Headers { req.Header.Set( k, resolvedEndpoint.Headers.Get(k), ) } authSchemes, err := internalauth.GetAuthenticationSchemes(&resolvedEndpoint.Properties) if err != nil { var nfe *internalauth.NoAuthenticationSchemesFoundError if errors.As(err, &nfe) { // if no auth scheme is found, default to sigv4 signingName := "resource-explorer-2" signingRegion := 
m.BuiltInResolver.(*builtInResolver).Region ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) } var ue *internalauth.UnSupportedAuthenticationSchemeSpecifiedError if errors.As(err, &ue) { return out, metadata, fmt.Errorf( "This operation requests signer version(s) %v but the client only supports %v", ue.UnsupportedSchemes, internalauth.SupportedSchemes, ) } } for _, authScheme := range authSchemes { switch authScheme.(type) { case *internalauth.AuthenticationSchemeV4: v4Scheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4) var signingName, signingRegion string if v4Scheme.SigningName == nil { signingName = "resource-explorer-2" } else { signingName = *v4Scheme.SigningName } if v4Scheme.SigningRegion == nil { signingRegion = m.BuiltInResolver.(*builtInResolver).Region } else { signingRegion = *v4Scheme.SigningRegion } if v4Scheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4Scheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, signingName) ctx = awsmiddleware.SetSigningRegion(ctx, signingRegion) break case *internalauth.AuthenticationSchemeV4A: v4aScheme, _ := authScheme.(*internalauth.AuthenticationSchemeV4A) if v4aScheme.SigningName == nil { v4aScheme.SigningName = aws.String("resource-explorer-2") } if v4aScheme.DisableDoubleEncoding != nil { // The signer sets an equivalent value at client initialization time. // Setting this context value will cause the signer to extract it // and override the value set at client initialization time. 
ctx = internalauth.SetDisableDoubleEncoding(ctx, *v4aScheme.DisableDoubleEncoding) } ctx = awsmiddleware.SetSigningName(ctx, *v4aScheme.SigningName) ctx = awsmiddleware.SetSigningRegion(ctx, v4aScheme.SigningRegionSet[0]) break case *internalauth.AuthenticationSchemeNone: break } } return next.HandleSerialize(ctx, in) } func addSearchResolveEndpointMiddleware(stack *middleware.Stack, options Options) error { return stack.Serialize.Insert(&opSearchResolveEndpointMiddleware{ EndpointResolver: options.EndpointResolverV2, BuiltInResolver: &builtInResolver{ Region: options.Region,
UseFIPS: options.EndpointOptions.UseFIPSEndpoint, Endpoint: options.BaseEndpoint, }, }, "ResolveEndpoint", middleware.After) }
random_line_split
imap.go
package Pinger import ( "bufio" "bytes" "crypto/tls" "encoding/base64" "errors" "fmt" "github.com/nachocove/Pinger/Utils" "github.com/nachocove/Pinger/Utils/Logging" "math/rand" "net" "net/url" "regexp" "strconv" "strings" "sync" "time" ) // IMAP Commands const ( IMAP_EXISTS string = "EXISTS" IMAP_EXPUNGE string = "EXPUNGE" IMAP_EXAMINE string = "EXAMINE" IMAP_IDLE string = "IDLE" IMAP_DONE string = "DONE" IMAP_NOOP string = "NOOP" IMAP_UIDNEXT string = "[UIDNEXT" IMAP_STATUS string = "STATUS" IMAP_STATUS_QUERY string = "(MESSAGES UIDNEXT)" ) // Timeout values for the Dial functions. const ( netTimeout = 30 * time.Second // Time to establish a TCP connection POLLING_INTERVAL = 30 replyTimeout = 300 * time.Second // Time to wait on server response ) type cmdTag struct { id []byte seq uint64 } type IMAPClient struct { debug bool logger *Logging.Logger pi *MailPingInformation wg *sync.WaitGroup mutex *sync.Mutex cancelled bool url *url.URL tlsConfig *tls.Config tlsConn *tls.Conn scanner *bufio.Scanner tag *cmdTag isIdling bool hasNewEmail bool } var prng *rand.Rand var commandTerminator []byte var IOTimeoutError error func init() { prng = rand.New(&prngSource{src: rand.NewSource(time.Now().UnixNano())}) commandTerminator = []byte("\r\n") IOTimeoutError = fmt.Errorf("I/O Timeout Error") } func (imap *IMAPClient) getLogPrefix() string { return imap.pi.getLogPrefix() + "|protocol=IMAP" + "|tag=" + string(imap.tag.id) + ":" + strconv.FormatUint(imap.tag.seq, 10) } func (imap *IMAPClient) Debug(format string, args ...interface{}) { imap.logger.Debug(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) } func (imap *IMAPClient) Info(format string, args ...interface{}) { imap.logger.Info(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) } func (imap *IMAPClient) Error(format string, args ...interface{}) { imap.logger.Error(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) 
} func (imap *IMAPClient) Warning(format string, args ...interface{}) { imap.logger.Warning(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) } func NewIMAPClient(pi *MailPingInformation, wg *sync.WaitGroup, debug bool, logger *Logging.Logger) (*IMAPClient, error) { imap := IMAPClient{ debug: debug, logger: logger.Copy(), pi: pi, wg: wg, mutex: &sync.Mutex{}, cancelled: false, tag: genNewCmdTag(0), } imap.logger.SetCallDepth(1) imap.Info("Created new IMAP Client|msgCode=IMAP_CLIENT_CREATED") return &imap, nil } func (imap *IMAPClient) sendError(errCh chan error, err error) { logError(err, imap.logger) errCh <- err } type prngSource struct { mu sync.Mutex src rand.Source } func (r *prngSource) Int63() (n int64) { r.mu.Lock() n = r.src.Int63() r.mu.Unlock() return } func (r *prngSource) Seed(seed int64) { r.mu.Lock() r.src.Seed(seed) r.mu.Unlock() } func genNewCmdTag(n uint) *cmdTag { if n < 1 || 26 < n { n = 5 } id := make([]byte, n, n+20) for i, v := range prng.Perm(26)[:n] { id[i] = 'A' + byte(v) } return &cmdTag{id, 0} } func (t *cmdTag) Next() string { t.seq++ return string(strconv.AppendUint(t.id, t.seq, 10)) } func (t *cmdTag) String() string { return fmt.Sprintf("%s%d", t.id, t.seq) } func (imap *IMAPClient) setupScanner() { imap.scanner = bufio.NewScanner(imap.tlsConn) imap.scanner.Split(bufio.ScanLines) } func (imap *IMAPClient) isContinueResponse(response string) bool { if len(response) > 0 && response[0] == '+' { return true } else { return false } } func (imap *IMAPClient) isOKResponse(response string) bool { tokens := strings.Split(response, " ") if len(tokens) >= 2 && tokens[1] == "OK" { return true } else { return false } } func (imap *IMAPClient) handleGreeting() error { imap.Debug("Handle Greeting") response, err := imap.getServerResponse(uint64(replyTimeout / time.Millisecond)) if err == nil { imap.Info("Connected|host=%s|tag=%s", imap.url.Host, imap.tag.id) if imap.isOKResponse(response) { imap.Info("Greeting from server: %s", 
response) return nil } else { err := fmt.Errorf("Did not get proper response from imap server|err=%s", response) return err } } return err } func (imap *IMAPClient) doImapAuth() (authSucess bool, err error) { imap.Info("Authenticating with authblob") decodedBlob, err := base64.StdEncoding.DecodeString(imap.pi.IMAPAuthenticationBlob) if err != nil { imap.Error("Error decoding AuthBlob") return false, err } responses, err := imap.doIMAPCommand(fmt.Sprintf("%s %s", imap.tag.Next(), decodedBlob), uint64(replyTimeout/time.Millisecond)) if err != nil { return false, err } if len(responses) > 0 { lastResponse := responses[len(responses)-1] if imap.isContinueResponse(lastResponse) { // auth failed imap.Debug("Authentication failed: %s", lastResponse) responses, err = imap.doIMAPCommand(" ", uint64(replyTimeout/time.Millisecond)) } if !imap.isOKResponse(lastResponse) { return false, err } } imap.Debug("Authentication successful|msgCode=IMAP_AUTH_SUCCESS") return true, nil } func (imap *IMAPClient) parseEXAMINEResponse(response string) (value uint32, token string) { tokens := strings.Split(response, " ") valueToken := "" if tokens[0] == "*" && tokens[2] == IMAP_EXISTS { valueToken = tokens[1] } else if tokens[0] == "*" && tokens[2] == IMAP_UIDNEXT { valueToken = tokens[3][:len(tokens[3])-1] } if valueToken != "" { value, err := strconv.Atoi(valueToken) if err != nil { imap.Warning("Cannot parse value from response : %s", response) } else { return uint32(value), tokens[2] } } return 0, "" } //* STATUS "INBOX" (MESSAGES 18 UIDNEXT 41) func (imap *IMAPClient) parseSTATUSResponse(response string) (uint32, uint32) { re := regexp.MustCompile(".*(MESSAGES (?P<messageCount>[0-9]+) UIDNEXT (?P<UIDNext>[0-9]+))") r2 := re.FindStringSubmatch(response) if len(r2) == 0 { return 0, 0 } messageCountStr := r2[2] UIDNextStr := r2[3] messageCount, err := strconv.Atoi(messageCountStr) if err != nil { imap.Warning("Cannot parse value from %s", messageCountStr) messageCount = 0 } UIDNext, err := 
strconv.Atoi(UIDNextStr) if err != nil { imap.Warning("Cannot parse value from %s", UIDNextStr) UIDNext = 0 } return uint32(messageCount), uint32(UIDNext) } func (imap *IMAPClient) parseIDLEResponse(response string) (value uint32, token string) { tokens := strings.Split(response, " ") if tokens[0] == "*" && (tokens[2] == IMAP_EXISTS || tokens[2] == IMAP_EXPUNGE) { value, err := strconv.Atoi(tokens[1]) if err != nil { imap.Warning("Cannot parse value from %s", response) } else { return uint32(value), tokens[2] } } return 0, "" } func (imap *IMAPClient) doExamine() error { command := fmt.Sprintf("%s %s %s", imap.tag.Next(), IMAP_EXAMINE, imap.pi.IMAPFolderName) imap.Debug("IMAPFolder=%s", imap.pi.IMAPFolderName) _, err := imap.doIMAPCommand(command, uint64(replyTimeout/time.Millisecond)) return err } func (imap *IMAPClient) sendIMAPCommand(command string) error { commandName := imap.getNameFromCommand(command) imap.Info("Sending IMAP Command to server|command=%s|msgCode=IMAP_COMMAND_SENT", commandName) //imap.Debug("Sending IMAP Command to server:[%s]", command) if commandName == "IDLE" { imap.Info("Setting isIdling to true.") imap.isIdling = true } if len(command) > 0 { _, err := imap.tlsConn.Write([]byte(command)) if err != nil { return err } _, err = imap.tlsConn.Write(commandTerminator) if err != nil { return err } } return nil } func (imap *IMAPClient) doIMAPCommand(command string, waitTime uint64) ([]string, error) { commandLines := strings.Split(command, "\n") var allResponses []string var err error for _, commandLine := range commandLines { err := imap.sendIMAPCommand(commandLine) if err != nil { imap.Warning("%s", err) return nil, err } if imap.cancelled == true { imap.Info("IMAP Command. Request cancelled. 
Exiting|msgCode=IMAP_COMMAND_CANCELLED") err = fmt.Errorf("Request cancelled") return nil, err } responses, err := imap.getServerResponses(command, waitTime) if err != nil { return nil, err } if allResponses == nil { allResponses = responses } else { allResponses = append(allResponses, responses...) } if len(responses) > 0 { lastResponse := responses[len(responses)-1] if !imap.isOKResponse(lastResponse) && !imap.isContinueResponse(lastResponse) { err := fmt.Errorf("Did not get proper response from imap server: %s", lastResponse) imap.Debug("%s", err) return allResponses, err } } else { err := fmt.Errorf("Did not get any response from imap server.") imap.Debug("%s", err) return allResponses, err } } return allResponses, err } func (imap *IMAPClient) processResponse(command string, response string) { commandName := imap.getNameFromCommand(command) switch commandName { case "IDLE": imap.Debug("Processing IDLE Response: [%s]", response) count, token := imap.parseIDLEResponse(response) if token == IMAP_EXPUNGE { imap.pi.IMAPEXISTSCount -= 1 imap.Info("%s received. Decrementing count|IMAPEXISTSCount=%d", IMAP_EXPUNGE, imap.pi.IMAPEXISTSCount) } else if token == IMAP_EXISTS && count != imap.pi.IMAPEXISTSCount { imap.Info("Current EXISTS count is different from starting EXISTS count."+ "Resetting count|currentIMAPEXISTSCount=%d|startingIMAPExistsCount=%d", count, imap.pi.IMAPEXISTSCount) imap.Info("Got new mail. 
Stopping IDLE|msgCode=IMAP_NEW_MAIL") imap.hasNewEmail = true imap.pi.IMAPEXISTSCount = count err := imap.sendIMAPCommand(IMAP_DONE) if err != nil { imap.Warning("Error sending IMAP Command|command=%s|err=%s", IMAP_DONE, err) } } case "EXAMINE": imap.Debug("Processing EXAMINE Response: [%s]", response) count, token := imap.parseEXAMINEResponse(response) if token == IMAP_EXISTS { imap.Info("Saving starting EXISTS count|IMAPEXISTSCount=%d||msgCode=IMAP_STARTING_EXISTS_COUNT", count) imap.pi.IMAPEXISTSCount = count } else if token == IMAP_UIDNEXT { imap.Info("Setting starting IMAPUIDNEXT|IMAPUIDNEXT=%d", count) imap.pi.IMAPUIDNEXT = count } case "STATUS": imap.Debug("Processing STATUS Response: [%s]", response) _, UIDNext := imap.parseSTATUSResponse(response) if UIDNext != 0 { if imap.pi.IMAPUIDNEXT == 0 { imap.Info("Setting starting IMAPUIDNEXT|IMAPUIDNEXT=%d", UIDNext) imap.pi.IMAPUIDNEXT = UIDNext } else if UIDNext != imap.pi.IMAPUIDNEXT { imap.Info("Current UIDNext is different from starting UIDNext."+ " Resetting UIDNext|currentUIDNext=%d|startingUIDNext=%d|msgCode=IMAP_RESET_UIDNEXT", UIDNext, imap.pi.IMAPUIDNEXT) imap.Info("Got new mail|msgCode=IMAP_NEW_MAIL") imap.hasNewEmail = true imap.pi.IMAPUIDNEXT = UIDNext } else { imap.Debug("Current UIDNext is the same as starting UIDNext|currentUIDNext=%d|startingUIDNext=%d", UIDNext, imap.pi.IMAPUIDNEXT) } } } } func (imap *IMAPClient) isFinalResponse(command string, response string) bool { tokens := strings.Split(command, " ") if len(response) >= 2 && response[0:2] == "+ " && imap.getNameFromCommand(command) != "IDLE" { return true } else if len(tokens) > 0 { token := tokens[0] if len(response) >= len(token) && token == response[0:len(token)] { return true } } return false } func (imap *IMAPClient) getNameFromCommand(command string) string { commandTokens := strings.Split(command, " ") if len(commandTokens) > 1 { return commandTokens[1] } return "" } func (imap *IMAPClient) getServerResponses(command string, 
waitTime uint64) ([]string, error) { completed := false responses := make([]string, 0) imap.Debug("Getting Server Responses") for completed == false { if imap.getNameFromCommand(command) == "IDLE" { waitTime = 0 imap.Debug("IDLE Command|timeout=%d", waitTime) } response, err := imap.getServerResponse(waitTime) if err != nil { imap.Debug("Returning err %s", err) return responses, err } else { if imap.getNameFromCommand(command) == "AUTHENTICATE" { imap.Debug("<%s command response redacted>", imap.getNameFromCommand(command)) } else { imap.Debug("IMAP Server Response is %s", response) } responses = append(responses, response) imap.processResponse(command, response) if imap.isFinalResponse(command, response) { if imap.getNameFromCommand(command) == "IDLE" { imap.Info("Setting isIdling to false|msgCode=IMAP_STOP_IDLE") imap.isIdling = false } for i, r := range responses { if imap.getNameFromCommand(command) == "AUTHENTICATE" { imap.Debug("%d: <%s command response redacted>", i, imap.getNameFromCommand(command)) } else { imap.Debug("%d: %s", i, r) } } break } } } return responses, nil } func (imap *IMAPClient) getServerResponse(waitTime uint64) (string, error) { imap.Debug("Getting server response|timeout=%d", waitTime) if waitTime > 0 { waitUntil := time.Now().Add(time.Duration(waitTime) * time.Millisecond) imap.tlsConn.SetReadDeadline(waitUntil) } for i := 0; ; i++ { ok := imap.scanner.Scan() if ok { break } else { err := imap.scanner.Err() if err == nil { return "", errors.New("EOF received") } nerr, ok := err.(net.Error) if ok && nerr.Timeout() { imap.Debug("Timeout error|err=%s", nerr) return "", err } else if ok && nerr.Temporary() { if i < 3 { // try three times imap.Info("Temporary error scanning for server response: %s. 
Will retry...", nerr) time.Sleep(time.Duration(1) * time.Second) } else { imap.Debug("Error scanning for server response: %s.", nerr) return "", err } } else { imap.Debug("Error scanning for server response: %s.", err) return "", err } } } response := imap.scanner.Text() return response, nil } func (imap *IMAPClient) doRequestResponse(request string, responseCh chan []string, responseErrCh chan error) { imap.Debug("Starting doRequestResponse") imap.wg.Add(1) defer Utils.RecoverCrash(imap.logger) imap.mutex.Lock() // prevents the longpoll from cancelling the request while we're still setting it up. unlockMutex := true defer func() { imap.Debug("Exiting doRequestResponse") imap.wg.Done() if unlockMutex { imap.mutex.Unlock() } }() var err error if imap == nil || imap.pi == nil { if imap.logger != nil { imap.Info("doRequestResponse called but structures cleaned up") } return } if imap.tlsConn == nil { imap.Info("doRequestResponse called but tls connection has been cleaned up") return } imap.mutex.Unlock() unlockMutex = false imap.Debug("Executing IMAP Command|timeout=%d", uint64(replyTimeout/time.Millisecond)) responses, err := imap.doIMAPCommand(request, uint64(replyTimeout/time.Millisecond)) if imap.cancelled == true { imap.Info("IMAP Request cancelled. 
Exiting|msgCode=IMAP_REQ_CANCELLED") return } if err != nil { if imap.isIdling { imap.isIdling = false } imap.Info("Request/Response Error: %s", err) nerr, ok := err.(net.Error) if ok && nerr.Timeout() { responseErrCh <- IOTimeoutError; } else { responseErrCh <- fmt.Errorf("Request/Response Error: %s", err) } return } responseCh <- responses return } func (imap *IMAPClient) setupConn() error { imap.Debug("Setting up TLS connection") if imap.tlsConn != nil { imap.tlsConn.Close() } if imap.url == nil { imapUrl, err := url.Parse(imap.pi.MailServerUrl) if err != nil { imap.Warning("err %s", err) return err } imap.url = imapUrl } host, _, _ := net.SplitHostPort(imap.url.Host) if imap.tlsConfig == nil { imap.tlsConfig = &tls.Config{ ServerName: host, RootCAs: globals.config.RootCerts(), } } conn, err := net.DialTimeout("tcp", imap.url.Host, netTimeout) if err == nil { imap.tlsConn = tls.Client(conn, imap.tlsConfig) if imap.tlsConn == nil { conn.Close() return fmt.Errorf("Cannot create TLS Connection") } } if err != nil { imap.Warning("err %s", err) return err } imap.setupScanner() err = imap.handleGreeting() if err != nil { imap.Warning("err %s", err) return err } return nil } func (imap *IMAPClient) LongPoll(stopPollCh, stopAllCh chan int, errCh chan error) { imap.Info("Starting LongPoll|msgCode=POLLING") if imap.isIdling { imap.Warning("Already idling. Returning|msgCode=IMAP_ALREADY_POLLING") return } imap.wg.Add(1) defer imap.wg.Done() defer Utils.RecoverCrash(imap.logger) defer func() { imap.Info("Stopping LongPoll.") imap.cancel() }() sleepTime := 0 if imap.pi.IMAPSupportsIdle { imap.Debug("IMAP Server supports IDLE") } else { imap.Debug("IMAP Server doesn't support IDLE. 
Resetting IMAP UIDNEXT|IMAPUIDNEXT=0|msgCode") } imap.pi.IMAPUIDNEXT = 0 for { if sleepTime > 0 { s := time.Duration(sleepTime) * time.Second imap.Debug("Sleeping %s before retry", s) time.Sleep(s) } sleepTime = POLLING_INTERVAL if imap.tlsConn == nil { err := imap.setupConn() if err != nil { imap.Error("Connection setup error: %v", err) errCh <- LongPollReRegister return } authSuccess, err := imap.doImapAuth() if err != nil { imap.Warning("Authentication error (%s). Telling client to re-register|msgCode=IMAP_AUTH_FAIL_REREGISTER", err) errCh <- LongPollReRegister return } if !authSuccess { imap.Info("Authentication failed. Telling client to re-register|msgCode=IMAP_AUTH_FAIL_REREGISTER") errCh <- LongPollReRegister return } } if imap.pi.IMAPSupportsIdle { imap.Debug("Supporting idle. Running Examine Command") err := imap.doExamine() if err != nil { imap.Warning("Examine failure: %v. Telling client to re-register|msgCode=IMAP_AUTH_FAIL_REREGISTER", err) errCh <- LongPollReRegister return } } imap.Info("Request timeout %d|msgCode=IMAP_POLL_REQ_TIMEDOUT_VALUE", imap.pi.ResponseTimeout) requestTimer := time.NewTimer(time.Duration(imap.pi.ResponseTimeout) * time.Millisecond) responseCh := make(chan []string) responseErrCh := make(chan error) command := IMAP_NOOP if imap.pi.IMAPSupportsIdle { command = fmt.Sprintf("%s %s", imap.tag.Next(), IMAP_IDLE) } else { command = fmt.Sprintf("%s %s %s %s", imap.tag.Next(), IMAP_STATUS, imap.pi.IMAPFolderName, IMAP_STATUS_QUERY) } go imap.doRequestResponse(command, responseCh, responseErrCh) select { case <-requestTimer.C: // request timed out. Start over. imap.Info("Request timed out. Starting over|msgCode=IMAP_POLL_REQ_TIMEDOUT") requestTimer.Stop() imap.cancelIDLE() sleepTime = 1 case err := <-responseErrCh: if err == IOTimeoutError { // just retry on an I/O Timeout. No need for the device to re-register sleepTime = 1 } else { imap.Info("Got error %s. 
Sending back LongPollReRegister|msgCode=IMAP_ERR_REREGISTER", err) errCh <- LongPollReRegister // erroring out... ask for reregister return } return case <-responseCh: if imap.hasNewEmail { imap.Info("Got mail. Sending LongPollNewMail|msgCode=IMAP_NEW_EMAIL") imap.hasNewEmail = false errCh <- LongPollNewMail return } case <-stopPollCh: // parent will close this, at which point this will trigger. imap.Info("Was told to stop. Stopping") return case <-stopAllCh: // parent will close this, at which point this will trigger. imap.Info("Was told to stop (allStop). Stopping") return } } } func (imap *IMAPClient) cancelIDLE() { if imap.isIdling { imap.Info("Cancelling outstanding IDLE request") err := imap.sendIMAPCommand(IMAP_DONE) if err != nil { imap.Warning("Error sending IMAP command %s while cancelling IDLE request: %s", IMAP_DONE, err) } } } func (imap *IMAPClient) UpdateRequestData(requestData []byte) { if len(requestData) > 0 && bytes.Compare(requestData, imap.pi.RequestData) != 0 { imap.pi.RequestData = requestData } } func (imap *IMAPClient)
() { imap.mutex.Lock() imap.cancelled = true if imap.tlsConn != nil { imap.cancelIDLE() imap.tlsConn.Close() imap.tlsConn = nil } imap.mutex.Unlock() } func (imap *IMAPClient) Cleanup() { imap.Debug("Cleaning up") imap.cancel() imap.pi.cleanup() imap.pi = nil }
cancel
identifier_name
imap.go
package Pinger import ( "bufio" "bytes" "crypto/tls" "encoding/base64" "errors" "fmt" "github.com/nachocove/Pinger/Utils" "github.com/nachocove/Pinger/Utils/Logging" "math/rand" "net" "net/url" "regexp" "strconv" "strings" "sync" "time" ) // IMAP Commands const ( IMAP_EXISTS string = "EXISTS" IMAP_EXPUNGE string = "EXPUNGE" IMAP_EXAMINE string = "EXAMINE" IMAP_IDLE string = "IDLE" IMAP_DONE string = "DONE" IMAP_NOOP string = "NOOP" IMAP_UIDNEXT string = "[UIDNEXT" IMAP_STATUS string = "STATUS" IMAP_STATUS_QUERY string = "(MESSAGES UIDNEXT)" ) // Timeout values for the Dial functions. const ( netTimeout = 30 * time.Second // Time to establish a TCP connection POLLING_INTERVAL = 30 replyTimeout = 300 * time.Second // Time to wait on server response ) type cmdTag struct { id []byte seq uint64 } type IMAPClient struct { debug bool logger *Logging.Logger pi *MailPingInformation wg *sync.WaitGroup mutex *sync.Mutex cancelled bool url *url.URL tlsConfig *tls.Config tlsConn *tls.Conn scanner *bufio.Scanner tag *cmdTag isIdling bool hasNewEmail bool } var prng *rand.Rand var commandTerminator []byte var IOTimeoutError error func init() { prng = rand.New(&prngSource{src: rand.NewSource(time.Now().UnixNano())}) commandTerminator = []byte("\r\n") IOTimeoutError = fmt.Errorf("I/O Timeout Error") } func (imap *IMAPClient) getLogPrefix() string { return imap.pi.getLogPrefix() + "|protocol=IMAP" + "|tag=" + string(imap.tag.id) + ":" + strconv.FormatUint(imap.tag.seq, 10) } func (imap *IMAPClient) Debug(format string, args ...interface{}) { imap.logger.Debug(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) } func (imap *IMAPClient) Info(format string, args ...interface{}) { imap.logger.Info(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) } func (imap *IMAPClient) Error(format string, args ...interface{}) { imap.logger.Error(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) 
} func (imap *IMAPClient) Warning(format string, args ...interface{}) { imap.logger.Warning(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) } func NewIMAPClient(pi *MailPingInformation, wg *sync.WaitGroup, debug bool, logger *Logging.Logger) (*IMAPClient, error) { imap := IMAPClient{ debug: debug, logger: logger.Copy(), pi: pi, wg: wg, mutex: &sync.Mutex{}, cancelled: false, tag: genNewCmdTag(0), } imap.logger.SetCallDepth(1) imap.Info("Created new IMAP Client|msgCode=IMAP_CLIENT_CREATED") return &imap, nil } func (imap *IMAPClient) sendError(errCh chan error, err error) { logError(err, imap.logger) errCh <- err } type prngSource struct { mu sync.Mutex src rand.Source } func (r *prngSource) Int63() (n int64) { r.mu.Lock() n = r.src.Int63() r.mu.Unlock() return } func (r *prngSource) Seed(seed int64) { r.mu.Lock() r.src.Seed(seed) r.mu.Unlock() } func genNewCmdTag(n uint) *cmdTag { if n < 1 || 26 < n { n = 5 } id := make([]byte, n, n+20) for i, v := range prng.Perm(26)[:n] { id[i] = 'A' + byte(v) } return &cmdTag{id, 0} } func (t *cmdTag) Next() string { t.seq++ return string(strconv.AppendUint(t.id, t.seq, 10)) } func (t *cmdTag) String() string { return fmt.Sprintf("%s%d", t.id, t.seq) } func (imap *IMAPClient) setupScanner() { imap.scanner = bufio.NewScanner(imap.tlsConn) imap.scanner.Split(bufio.ScanLines) } func (imap *IMAPClient) isContinueResponse(response string) bool { if len(response) > 0 && response[0] == '+' { return true } else { return false } } func (imap *IMAPClient) isOKResponse(response string) bool { tokens := strings.Split(response, " ") if len(tokens) >= 2 && tokens[1] == "OK" { return true } else { return false } } func (imap *IMAPClient) handleGreeting() error { imap.Debug("Handle Greeting") response, err := imap.getServerResponse(uint64(replyTimeout / time.Millisecond)) if err == nil { imap.Info("Connected|host=%s|tag=%s", imap.url.Host, imap.tag.id) if imap.isOKResponse(response) { imap.Info("Greeting from server: %s", 
response) return nil } else { err := fmt.Errorf("Did not get proper response from imap server|err=%s", response) return err } } return err } func (imap *IMAPClient) doImapAuth() (authSucess bool, err error) { imap.Info("Authenticating with authblob") decodedBlob, err := base64.StdEncoding.DecodeString(imap.pi.IMAPAuthenticationBlob) if err != nil { imap.Error("Error decoding AuthBlob") return false, err } responses, err := imap.doIMAPCommand(fmt.Sprintf("%s %s", imap.tag.Next(), decodedBlob), uint64(replyTimeout/time.Millisecond)) if err != nil { return false, err } if len(responses) > 0 { lastResponse := responses[len(responses)-1] if imap.isContinueResponse(lastResponse) { // auth failed imap.Debug("Authentication failed: %s", lastResponse) responses, err = imap.doIMAPCommand(" ", uint64(replyTimeout/time.Millisecond)) } if !imap.isOKResponse(lastResponse) { return false, err } } imap.Debug("Authentication successful|msgCode=IMAP_AUTH_SUCCESS") return true, nil } func (imap *IMAPClient) parseEXAMINEResponse(response string) (value uint32, token string) { tokens := strings.Split(response, " ") valueToken := "" if tokens[0] == "*" && tokens[2] == IMAP_EXISTS { valueToken = tokens[1] } else if tokens[0] == "*" && tokens[2] == IMAP_UIDNEXT { valueToken = tokens[3][:len(tokens[3])-1] } if valueToken != "" { value, err := strconv.Atoi(valueToken) if err != nil { imap.Warning("Cannot parse value from response : %s", response) } else { return uint32(value), tokens[2] } } return 0, "" } //* STATUS "INBOX" (MESSAGES 18 UIDNEXT 41) func (imap *IMAPClient) parseSTATUSResponse(response string) (uint32, uint32) { re := regexp.MustCompile(".*(MESSAGES (?P<messageCount>[0-9]+) UIDNEXT (?P<UIDNext>[0-9]+))") r2 := re.FindStringSubmatch(response) if len(r2) == 0 { return 0, 0 } messageCountStr := r2[2] UIDNextStr := r2[3] messageCount, err := strconv.Atoi(messageCountStr) if err != nil { imap.Warning("Cannot parse value from %s", messageCountStr) messageCount = 0 } UIDNext, err := 
strconv.Atoi(UIDNextStr) if err != nil { imap.Warning("Cannot parse value from %s", UIDNextStr) UIDNext = 0 } return uint32(messageCount), uint32(UIDNext) } func (imap *IMAPClient) parseIDLEResponse(response string) (value uint32, token string) { tokens := strings.Split(response, " ") if tokens[0] == "*" && (tokens[2] == IMAP_EXISTS || tokens[2] == IMAP_EXPUNGE) { value, err := strconv.Atoi(tokens[1]) if err != nil { imap.Warning("Cannot parse value from %s", response) } else { return uint32(value), tokens[2] } } return 0, "" } func (imap *IMAPClient) doExamine() error { command := fmt.Sprintf("%s %s %s", imap.tag.Next(), IMAP_EXAMINE, imap.pi.IMAPFolderName) imap.Debug("IMAPFolder=%s", imap.pi.IMAPFolderName) _, err := imap.doIMAPCommand(command, uint64(replyTimeout/time.Millisecond)) return err } func (imap *IMAPClient) sendIMAPCommand(command string) error { commandName := imap.getNameFromCommand(command) imap.Info("Sending IMAP Command to server|command=%s|msgCode=IMAP_COMMAND_SENT", commandName) //imap.Debug("Sending IMAP Command to server:[%s]", command) if commandName == "IDLE" { imap.Info("Setting isIdling to true.") imap.isIdling = true } if len(command) > 0 { _, err := imap.tlsConn.Write([]byte(command)) if err != nil { return err } _, err = imap.tlsConn.Write(commandTerminator) if err != nil { return err } } return nil } func (imap *IMAPClient) doIMAPCommand(command string, waitTime uint64) ([]string, error) { commandLines := strings.Split(command, "\n") var allResponses []string var err error for _, commandLine := range commandLines { err := imap.sendIMAPCommand(commandLine) if err != nil { imap.Warning("%s", err) return nil, err } if imap.cancelled == true { imap.Info("IMAP Command. Request cancelled. 
Exiting|msgCode=IMAP_COMMAND_CANCELLED") err = fmt.Errorf("Request cancelled") return nil, err } responses, err := imap.getServerResponses(command, waitTime) if err != nil { return nil, err } if allResponses == nil { allResponses = responses } else { allResponses = append(allResponses, responses...) } if len(responses) > 0 { lastResponse := responses[len(responses)-1] if !imap.isOKResponse(lastResponse) && !imap.isContinueResponse(lastResponse) { err := fmt.Errorf("Did not get proper response from imap server: %s", lastResponse) imap.Debug("%s", err) return allResponses, err } } else { err := fmt.Errorf("Did not get any response from imap server.") imap.Debug("%s", err) return allResponses, err } } return allResponses, err } func (imap *IMAPClient) processResponse(command string, response string) { commandName := imap.getNameFromCommand(command) switch commandName { case "IDLE": imap.Debug("Processing IDLE Response: [%s]", response) count, token := imap.parseIDLEResponse(response) if token == IMAP_EXPUNGE { imap.pi.IMAPEXISTSCount -= 1 imap.Info("%s received. Decrementing count|IMAPEXISTSCount=%d", IMAP_EXPUNGE, imap.pi.IMAPEXISTSCount) } else if token == IMAP_EXISTS && count != imap.pi.IMAPEXISTSCount { imap.Info("Current EXISTS count is different from starting EXISTS count."+ "Resetting count|currentIMAPEXISTSCount=%d|startingIMAPExistsCount=%d", count, imap.pi.IMAPEXISTSCount) imap.Info("Got new mail. Stopping IDLE|msgCode=IMAP_NEW_MAIL") imap.hasNewEmail = true imap.pi.IMAPEXISTSCount = count err := imap.sendIMAPCommand(IMAP_DONE) if err != nil { imap.Warning("Error sending IMAP Command|command=%s|err=%s", IMAP_DONE, err) } } case "EXAMINE": imap.Debug("Processing EXAMINE Response: [%s]", response)
imap.Info("Saving starting EXISTS count|IMAPEXISTSCount=%d||msgCode=IMAP_STARTING_EXISTS_COUNT", count) imap.pi.IMAPEXISTSCount = count } else if token == IMAP_UIDNEXT { imap.Info("Setting starting IMAPUIDNEXT|IMAPUIDNEXT=%d", count) imap.pi.IMAPUIDNEXT = count } case "STATUS": imap.Debug("Processing STATUS Response: [%s]", response) _, UIDNext := imap.parseSTATUSResponse(response) if UIDNext != 0 { if imap.pi.IMAPUIDNEXT == 0 { imap.Info("Setting starting IMAPUIDNEXT|IMAPUIDNEXT=%d", UIDNext) imap.pi.IMAPUIDNEXT = UIDNext } else if UIDNext != imap.pi.IMAPUIDNEXT { imap.Info("Current UIDNext is different from starting UIDNext."+ " Resetting UIDNext|currentUIDNext=%d|startingUIDNext=%d|msgCode=IMAP_RESET_UIDNEXT", UIDNext, imap.pi.IMAPUIDNEXT) imap.Info("Got new mail|msgCode=IMAP_NEW_MAIL") imap.hasNewEmail = true imap.pi.IMAPUIDNEXT = UIDNext } else { imap.Debug("Current UIDNext is the same as starting UIDNext|currentUIDNext=%d|startingUIDNext=%d", UIDNext, imap.pi.IMAPUIDNEXT) } } } } func (imap *IMAPClient) isFinalResponse(command string, response string) bool { tokens := strings.Split(command, " ") if len(response) >= 2 && response[0:2] == "+ " && imap.getNameFromCommand(command) != "IDLE" { return true } else if len(tokens) > 0 { token := tokens[0] if len(response) >= len(token) && token == response[0:len(token)] { return true } } return false } func (imap *IMAPClient) getNameFromCommand(command string) string { commandTokens := strings.Split(command, " ") if len(commandTokens) > 1 { return commandTokens[1] } return "" } func (imap *IMAPClient) getServerResponses(command string, waitTime uint64) ([]string, error) { completed := false responses := make([]string, 0) imap.Debug("Getting Server Responses") for completed == false { if imap.getNameFromCommand(command) == "IDLE" { waitTime = 0 imap.Debug("IDLE Command|timeout=%d", waitTime) } response, err := imap.getServerResponse(waitTime) if err != nil { imap.Debug("Returning err %s", err) return responses, err } 
else { if imap.getNameFromCommand(command) == "AUTHENTICATE" { imap.Debug("<%s command response redacted>", imap.getNameFromCommand(command)) } else { imap.Debug("IMAP Server Response is %s", response) } responses = append(responses, response) imap.processResponse(command, response) if imap.isFinalResponse(command, response) { if imap.getNameFromCommand(command) == "IDLE" { imap.Info("Setting isIdling to false|msgCode=IMAP_STOP_IDLE") imap.isIdling = false } for i, r := range responses { if imap.getNameFromCommand(command) == "AUTHENTICATE" { imap.Debug("%d: <%s command response redacted>", i, imap.getNameFromCommand(command)) } else { imap.Debug("%d: %s", i, r) } } break } } } return responses, nil } func (imap *IMAPClient) getServerResponse(waitTime uint64) (string, error) { imap.Debug("Getting server response|timeout=%d", waitTime) if waitTime > 0 { waitUntil := time.Now().Add(time.Duration(waitTime) * time.Millisecond) imap.tlsConn.SetReadDeadline(waitUntil) } for i := 0; ; i++ { ok := imap.scanner.Scan() if ok { break } else { err := imap.scanner.Err() if err == nil { return "", errors.New("EOF received") } nerr, ok := err.(net.Error) if ok && nerr.Timeout() { imap.Debug("Timeout error|err=%s", nerr) return "", err } else if ok && nerr.Temporary() { if i < 3 { // try three times imap.Info("Temporary error scanning for server response: %s. Will retry...", nerr) time.Sleep(time.Duration(1) * time.Second) } else { imap.Debug("Error scanning for server response: %s.", nerr) return "", err } } else { imap.Debug("Error scanning for server response: %s.", err) return "", err } } } response := imap.scanner.Text() return response, nil } func (imap *IMAPClient) doRequestResponse(request string, responseCh chan []string, responseErrCh chan error) { imap.Debug("Starting doRequestResponse") imap.wg.Add(1) defer Utils.RecoverCrash(imap.logger) imap.mutex.Lock() // prevents the longpoll from cancelling the request while we're still setting it up. 
unlockMutex := true defer func() { imap.Debug("Exiting doRequestResponse") imap.wg.Done() if unlockMutex { imap.mutex.Unlock() } }() var err error if imap == nil || imap.pi == nil { if imap.logger != nil { imap.Info("doRequestResponse called but structures cleaned up") } return } if imap.tlsConn == nil { imap.Info("doRequestResponse called but tls connection has been cleaned up") return } imap.mutex.Unlock() unlockMutex = false imap.Debug("Executing IMAP Command|timeout=%d", uint64(replyTimeout/time.Millisecond)) responses, err := imap.doIMAPCommand(request, uint64(replyTimeout/time.Millisecond)) if imap.cancelled == true { imap.Info("IMAP Request cancelled. Exiting|msgCode=IMAP_REQ_CANCELLED") return } if err != nil { if imap.isIdling { imap.isIdling = false } imap.Info("Request/Response Error: %s", err) nerr, ok := err.(net.Error) if ok && nerr.Timeout() { responseErrCh <- IOTimeoutError; } else { responseErrCh <- fmt.Errorf("Request/Response Error: %s", err) } return } responseCh <- responses return } func (imap *IMAPClient) setupConn() error { imap.Debug("Setting up TLS connection") if imap.tlsConn != nil { imap.tlsConn.Close() } if imap.url == nil { imapUrl, err := url.Parse(imap.pi.MailServerUrl) if err != nil { imap.Warning("err %s", err) return err } imap.url = imapUrl } host, _, _ := net.SplitHostPort(imap.url.Host) if imap.tlsConfig == nil { imap.tlsConfig = &tls.Config{ ServerName: host, RootCAs: globals.config.RootCerts(), } } conn, err := net.DialTimeout("tcp", imap.url.Host, netTimeout) if err == nil { imap.tlsConn = tls.Client(conn, imap.tlsConfig) if imap.tlsConn == nil { conn.Close() return fmt.Errorf("Cannot create TLS Connection") } } if err != nil { imap.Warning("err %s", err) return err } imap.setupScanner() err = imap.handleGreeting() if err != nil { imap.Warning("err %s", err) return err } return nil } func (imap *IMAPClient) LongPoll(stopPollCh, stopAllCh chan int, errCh chan error) { imap.Info("Starting LongPoll|msgCode=POLLING") if 
imap.isIdling { imap.Warning("Already idling. Returning|msgCode=IMAP_ALREADY_POLLING") return } imap.wg.Add(1) defer imap.wg.Done() defer Utils.RecoverCrash(imap.logger) defer func() { imap.Info("Stopping LongPoll.") imap.cancel() }() sleepTime := 0 if imap.pi.IMAPSupportsIdle { imap.Debug("IMAP Server supports IDLE") } else { imap.Debug("IMAP Server doesn't support IDLE. Resetting IMAP UIDNEXT|IMAPUIDNEXT=0|msgCode") } imap.pi.IMAPUIDNEXT = 0 for { if sleepTime > 0 { s := time.Duration(sleepTime) * time.Second imap.Debug("Sleeping %s before retry", s) time.Sleep(s) } sleepTime = POLLING_INTERVAL if imap.tlsConn == nil { err := imap.setupConn() if err != nil { imap.Error("Connection setup error: %v", err) errCh <- LongPollReRegister return } authSuccess, err := imap.doImapAuth() if err != nil { imap.Warning("Authentication error (%s). Telling client to re-register|msgCode=IMAP_AUTH_FAIL_REREGISTER", err) errCh <- LongPollReRegister return } if !authSuccess { imap.Info("Authentication failed. Telling client to re-register|msgCode=IMAP_AUTH_FAIL_REREGISTER") errCh <- LongPollReRegister return } } if imap.pi.IMAPSupportsIdle { imap.Debug("Supporting idle. Running Examine Command") err := imap.doExamine() if err != nil { imap.Warning("Examine failure: %v. Telling client to re-register|msgCode=IMAP_AUTH_FAIL_REREGISTER", err) errCh <- LongPollReRegister return } } imap.Info("Request timeout %d|msgCode=IMAP_POLL_REQ_TIMEDOUT_VALUE", imap.pi.ResponseTimeout) requestTimer := time.NewTimer(time.Duration(imap.pi.ResponseTimeout) * time.Millisecond) responseCh := make(chan []string) responseErrCh := make(chan error) command := IMAP_NOOP if imap.pi.IMAPSupportsIdle { command = fmt.Sprintf("%s %s", imap.tag.Next(), IMAP_IDLE) } else { command = fmt.Sprintf("%s %s %s %s", imap.tag.Next(), IMAP_STATUS, imap.pi.IMAPFolderName, IMAP_STATUS_QUERY) } go imap.doRequestResponse(command, responseCh, responseErrCh) select { case <-requestTimer.C: // request timed out. Start over. 
imap.Info("Request timed out. Starting over|msgCode=IMAP_POLL_REQ_TIMEDOUT") requestTimer.Stop() imap.cancelIDLE() sleepTime = 1 case err := <-responseErrCh: if err == IOTimeoutError { // just retry on an I/O Timeout. No need for the device to re-register sleepTime = 1 } else { imap.Info("Got error %s. Sending back LongPollReRegister|msgCode=IMAP_ERR_REREGISTER", err) errCh <- LongPollReRegister // erroring out... ask for reregister return } return case <-responseCh: if imap.hasNewEmail { imap.Info("Got mail. Sending LongPollNewMail|msgCode=IMAP_NEW_EMAIL") imap.hasNewEmail = false errCh <- LongPollNewMail return } case <-stopPollCh: // parent will close this, at which point this will trigger. imap.Info("Was told to stop. Stopping") return case <-stopAllCh: // parent will close this, at which point this will trigger. imap.Info("Was told to stop (allStop). Stopping") return } } } func (imap *IMAPClient) cancelIDLE() { if imap.isIdling { imap.Info("Cancelling outstanding IDLE request") err := imap.sendIMAPCommand(IMAP_DONE) if err != nil { imap.Warning("Error sending IMAP command %s while cancelling IDLE request: %s", IMAP_DONE, err) } } } func (imap *IMAPClient) UpdateRequestData(requestData []byte) { if len(requestData) > 0 && bytes.Compare(requestData, imap.pi.RequestData) != 0 { imap.pi.RequestData = requestData } } func (imap *IMAPClient) cancel() { imap.mutex.Lock() imap.cancelled = true if imap.tlsConn != nil { imap.cancelIDLE() imap.tlsConn.Close() imap.tlsConn = nil } imap.mutex.Unlock() } func (imap *IMAPClient) Cleanup() { imap.Debug("Cleaning up") imap.cancel() imap.pi.cleanup() imap.pi = nil }
count, token := imap.parseEXAMINEResponse(response) if token == IMAP_EXISTS {
random_line_split
imap.go
package Pinger import ( "bufio" "bytes" "crypto/tls" "encoding/base64" "errors" "fmt" "github.com/nachocove/Pinger/Utils" "github.com/nachocove/Pinger/Utils/Logging" "math/rand" "net" "net/url" "regexp" "strconv" "strings" "sync" "time" ) // IMAP Commands const ( IMAP_EXISTS string = "EXISTS" IMAP_EXPUNGE string = "EXPUNGE" IMAP_EXAMINE string = "EXAMINE" IMAP_IDLE string = "IDLE" IMAP_DONE string = "DONE" IMAP_NOOP string = "NOOP" IMAP_UIDNEXT string = "[UIDNEXT" IMAP_STATUS string = "STATUS" IMAP_STATUS_QUERY string = "(MESSAGES UIDNEXT)" ) // Timeout values for the Dial functions. const ( netTimeout = 30 * time.Second // Time to establish a TCP connection POLLING_INTERVAL = 30 replyTimeout = 300 * time.Second // Time to wait on server response ) type cmdTag struct { id []byte seq uint64 } type IMAPClient struct { debug bool logger *Logging.Logger pi *MailPingInformation wg *sync.WaitGroup mutex *sync.Mutex cancelled bool url *url.URL tlsConfig *tls.Config tlsConn *tls.Conn scanner *bufio.Scanner tag *cmdTag isIdling bool hasNewEmail bool } var prng *rand.Rand var commandTerminator []byte var IOTimeoutError error func init() { prng = rand.New(&prngSource{src: rand.NewSource(time.Now().UnixNano())}) commandTerminator = []byte("\r\n") IOTimeoutError = fmt.Errorf("I/O Timeout Error") } func (imap *IMAPClient) getLogPrefix() string { return imap.pi.getLogPrefix() + "|protocol=IMAP" + "|tag=" + string(imap.tag.id) + ":" + strconv.FormatUint(imap.tag.seq, 10) } func (imap *IMAPClient) Debug(format string, args ...interface{}) { imap.logger.Debug(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) } func (imap *IMAPClient) Info(format string, args ...interface{}) { imap.logger.Info(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) } func (imap *IMAPClient) Error(format string, args ...interface{}) { imap.logger.Error(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) 
} func (imap *IMAPClient) Warning(format string, args ...interface{}) { imap.logger.Warning(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) } func NewIMAPClient(pi *MailPingInformation, wg *sync.WaitGroup, debug bool, logger *Logging.Logger) (*IMAPClient, error) { imap := IMAPClient{ debug: debug, logger: logger.Copy(), pi: pi, wg: wg, mutex: &sync.Mutex{}, cancelled: false, tag: genNewCmdTag(0), } imap.logger.SetCallDepth(1) imap.Info("Created new IMAP Client|msgCode=IMAP_CLIENT_CREATED") return &imap, nil } func (imap *IMAPClient) sendError(errCh chan error, err error) { logError(err, imap.logger) errCh <- err } type prngSource struct { mu sync.Mutex src rand.Source } func (r *prngSource) Int63() (n int64) { r.mu.Lock() n = r.src.Int63() r.mu.Unlock() return } func (r *prngSource) Seed(seed int64) { r.mu.Lock() r.src.Seed(seed) r.mu.Unlock() } func genNewCmdTag(n uint) *cmdTag { if n < 1 || 26 < n { n = 5 } id := make([]byte, n, n+20) for i, v := range prng.Perm(26)[:n] { id[i] = 'A' + byte(v) } return &cmdTag{id, 0} } func (t *cmdTag) Next() string { t.seq++ return string(strconv.AppendUint(t.id, t.seq, 10)) } func (t *cmdTag) String() string { return fmt.Sprintf("%s%d", t.id, t.seq) } func (imap *IMAPClient) setupScanner() { imap.scanner = bufio.NewScanner(imap.tlsConn) imap.scanner.Split(bufio.ScanLines) } func (imap *IMAPClient) isContinueResponse(response string) bool { if len(response) > 0 && response[0] == '+' { return true } else { return false } } func (imap *IMAPClient) isOKResponse(response string) bool { tokens := strings.Split(response, " ") if len(tokens) >= 2 && tokens[1] == "OK" { return true } else { return false } } func (imap *IMAPClient) handleGreeting() error { imap.Debug("Handle Greeting") response, err := imap.getServerResponse(uint64(replyTimeout / time.Millisecond)) if err == nil { imap.Info("Connected|host=%s|tag=%s", imap.url.Host, imap.tag.id) if imap.isOKResponse(response) { imap.Info("Greeting from server: %s", 
response) return nil } else { err := fmt.Errorf("Did not get proper response from imap server|err=%s", response) return err } } return err } func (imap *IMAPClient) doImapAuth() (authSucess bool, err error) { imap.Info("Authenticating with authblob") decodedBlob, err := base64.StdEncoding.DecodeString(imap.pi.IMAPAuthenticationBlob) if err != nil { imap.Error("Error decoding AuthBlob") return false, err } responses, err := imap.doIMAPCommand(fmt.Sprintf("%s %s", imap.tag.Next(), decodedBlob), uint64(replyTimeout/time.Millisecond)) if err != nil { return false, err } if len(responses) > 0 { lastResponse := responses[len(responses)-1] if imap.isContinueResponse(lastResponse) { // auth failed imap.Debug("Authentication failed: %s", lastResponse) responses, err = imap.doIMAPCommand(" ", uint64(replyTimeout/time.Millisecond)) } if !imap.isOKResponse(lastResponse) { return false, err } } imap.Debug("Authentication successful|msgCode=IMAP_AUTH_SUCCESS") return true, nil } func (imap *IMAPClient) parseEXAMINEResponse(response string) (value uint32, token string) { tokens := strings.Split(response, " ") valueToken := "" if tokens[0] == "*" && tokens[2] == IMAP_EXISTS { valueToken = tokens[1] } else if tokens[0] == "*" && tokens[2] == IMAP_UIDNEXT { valueToken = tokens[3][:len(tokens[3])-1] } if valueToken != "" { value, err := strconv.Atoi(valueToken) if err != nil { imap.Warning("Cannot parse value from response : %s", response) } else { return uint32(value), tokens[2] } } return 0, "" } //* STATUS "INBOX" (MESSAGES 18 UIDNEXT 41) func (imap *IMAPClient) parseSTATUSResponse(response string) (uint32, uint32) { re := regexp.MustCompile(".*(MESSAGES (?P<messageCount>[0-9]+) UIDNEXT (?P<UIDNext>[0-9]+))") r2 := re.FindStringSubmatch(response) if len(r2) == 0 { return 0, 0 } messageCountStr := r2[2] UIDNextStr := r2[3] messageCount, err := strconv.Atoi(messageCountStr) if err != nil { imap.Warning("Cannot parse value from %s", messageCountStr) messageCount = 0 } UIDNext, err := 
strconv.Atoi(UIDNextStr) if err != nil { imap.Warning("Cannot parse value from %s", UIDNextStr) UIDNext = 0 } return uint32(messageCount), uint32(UIDNext) } func (imap *IMAPClient) parseIDLEResponse(response string) (value uint32, token string) { tokens := strings.Split(response, " ") if tokens[0] == "*" && (tokens[2] == IMAP_EXISTS || tokens[2] == IMAP_EXPUNGE) { value, err := strconv.Atoi(tokens[1]) if err != nil { imap.Warning("Cannot parse value from %s", response) } else { return uint32(value), tokens[2] } } return 0, "" } func (imap *IMAPClient) doExamine() error { command := fmt.Sprintf("%s %s %s", imap.tag.Next(), IMAP_EXAMINE, imap.pi.IMAPFolderName) imap.Debug("IMAPFolder=%s", imap.pi.IMAPFolderName) _, err := imap.doIMAPCommand(command, uint64(replyTimeout/time.Millisecond)) return err } func (imap *IMAPClient) sendIMAPCommand(command string) error { commandName := imap.getNameFromCommand(command) imap.Info("Sending IMAP Command to server|command=%s|msgCode=IMAP_COMMAND_SENT", commandName) //imap.Debug("Sending IMAP Command to server:[%s]", command) if commandName == "IDLE" { imap.Info("Setting isIdling to true.") imap.isIdling = true } if len(command) > 0 { _, err := imap.tlsConn.Write([]byte(command)) if err != nil { return err } _, err = imap.tlsConn.Write(commandTerminator) if err != nil { return err } } return nil } func (imap *IMAPClient) doIMAPCommand(command string, waitTime uint64) ([]string, error) { commandLines := strings.Split(command, "\n") var allResponses []string var err error for _, commandLine := range commandLines { err := imap.sendIMAPCommand(commandLine) if err != nil { imap.Warning("%s", err) return nil, err } if imap.cancelled == true { imap.Info("IMAP Command. Request cancelled. 
Exiting|msgCode=IMAP_COMMAND_CANCELLED") err = fmt.Errorf("Request cancelled") return nil, err } responses, err := imap.getServerResponses(command, waitTime) if err != nil { return nil, err } if allResponses == nil { allResponses = responses } else { allResponses = append(allResponses, responses...) } if len(responses) > 0 { lastResponse := responses[len(responses)-1] if !imap.isOKResponse(lastResponse) && !imap.isContinueResponse(lastResponse) { err := fmt.Errorf("Did not get proper response from imap server: %s", lastResponse) imap.Debug("%s", err) return allResponses, err } } else { err := fmt.Errorf("Did not get any response from imap server.") imap.Debug("%s", err) return allResponses, err } } return allResponses, err } func (imap *IMAPClient) processResponse(command string, response string) { commandName := imap.getNameFromCommand(command) switch commandName { case "IDLE": imap.Debug("Processing IDLE Response: [%s]", response) count, token := imap.parseIDLEResponse(response) if token == IMAP_EXPUNGE { imap.pi.IMAPEXISTSCount -= 1 imap.Info("%s received. Decrementing count|IMAPEXISTSCount=%d", IMAP_EXPUNGE, imap.pi.IMAPEXISTSCount) } else if token == IMAP_EXISTS && count != imap.pi.IMAPEXISTSCount { imap.Info("Current EXISTS count is different from starting EXISTS count."+ "Resetting count|currentIMAPEXISTSCount=%d|startingIMAPExistsCount=%d", count, imap.pi.IMAPEXISTSCount) imap.Info("Got new mail. 
Stopping IDLE|msgCode=IMAP_NEW_MAIL") imap.hasNewEmail = true imap.pi.IMAPEXISTSCount = count err := imap.sendIMAPCommand(IMAP_DONE) if err != nil { imap.Warning("Error sending IMAP Command|command=%s|err=%s", IMAP_DONE, err) } } case "EXAMINE": imap.Debug("Processing EXAMINE Response: [%s]", response) count, token := imap.parseEXAMINEResponse(response) if token == IMAP_EXISTS { imap.Info("Saving starting EXISTS count|IMAPEXISTSCount=%d||msgCode=IMAP_STARTING_EXISTS_COUNT", count) imap.pi.IMAPEXISTSCount = count } else if token == IMAP_UIDNEXT { imap.Info("Setting starting IMAPUIDNEXT|IMAPUIDNEXT=%d", count) imap.pi.IMAPUIDNEXT = count } case "STATUS": imap.Debug("Processing STATUS Response: [%s]", response) _, UIDNext := imap.parseSTATUSResponse(response) if UIDNext != 0 { if imap.pi.IMAPUIDNEXT == 0 { imap.Info("Setting starting IMAPUIDNEXT|IMAPUIDNEXT=%d", UIDNext) imap.pi.IMAPUIDNEXT = UIDNext } else if UIDNext != imap.pi.IMAPUIDNEXT { imap.Info("Current UIDNext is different from starting UIDNext."+ " Resetting UIDNext|currentUIDNext=%d|startingUIDNext=%d|msgCode=IMAP_RESET_UIDNEXT", UIDNext, imap.pi.IMAPUIDNEXT) imap.Info("Got new mail|msgCode=IMAP_NEW_MAIL") imap.hasNewEmail = true imap.pi.IMAPUIDNEXT = UIDNext } else { imap.Debug("Current UIDNext is the same as starting UIDNext|currentUIDNext=%d|startingUIDNext=%d", UIDNext, imap.pi.IMAPUIDNEXT) } } } } func (imap *IMAPClient) isFinalResponse(command string, response string) bool { tokens := strings.Split(command, " ") if len(response) >= 2 && response[0:2] == "+ " && imap.getNameFromCommand(command) != "IDLE" { return true } else if len(tokens) > 0 { token := tokens[0] if len(response) >= len(token) && token == response[0:len(token)] { return true } } return false } func (imap *IMAPClient) getNameFromCommand(command string) string { commandTokens := strings.Split(command, " ") if len(commandTokens) > 1 { return commandTokens[1] } return "" } func (imap *IMAPClient) getServerResponses(command string, 
waitTime uint64) ([]string, error) { completed := false responses := make([]string, 0) imap.Debug("Getting Server Responses") for completed == false { if imap.getNameFromCommand(command) == "IDLE" { waitTime = 0 imap.Debug("IDLE Command|timeout=%d", waitTime) } response, err := imap.getServerResponse(waitTime) if err != nil { imap.Debug("Returning err %s", err) return responses, err } else { if imap.getNameFromCommand(command) == "AUTHENTICATE" { imap.Debug("<%s command response redacted>", imap.getNameFromCommand(command)) } else { imap.Debug("IMAP Server Response is %s", response) } responses = append(responses, response) imap.processResponse(command, response) if imap.isFinalResponse(command, response) { if imap.getNameFromCommand(command) == "IDLE" { imap.Info("Setting isIdling to false|msgCode=IMAP_STOP_IDLE") imap.isIdling = false } for i, r := range responses { if imap.getNameFromCommand(command) == "AUTHENTICATE" { imap.Debug("%d: <%s command response redacted>", i, imap.getNameFromCommand(command)) } else { imap.Debug("%d: %s", i, r) } } break } } } return responses, nil } func (imap *IMAPClient) getServerResponse(waitTime uint64) (string, error) { imap.Debug("Getting server response|timeout=%d", waitTime) if waitTime > 0 { waitUntil := time.Now().Add(time.Duration(waitTime) * time.Millisecond) imap.tlsConn.SetReadDeadline(waitUntil) } for i := 0; ; i++ { ok := imap.scanner.Scan() if ok { break } else { err := imap.scanner.Err() if err == nil { return "", errors.New("EOF received") } nerr, ok := err.(net.Error) if ok && nerr.Timeout() { imap.Debug("Timeout error|err=%s", nerr) return "", err } else if ok && nerr.Temporary() { if i < 3 { // try three times imap.Info("Temporary error scanning for server response: %s. 
Will retry...", nerr) time.Sleep(time.Duration(1) * time.Second) } else { imap.Debug("Error scanning for server response: %s.", nerr) return "", err } } else { imap.Debug("Error scanning for server response: %s.", err) return "", err } } } response := imap.scanner.Text() return response, nil } func (imap *IMAPClient) doRequestResponse(request string, responseCh chan []string, responseErrCh chan error) { imap.Debug("Starting doRequestResponse") imap.wg.Add(1) defer Utils.RecoverCrash(imap.logger) imap.mutex.Lock() // prevents the longpoll from cancelling the request while we're still setting it up. unlockMutex := true defer func() { imap.Debug("Exiting doRequestResponse") imap.wg.Done() if unlockMutex { imap.mutex.Unlock() } }() var err error if imap == nil || imap.pi == nil { if imap.logger != nil { imap.Info("doRequestResponse called but structures cleaned up") } return } if imap.tlsConn == nil { imap.Info("doRequestResponse called but tls connection has been cleaned up") return } imap.mutex.Unlock() unlockMutex = false imap.Debug("Executing IMAP Command|timeout=%d", uint64(replyTimeout/time.Millisecond)) responses, err := imap.doIMAPCommand(request, uint64(replyTimeout/time.Millisecond)) if imap.cancelled == true
if err != nil { if imap.isIdling { imap.isIdling = false } imap.Info("Request/Response Error: %s", err) nerr, ok := err.(net.Error) if ok && nerr.Timeout() { responseErrCh <- IOTimeoutError; } else { responseErrCh <- fmt.Errorf("Request/Response Error: %s", err) } return } responseCh <- responses return } func (imap *IMAPClient) setupConn() error { imap.Debug("Setting up TLS connection") if imap.tlsConn != nil { imap.tlsConn.Close() } if imap.url == nil { imapUrl, err := url.Parse(imap.pi.MailServerUrl) if err != nil { imap.Warning("err %s", err) return err } imap.url = imapUrl } host, _, _ := net.SplitHostPort(imap.url.Host) if imap.tlsConfig == nil { imap.tlsConfig = &tls.Config{ ServerName: host, RootCAs: globals.config.RootCerts(), } } conn, err := net.DialTimeout("tcp", imap.url.Host, netTimeout) if err == nil { imap.tlsConn = tls.Client(conn, imap.tlsConfig) if imap.tlsConn == nil { conn.Close() return fmt.Errorf("Cannot create TLS Connection") } } if err != nil { imap.Warning("err %s", err) return err } imap.setupScanner() err = imap.handleGreeting() if err != nil { imap.Warning("err %s", err) return err } return nil } func (imap *IMAPClient) LongPoll(stopPollCh, stopAllCh chan int, errCh chan error) { imap.Info("Starting LongPoll|msgCode=POLLING") if imap.isIdling { imap.Warning("Already idling. Returning|msgCode=IMAP_ALREADY_POLLING") return } imap.wg.Add(1) defer imap.wg.Done() defer Utils.RecoverCrash(imap.logger) defer func() { imap.Info("Stopping LongPoll.") imap.cancel() }() sleepTime := 0 if imap.pi.IMAPSupportsIdle { imap.Debug("IMAP Server supports IDLE") } else { imap.Debug("IMAP Server doesn't support IDLE. 
Resetting IMAP UIDNEXT|IMAPUIDNEXT=0|msgCode") } imap.pi.IMAPUIDNEXT = 0 for { if sleepTime > 0 { s := time.Duration(sleepTime) * time.Second imap.Debug("Sleeping %s before retry", s) time.Sleep(s) } sleepTime = POLLING_INTERVAL if imap.tlsConn == nil { err := imap.setupConn() if err != nil { imap.Error("Connection setup error: %v", err) errCh <- LongPollReRegister return } authSuccess, err := imap.doImapAuth() if err != nil { imap.Warning("Authentication error (%s). Telling client to re-register|msgCode=IMAP_AUTH_FAIL_REREGISTER", err) errCh <- LongPollReRegister return } if !authSuccess { imap.Info("Authentication failed. Telling client to re-register|msgCode=IMAP_AUTH_FAIL_REREGISTER") errCh <- LongPollReRegister return } } if imap.pi.IMAPSupportsIdle { imap.Debug("Supporting idle. Running Examine Command") err := imap.doExamine() if err != nil { imap.Warning("Examine failure: %v. Telling client to re-register|msgCode=IMAP_AUTH_FAIL_REREGISTER", err) errCh <- LongPollReRegister return } } imap.Info("Request timeout %d|msgCode=IMAP_POLL_REQ_TIMEDOUT_VALUE", imap.pi.ResponseTimeout) requestTimer := time.NewTimer(time.Duration(imap.pi.ResponseTimeout) * time.Millisecond) responseCh := make(chan []string) responseErrCh := make(chan error) command := IMAP_NOOP if imap.pi.IMAPSupportsIdle { command = fmt.Sprintf("%s %s", imap.tag.Next(), IMAP_IDLE) } else { command = fmt.Sprintf("%s %s %s %s", imap.tag.Next(), IMAP_STATUS, imap.pi.IMAPFolderName, IMAP_STATUS_QUERY) } go imap.doRequestResponse(command, responseCh, responseErrCh) select { case <-requestTimer.C: // request timed out. Start over. imap.Info("Request timed out. Starting over|msgCode=IMAP_POLL_REQ_TIMEDOUT") requestTimer.Stop() imap.cancelIDLE() sleepTime = 1 case err := <-responseErrCh: if err == IOTimeoutError { // just retry on an I/O Timeout. No need for the device to re-register sleepTime = 1 } else { imap.Info("Got error %s. 
Sending back LongPollReRegister|msgCode=IMAP_ERR_REREGISTER", err) errCh <- LongPollReRegister // erroring out... ask for reregister return } return case <-responseCh: if imap.hasNewEmail { imap.Info("Got mail. Sending LongPollNewMail|msgCode=IMAP_NEW_EMAIL") imap.hasNewEmail = false errCh <- LongPollNewMail return } case <-stopPollCh: // parent will close this, at which point this will trigger. imap.Info("Was told to stop. Stopping") return case <-stopAllCh: // parent will close this, at which point this will trigger. imap.Info("Was told to stop (allStop). Stopping") return } } } func (imap *IMAPClient) cancelIDLE() { if imap.isIdling { imap.Info("Cancelling outstanding IDLE request") err := imap.sendIMAPCommand(IMAP_DONE) if err != nil { imap.Warning("Error sending IMAP command %s while cancelling IDLE request: %s", IMAP_DONE, err) } } } func (imap *IMAPClient) UpdateRequestData(requestData []byte) { if len(requestData) > 0 && bytes.Compare(requestData, imap.pi.RequestData) != 0 { imap.pi.RequestData = requestData } } func (imap *IMAPClient) cancel() { imap.mutex.Lock() imap.cancelled = true if imap.tlsConn != nil { imap.cancelIDLE() imap.tlsConn.Close() imap.tlsConn = nil } imap.mutex.Unlock() } func (imap *IMAPClient) Cleanup() { imap.Debug("Cleaning up") imap.cancel() imap.pi.cleanup() imap.pi = nil }
{ imap.Info("IMAP Request cancelled. Exiting|msgCode=IMAP_REQ_CANCELLED") return }
conditional_block
imap.go
package Pinger import ( "bufio" "bytes" "crypto/tls" "encoding/base64" "errors" "fmt" "github.com/nachocove/Pinger/Utils" "github.com/nachocove/Pinger/Utils/Logging" "math/rand" "net" "net/url" "regexp" "strconv" "strings" "sync" "time" ) // IMAP Commands const ( IMAP_EXISTS string = "EXISTS" IMAP_EXPUNGE string = "EXPUNGE" IMAP_EXAMINE string = "EXAMINE" IMAP_IDLE string = "IDLE" IMAP_DONE string = "DONE" IMAP_NOOP string = "NOOP" IMAP_UIDNEXT string = "[UIDNEXT" IMAP_STATUS string = "STATUS" IMAP_STATUS_QUERY string = "(MESSAGES UIDNEXT)" ) // Timeout values for the Dial functions. const ( netTimeout = 30 * time.Second // Time to establish a TCP connection POLLING_INTERVAL = 30 replyTimeout = 300 * time.Second // Time to wait on server response ) type cmdTag struct { id []byte seq uint64 } type IMAPClient struct { debug bool logger *Logging.Logger pi *MailPingInformation wg *sync.WaitGroup mutex *sync.Mutex cancelled bool url *url.URL tlsConfig *tls.Config tlsConn *tls.Conn scanner *bufio.Scanner tag *cmdTag isIdling bool hasNewEmail bool } var prng *rand.Rand var commandTerminator []byte var IOTimeoutError error func init() { prng = rand.New(&prngSource{src: rand.NewSource(time.Now().UnixNano())}) commandTerminator = []byte("\r\n") IOTimeoutError = fmt.Errorf("I/O Timeout Error") } func (imap *IMAPClient) getLogPrefix() string { return imap.pi.getLogPrefix() + "|protocol=IMAP" + "|tag=" + string(imap.tag.id) + ":" + strconv.FormatUint(imap.tag.seq, 10) } func (imap *IMAPClient) Debug(format string, args ...interface{}) { imap.logger.Debug(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) } func (imap *IMAPClient) Info(format string, args ...interface{}) { imap.logger.Info(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) } func (imap *IMAPClient) Error(format string, args ...interface{}) { imap.logger.Error(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) 
} func (imap *IMAPClient) Warning(format string, args ...interface{}) { imap.logger.Warning(fmt.Sprintf("%s|message=%s", imap.getLogPrefix(), format), args...) } func NewIMAPClient(pi *MailPingInformation, wg *sync.WaitGroup, debug bool, logger *Logging.Logger) (*IMAPClient, error) { imap := IMAPClient{ debug: debug, logger: logger.Copy(), pi: pi, wg: wg, mutex: &sync.Mutex{}, cancelled: false, tag: genNewCmdTag(0), } imap.logger.SetCallDepth(1) imap.Info("Created new IMAP Client|msgCode=IMAP_CLIENT_CREATED") return &imap, nil } func (imap *IMAPClient) sendError(errCh chan error, err error) { logError(err, imap.logger) errCh <- err } type prngSource struct { mu sync.Mutex src rand.Source } func (r *prngSource) Int63() (n int64) { r.mu.Lock() n = r.src.Int63() r.mu.Unlock() return } func (r *prngSource) Seed(seed int64) { r.mu.Lock() r.src.Seed(seed) r.mu.Unlock() } func genNewCmdTag(n uint) *cmdTag { if n < 1 || 26 < n { n = 5 } id := make([]byte, n, n+20) for i, v := range prng.Perm(26)[:n] { id[i] = 'A' + byte(v) } return &cmdTag{id, 0} } func (t *cmdTag) Next() string
func (t *cmdTag) String() string { return fmt.Sprintf("%s%d", t.id, t.seq) } func (imap *IMAPClient) setupScanner() { imap.scanner = bufio.NewScanner(imap.tlsConn) imap.scanner.Split(bufio.ScanLines) } func (imap *IMAPClient) isContinueResponse(response string) bool { if len(response) > 0 && response[0] == '+' { return true } else { return false } } func (imap *IMAPClient) isOKResponse(response string) bool { tokens := strings.Split(response, " ") if len(tokens) >= 2 && tokens[1] == "OK" { return true } else { return false } } func (imap *IMAPClient) handleGreeting() error { imap.Debug("Handle Greeting") response, err := imap.getServerResponse(uint64(replyTimeout / time.Millisecond)) if err == nil { imap.Info("Connected|host=%s|tag=%s", imap.url.Host, imap.tag.id) if imap.isOKResponse(response) { imap.Info("Greeting from server: %s", response) return nil } else { err := fmt.Errorf("Did not get proper response from imap server|err=%s", response) return err } } return err } func (imap *IMAPClient) doImapAuth() (authSucess bool, err error) { imap.Info("Authenticating with authblob") decodedBlob, err := base64.StdEncoding.DecodeString(imap.pi.IMAPAuthenticationBlob) if err != nil { imap.Error("Error decoding AuthBlob") return false, err } responses, err := imap.doIMAPCommand(fmt.Sprintf("%s %s", imap.tag.Next(), decodedBlob), uint64(replyTimeout/time.Millisecond)) if err != nil { return false, err } if len(responses) > 0 { lastResponse := responses[len(responses)-1] if imap.isContinueResponse(lastResponse) { // auth failed imap.Debug("Authentication failed: %s", lastResponse) responses, err = imap.doIMAPCommand(" ", uint64(replyTimeout/time.Millisecond)) } if !imap.isOKResponse(lastResponse) { return false, err } } imap.Debug("Authentication successful|msgCode=IMAP_AUTH_SUCCESS") return true, nil } func (imap *IMAPClient) parseEXAMINEResponse(response string) (value uint32, token string) { tokens := strings.Split(response, " ") valueToken := "" if tokens[0] == "*" && 
tokens[2] == IMAP_EXISTS { valueToken = tokens[1] } else if tokens[0] == "*" && tokens[2] == IMAP_UIDNEXT { valueToken = tokens[3][:len(tokens[3])-1] } if valueToken != "" { value, err := strconv.Atoi(valueToken) if err != nil { imap.Warning("Cannot parse value from response : %s", response) } else { return uint32(value), tokens[2] } } return 0, "" } //* STATUS "INBOX" (MESSAGES 18 UIDNEXT 41) func (imap *IMAPClient) parseSTATUSResponse(response string) (uint32, uint32) { re := regexp.MustCompile(".*(MESSAGES (?P<messageCount>[0-9]+) UIDNEXT (?P<UIDNext>[0-9]+))") r2 := re.FindStringSubmatch(response) if len(r2) == 0 { return 0, 0 } messageCountStr := r2[2] UIDNextStr := r2[3] messageCount, err := strconv.Atoi(messageCountStr) if err != nil { imap.Warning("Cannot parse value from %s", messageCountStr) messageCount = 0 } UIDNext, err := strconv.Atoi(UIDNextStr) if err != nil { imap.Warning("Cannot parse value from %s", UIDNextStr) UIDNext = 0 } return uint32(messageCount), uint32(UIDNext) } func (imap *IMAPClient) parseIDLEResponse(response string) (value uint32, token string) { tokens := strings.Split(response, " ") if tokens[0] == "*" && (tokens[2] == IMAP_EXISTS || tokens[2] == IMAP_EXPUNGE) { value, err := strconv.Atoi(tokens[1]) if err != nil { imap.Warning("Cannot parse value from %s", response) } else { return uint32(value), tokens[2] } } return 0, "" } func (imap *IMAPClient) doExamine() error { command := fmt.Sprintf("%s %s %s", imap.tag.Next(), IMAP_EXAMINE, imap.pi.IMAPFolderName) imap.Debug("IMAPFolder=%s", imap.pi.IMAPFolderName) _, err := imap.doIMAPCommand(command, uint64(replyTimeout/time.Millisecond)) return err } func (imap *IMAPClient) sendIMAPCommand(command string) error { commandName := imap.getNameFromCommand(command) imap.Info("Sending IMAP Command to server|command=%s|msgCode=IMAP_COMMAND_SENT", commandName) //imap.Debug("Sending IMAP Command to server:[%s]", command) if commandName == "IDLE" { imap.Info("Setting isIdling to true.") 
imap.isIdling = true } if len(command) > 0 { _, err := imap.tlsConn.Write([]byte(command)) if err != nil { return err } _, err = imap.tlsConn.Write(commandTerminator) if err != nil { return err } } return nil } func (imap *IMAPClient) doIMAPCommand(command string, waitTime uint64) ([]string, error) { commandLines := strings.Split(command, "\n") var allResponses []string var err error for _, commandLine := range commandLines { err := imap.sendIMAPCommand(commandLine) if err != nil { imap.Warning("%s", err) return nil, err } if imap.cancelled == true { imap.Info("IMAP Command. Request cancelled. Exiting|msgCode=IMAP_COMMAND_CANCELLED") err = fmt.Errorf("Request cancelled") return nil, err } responses, err := imap.getServerResponses(command, waitTime) if err != nil { return nil, err } if allResponses == nil { allResponses = responses } else { allResponses = append(allResponses, responses...) } if len(responses) > 0 { lastResponse := responses[len(responses)-1] if !imap.isOKResponse(lastResponse) && !imap.isContinueResponse(lastResponse) { err := fmt.Errorf("Did not get proper response from imap server: %s", lastResponse) imap.Debug("%s", err) return allResponses, err } } else { err := fmt.Errorf("Did not get any response from imap server.") imap.Debug("%s", err) return allResponses, err } } return allResponses, err } func (imap *IMAPClient) processResponse(command string, response string) { commandName := imap.getNameFromCommand(command) switch commandName { case "IDLE": imap.Debug("Processing IDLE Response: [%s]", response) count, token := imap.parseIDLEResponse(response) if token == IMAP_EXPUNGE { imap.pi.IMAPEXISTSCount -= 1 imap.Info("%s received. 
Decrementing count|IMAPEXISTSCount=%d", IMAP_EXPUNGE, imap.pi.IMAPEXISTSCount) } else if token == IMAP_EXISTS && count != imap.pi.IMAPEXISTSCount { imap.Info("Current EXISTS count is different from starting EXISTS count."+ "Resetting count|currentIMAPEXISTSCount=%d|startingIMAPExistsCount=%d", count, imap.pi.IMAPEXISTSCount) imap.Info("Got new mail. Stopping IDLE|msgCode=IMAP_NEW_MAIL") imap.hasNewEmail = true imap.pi.IMAPEXISTSCount = count err := imap.sendIMAPCommand(IMAP_DONE) if err != nil { imap.Warning("Error sending IMAP Command|command=%s|err=%s", IMAP_DONE, err) } } case "EXAMINE": imap.Debug("Processing EXAMINE Response: [%s]", response) count, token := imap.parseEXAMINEResponse(response) if token == IMAP_EXISTS { imap.Info("Saving starting EXISTS count|IMAPEXISTSCount=%d||msgCode=IMAP_STARTING_EXISTS_COUNT", count) imap.pi.IMAPEXISTSCount = count } else if token == IMAP_UIDNEXT { imap.Info("Setting starting IMAPUIDNEXT|IMAPUIDNEXT=%d", count) imap.pi.IMAPUIDNEXT = count } case "STATUS": imap.Debug("Processing STATUS Response: [%s]", response) _, UIDNext := imap.parseSTATUSResponse(response) if UIDNext != 0 { if imap.pi.IMAPUIDNEXT == 0 { imap.Info("Setting starting IMAPUIDNEXT|IMAPUIDNEXT=%d", UIDNext) imap.pi.IMAPUIDNEXT = UIDNext } else if UIDNext != imap.pi.IMAPUIDNEXT { imap.Info("Current UIDNext is different from starting UIDNext."+ " Resetting UIDNext|currentUIDNext=%d|startingUIDNext=%d|msgCode=IMAP_RESET_UIDNEXT", UIDNext, imap.pi.IMAPUIDNEXT) imap.Info("Got new mail|msgCode=IMAP_NEW_MAIL") imap.hasNewEmail = true imap.pi.IMAPUIDNEXT = UIDNext } else { imap.Debug("Current UIDNext is the same as starting UIDNext|currentUIDNext=%d|startingUIDNext=%d", UIDNext, imap.pi.IMAPUIDNEXT) } } } } func (imap *IMAPClient) isFinalResponse(command string, response string) bool { tokens := strings.Split(command, " ") if len(response) >= 2 && response[0:2] == "+ " && imap.getNameFromCommand(command) != "IDLE" { return true } else if len(tokens) > 0 { token := 
tokens[0] if len(response) >= len(token) && token == response[0:len(token)] { return true } } return false } func (imap *IMAPClient) getNameFromCommand(command string) string { commandTokens := strings.Split(command, " ") if len(commandTokens) > 1 { return commandTokens[1] } return "" } func (imap *IMAPClient) getServerResponses(command string, waitTime uint64) ([]string, error) { completed := false responses := make([]string, 0) imap.Debug("Getting Server Responses") for completed == false { if imap.getNameFromCommand(command) == "IDLE" { waitTime = 0 imap.Debug("IDLE Command|timeout=%d", waitTime) } response, err := imap.getServerResponse(waitTime) if err != nil { imap.Debug("Returning err %s", err) return responses, err } else { if imap.getNameFromCommand(command) == "AUTHENTICATE" { imap.Debug("<%s command response redacted>", imap.getNameFromCommand(command)) } else { imap.Debug("IMAP Server Response is %s", response) } responses = append(responses, response) imap.processResponse(command, response) if imap.isFinalResponse(command, response) { if imap.getNameFromCommand(command) == "IDLE" { imap.Info("Setting isIdling to false|msgCode=IMAP_STOP_IDLE") imap.isIdling = false } for i, r := range responses { if imap.getNameFromCommand(command) == "AUTHENTICATE" { imap.Debug("%d: <%s command response redacted>", i, imap.getNameFromCommand(command)) } else { imap.Debug("%d: %s", i, r) } } break } } } return responses, nil } func (imap *IMAPClient) getServerResponse(waitTime uint64) (string, error) { imap.Debug("Getting server response|timeout=%d", waitTime) if waitTime > 0 { waitUntil := time.Now().Add(time.Duration(waitTime) * time.Millisecond) imap.tlsConn.SetReadDeadline(waitUntil) } for i := 0; ; i++ { ok := imap.scanner.Scan() if ok { break } else { err := imap.scanner.Err() if err == nil { return "", errors.New("EOF received") } nerr, ok := err.(net.Error) if ok && nerr.Timeout() { imap.Debug("Timeout error|err=%s", nerr) return "", err } else if ok && 
nerr.Temporary() { if i < 3 { // try three times imap.Info("Temporary error scanning for server response: %s. Will retry...", nerr) time.Sleep(time.Duration(1) * time.Second) } else { imap.Debug("Error scanning for server response: %s.", nerr) return "", err } } else { imap.Debug("Error scanning for server response: %s.", err) return "", err } } } response := imap.scanner.Text() return response, nil } func (imap *IMAPClient) doRequestResponse(request string, responseCh chan []string, responseErrCh chan error) { imap.Debug("Starting doRequestResponse") imap.wg.Add(1) defer Utils.RecoverCrash(imap.logger) imap.mutex.Lock() // prevents the longpoll from cancelling the request while we're still setting it up. unlockMutex := true defer func() { imap.Debug("Exiting doRequestResponse") imap.wg.Done() if unlockMutex { imap.mutex.Unlock() } }() var err error if imap == nil || imap.pi == nil { if imap.logger != nil { imap.Info("doRequestResponse called but structures cleaned up") } return } if imap.tlsConn == nil { imap.Info("doRequestResponse called but tls connection has been cleaned up") return } imap.mutex.Unlock() unlockMutex = false imap.Debug("Executing IMAP Command|timeout=%d", uint64(replyTimeout/time.Millisecond)) responses, err := imap.doIMAPCommand(request, uint64(replyTimeout/time.Millisecond)) if imap.cancelled == true { imap.Info("IMAP Request cancelled. 
Exiting|msgCode=IMAP_REQ_CANCELLED") return } if err != nil { if imap.isIdling { imap.isIdling = false } imap.Info("Request/Response Error: %s", err) nerr, ok := err.(net.Error) if ok && nerr.Timeout() { responseErrCh <- IOTimeoutError; } else { responseErrCh <- fmt.Errorf("Request/Response Error: %s", err) } return } responseCh <- responses return } func (imap *IMAPClient) setupConn() error { imap.Debug("Setting up TLS connection") if imap.tlsConn != nil { imap.tlsConn.Close() } if imap.url == nil { imapUrl, err := url.Parse(imap.pi.MailServerUrl) if err != nil { imap.Warning("err %s", err) return err } imap.url = imapUrl } host, _, _ := net.SplitHostPort(imap.url.Host) if imap.tlsConfig == nil { imap.tlsConfig = &tls.Config{ ServerName: host, RootCAs: globals.config.RootCerts(), } } conn, err := net.DialTimeout("tcp", imap.url.Host, netTimeout) if err == nil { imap.tlsConn = tls.Client(conn, imap.tlsConfig) if imap.tlsConn == nil { conn.Close() return fmt.Errorf("Cannot create TLS Connection") } } if err != nil { imap.Warning("err %s", err) return err } imap.setupScanner() err = imap.handleGreeting() if err != nil { imap.Warning("err %s", err) return err } return nil } func (imap *IMAPClient) LongPoll(stopPollCh, stopAllCh chan int, errCh chan error) { imap.Info("Starting LongPoll|msgCode=POLLING") if imap.isIdling { imap.Warning("Already idling. Returning|msgCode=IMAP_ALREADY_POLLING") return } imap.wg.Add(1) defer imap.wg.Done() defer Utils.RecoverCrash(imap.logger) defer func() { imap.Info("Stopping LongPoll.") imap.cancel() }() sleepTime := 0 if imap.pi.IMAPSupportsIdle { imap.Debug("IMAP Server supports IDLE") } else { imap.Debug("IMAP Server doesn't support IDLE. 
Resetting IMAP UIDNEXT|IMAPUIDNEXT=0|msgCode") } imap.pi.IMAPUIDNEXT = 0 for { if sleepTime > 0 { s := time.Duration(sleepTime) * time.Second imap.Debug("Sleeping %s before retry", s) time.Sleep(s) } sleepTime = POLLING_INTERVAL if imap.tlsConn == nil { err := imap.setupConn() if err != nil { imap.Error("Connection setup error: %v", err) errCh <- LongPollReRegister return } authSuccess, err := imap.doImapAuth() if err != nil { imap.Warning("Authentication error (%s). Telling client to re-register|msgCode=IMAP_AUTH_FAIL_REREGISTER", err) errCh <- LongPollReRegister return } if !authSuccess { imap.Info("Authentication failed. Telling client to re-register|msgCode=IMAP_AUTH_FAIL_REREGISTER") errCh <- LongPollReRegister return } } if imap.pi.IMAPSupportsIdle { imap.Debug("Supporting idle. Running Examine Command") err := imap.doExamine() if err != nil { imap.Warning("Examine failure: %v. Telling client to re-register|msgCode=IMAP_AUTH_FAIL_REREGISTER", err) errCh <- LongPollReRegister return } } imap.Info("Request timeout %d|msgCode=IMAP_POLL_REQ_TIMEDOUT_VALUE", imap.pi.ResponseTimeout) requestTimer := time.NewTimer(time.Duration(imap.pi.ResponseTimeout) * time.Millisecond) responseCh := make(chan []string) responseErrCh := make(chan error) command := IMAP_NOOP if imap.pi.IMAPSupportsIdle { command = fmt.Sprintf("%s %s", imap.tag.Next(), IMAP_IDLE) } else { command = fmt.Sprintf("%s %s %s %s", imap.tag.Next(), IMAP_STATUS, imap.pi.IMAPFolderName, IMAP_STATUS_QUERY) } go imap.doRequestResponse(command, responseCh, responseErrCh) select { case <-requestTimer.C: // request timed out. Start over. imap.Info("Request timed out. Starting over|msgCode=IMAP_POLL_REQ_TIMEDOUT") requestTimer.Stop() imap.cancelIDLE() sleepTime = 1 case err := <-responseErrCh: if err == IOTimeoutError { // just retry on an I/O Timeout. No need for the device to re-register sleepTime = 1 } else { imap.Info("Got error %s. 
Sending back LongPollReRegister|msgCode=IMAP_ERR_REREGISTER", err) errCh <- LongPollReRegister // erroring out... ask for reregister return } return case <-responseCh: if imap.hasNewEmail { imap.Info("Got mail. Sending LongPollNewMail|msgCode=IMAP_NEW_EMAIL") imap.hasNewEmail = false errCh <- LongPollNewMail return } case <-stopPollCh: // parent will close this, at which point this will trigger. imap.Info("Was told to stop. Stopping") return case <-stopAllCh: // parent will close this, at which point this will trigger. imap.Info("Was told to stop (allStop). Stopping") return } } } func (imap *IMAPClient) cancelIDLE() { if imap.isIdling { imap.Info("Cancelling outstanding IDLE request") err := imap.sendIMAPCommand(IMAP_DONE) if err != nil { imap.Warning("Error sending IMAP command %s while cancelling IDLE request: %s", IMAP_DONE, err) } } } func (imap *IMAPClient) UpdateRequestData(requestData []byte) { if len(requestData) > 0 && bytes.Compare(requestData, imap.pi.RequestData) != 0 { imap.pi.RequestData = requestData } } func (imap *IMAPClient) cancel() { imap.mutex.Lock() imap.cancelled = true if imap.tlsConn != nil { imap.cancelIDLE() imap.tlsConn.Close() imap.tlsConn = nil } imap.mutex.Unlock() } func (imap *IMAPClient) Cleanup() { imap.Debug("Cleaning up") imap.cancel() imap.pi.cleanup() imap.pi = nil }
{ t.seq++ return string(strconv.AppendUint(t.id, t.seq, 10)) }
identifier_body
singer.go
package singer import ( "context" "encoding/json" "errors" "fmt" "github.com/hashicorp/go-multierror" "github.com/jitsucom/jitsu/server/drivers/base" "github.com/jitsucom/jitsu/server/logging" "github.com/jitsucom/jitsu/server/safego" "github.com/jitsucom/jitsu/server/singer" "github.com/jitsucom/jitsu/server/uuid" "go.uber.org/atomic" "io" "io/ioutil" "os/exec" "path" "runtime/debug" "strings" "sync" "time" ) const ( stateFileName = "state.json" configFileName = "config.json" catalogFileName = "catalog.json" propertiesFileName = "properties.json" ) var ( blacklistStreamsByTap = map[string]map[string]bool{ "tap-slack": { "messages": true, }, } errNotReady = errors.New("Singer driver isn't ready yet. Tap is being installed..") ) type Singer struct { sync.RWMutex commands map[string]*exec.Cmd ctx context.Context sourceID string tap string configPath string catalogPath string propertiesPath string statePath string pathToConfigs string tableNamePrefix string streamTableNames map[string]string catalogDiscovered *atomic.Bool closed *atomic.Bool } func
() { base.RegisterDriver(base.SingerType, NewSinger) base.RegisterTestConnectionFunc(base.SingerType, TestSinger) } //NewSinger returns Singer driver and //1. writes json files (config, catalog, properties, state) if string/raw json was provided //2. runs discover and collects catalog.json //2. creates venv //3. in another goroutine: updates pip, install singer tap func NewSinger(ctx context.Context, sourceConfig *base.SourceConfig, collection *base.Collection) (base.Driver, error) { config := &SingerConfig{} err := base.UnmarshalConfig(sourceConfig.Config, config) if err != nil { return nil, err } if err := config.Validate(); err != nil { return nil, err } if singer.Instance == nil { return nil, errors.New("singer-bridge must be configured") } pathToConfigs := path.Join(singer.Instance.VenvDir, sourceConfig.SourceID, config.Tap) if err := logging.EnsureDir(pathToConfigs); err != nil { return nil, fmt.Errorf("Error creating singer venv config dir: %v", err) } //parse singer config as file path configPath, err := parseJSONAsFile(path.Join(pathToConfigs, configFileName), config.Config) if err != nil { return nil, fmt.Errorf("Error parsing singer config [%v]: %v", config.Config, err) } //parse singer catalog as file path catalogPath, err := parseJSONAsFile(path.Join(pathToConfigs, catalogFileName), config.Catalog) if err != nil { return nil, fmt.Errorf("Error parsing singer catalog [%v]: %v", config.Catalog, err) } // ** Table names mapping ** tableNameMappings := config.StreamTableNames if catalogPath != "" { //extract table names mapping from catalog.json tableNameMappingsFromCatalog, err := extractTableNamesMapping(catalogPath) if err != nil { logging.Errorf("[%s] Error parsing destination table names from Singer catalog.json: %v", sourceConfig.SourceID, err) } //override configuration for stream, tableName := range tableNameMappingsFromCatalog { tableNameMappings[stream] = tableName } } if len(tableNameMappings) > 0 { b, _ := json.MarshalIndent(tableNameMappings, 
"", " ") logging.Infof("[%s] configured Singer stream - table names mapping: %s", sourceConfig.SourceID, string(b)) } //parse singer properties as file path propertiesPath, err := parseJSONAsFile(path.Join(pathToConfigs, propertiesFileName), config.Properties) if err != nil { return nil, fmt.Errorf("Error parsing singer properties [%v]: %v", config.Properties, err) } //parse singer state as file path statePath, err := parseJSONAsFile(path.Join(pathToConfigs, stateFileName), config.InitialState) if err != nil { return nil, fmt.Errorf("Error parsing singer initial state [%v]: %v", config.InitialState, err) } catalogDiscovered := atomic.NewBool(false) if catalogPath != "" || propertiesPath != "" { catalogDiscovered.Store(true) } s := &Singer{ ctx: ctx, commands: map[string]*exec.Cmd{}, sourceID: sourceConfig.SourceID, tap: config.Tap, configPath: configPath, catalogPath: catalogPath, propertiesPath: propertiesPath, statePath: statePath, tableNamePrefix: config.StreamTableNamesPrefix, pathToConfigs: pathToConfigs, streamTableNames: tableNameMappings, catalogDiscovered: catalogDiscovered, closed: atomic.NewBool(false), } safego.Run(s.EnsureTapAndCatalog) return s, nil } //TestSinger tests singer connection (runs discover) if tap has been installed otherwise returns nil func TestSinger(sourceConfig *base.SourceConfig) error { driver, err := NewSinger(context.Background(), sourceConfig, nil) if err != nil { return err } defer driver.Close() singerDriver, _ := driver.(*Singer) ready, _ := singerDriver.Ready() if !ready { return nil } outWriter := logging.NewStringWriter() errWriter := logging.NewStringWriter() command := path.Join(singer.Instance.VenvDir, singerDriver.tap, "bin", singerDriver.tap) err = singer.Instance.ExecCmd(command, outWriter, errWriter, "-c", singerDriver.configPath, "--discover") if err != nil { return fmt.Errorf("Error singer --discover: %v. 
%s", err, errWriter.String()) } return nil } //EnsureTapAndCatalog ensures Singer tap via singer.Instance // and does discover if catalog wasn't provided func (s *Singer) EnsureTapAndCatalog() { singer.Instance.EnsureTap(s.tap) for { if s.closed.Load() { break } if s.catalogDiscovered.Load() { break } if !singer.Instance.IsTapReady(s.tap) { time.Sleep(time.Second) continue } catalogPath, propertiesPath, err := doDiscover(s.sourceID, s.tap, s.pathToConfigs, s.configPath) if err != nil { logging.Errorf("[%s] Error configuring Singer: %v", s.sourceID, err) time.Sleep(time.Minute) continue } s.catalogPath = catalogPath s.propertiesPath = propertiesPath s.catalogDiscovered.Store(true) return } } //GetTableNamePrefix returns stream table name prefix or sourceID_ func (s *Singer) GetTableNamePrefix() string { //put as prefix + stream if prefix exist if s.tableNamePrefix != "" { return s.tableNamePrefix } return s.sourceID + "_" } //GetCollectionTable unsupported func (s *Singer) GetCollectionTable() string { return "" } func (s *Singer) GetCollectionMetaKey() string { return s.tap } //GetAllAvailableIntervals unsupported func (s *Singer) GetAllAvailableIntervals() ([]*base.TimeInterval, error) { return nil, errors.New("Singer driver doesn't support GetAllAvailableIntervals() func. Please use SingerTask") } //GetObjectsFor unsupported func (s *Singer) GetObjectsFor(interval *base.TimeInterval) ([]map[string]interface{}, error) { return nil, errors.New("Singer driver doesn't support GetObjectsFor() func. 
Please use SingerTask") } //Ready returns true if catalog is discovered and tap is installed func (s *Singer) Ready() (bool, error) { if s.catalogDiscovered.Load() && singer.Instance.IsTapReady(s.tap) { return true, nil } return false, errNotReady } func (s *Singer) GetTap() string { return s.tap } func (s *Singer) Load(state string, taskLogger logging.TaskLogger, portionConsumer singer.PortionConsumer) error { if s.closed.Load() { return errors.New("Singer has already been closed") } ready, readyErr := s.Ready() if !ready { return readyErr } //update tap if err := singer.Instance.UpdateTap(s.tap); err != nil { return fmt.Errorf("Error updating singer tap [%s]: %v", s.tap, err) } //override initial state with existing one and put it to a file var statePath string var err error if state != "" { statePath, err = parseJSONAsFile(path.Join(singer.Instance.VenvDir, s.sourceID, s.tap, stateFileName), state) if err != nil { return fmt.Errorf("Error parsing singer state %s: %v", state, err) } } else { //put initial state statePath = s.statePath } args := []string{"-c", s.configPath} if s.catalogPath != "" { args = append(args, "--catalog", s.catalogPath) } if s.propertiesPath != "" { args = append(args, "-p", s.propertiesPath) } if statePath != "" { args = append(args, "--state", statePath) } command := path.Join(singer.Instance.VenvDir, s.tap, "bin", s.tap) taskLogger.INFO("exec singer %s %s", command, strings.Join(args, " ")) //exec cmd and analyze response from stdout & stderr syncCmd := exec.Command(command, args...) 
stdout, _ := syncCmd.StdoutPipe() defer stdout.Close() stderr, _ := syncCmd.StderrPipe() defer stderr.Close() commandID := uuid.New() s.Lock() s.commands[commandID] = syncCmd s.Unlock() defer func() { s.Lock() delete(s.commands, commandID) s.Unlock() }() err = syncCmd.Start() if err != nil { return err } var wg sync.WaitGroup var parsingErr error //writing result (singer writes result to stdout) wg.Add(1) safego.Run(func() { defer wg.Done() defer func() { if r := recover(); r != nil { logging.Error("panic in singer task") logging.Error(string(debug.Stack())) s.logAndKill(taskLogger, syncCmd, r) return } }() parsingErr = singer.StreamParseOutput(stdout, portionConsumer, taskLogger) if parsingErr != nil { s.logAndKill(taskLogger, syncCmd, parsingErr) } }) dualWriter := logging.Dual{FileWriter: taskLogger, Stdout: logging.NewPrefixDateTimeProxy(fmt.Sprintf("[%s]", s.sourceID), singer.Instance.LogWriter)} //writing process logs (singer writes process logs to stderr) wg.Add(1) safego.Run(func() { defer wg.Done() io.Copy(dualWriter, stderr) }) wg.Wait() err = syncCmd.Wait() if err != nil { return err } if parsingErr != nil { return parsingErr } return nil } func (s *Singer) Type() string { return base.SingerType } func (s *Singer) Close() (multiErr error) { s.closed.Store(true) s.Lock() for _, command := range s.commands { logging.Infof("[%s] killing process: %s", s.sourceID, command.String()) if err := command.Process.Kill(); err != nil { multiErr = multierror.Append(multiErr, fmt.Errorf("[%s] Error killing singer sync command: %v", s.sourceID, err)) } } s.Unlock() return multiErr } func (s *Singer) GetStreamTableNameMapping() map[string]string { result := map[string]string{} for name, value := range s.streamTableNames { result[name] = value } return result } func (s *Singer) logAndKill(taskLogger logging.TaskLogger, syncCmd *exec.Cmd, parsingErr interface{}) { taskLogger.ERROR("Parse output error: %v. 
Process will be killed", parsingErr) logging.Errorf("[%s_%s] parse output error: %v. Process will be killed", s.sourceID, s.tap, parsingErr) killErr := syncCmd.Process.Kill() if killErr != nil { taskLogger.ERROR("Error killing process: %v", killErr) logging.Errorf("[%s_%s] error killing process: %v", s.sourceID, s.tap, killErr) } } //doDiscover discovers tap catalog and returns catalog and properties paths //applies blacklist streams to taps and make other streams {"selected": true} func doDiscover(sourceID, tap, pathToConfigs, configFilePath string) (string, string, error) { if !singer.Instance.IsTapReady(tap) { return "", "", errNotReady } outWriter := logging.NewStringWriter() errStrWriter := logging.NewStringWriter() dualStdErrWriter := logging.Dual{FileWriter: errStrWriter, Stdout: logging.NewPrefixDateTimeProxy(fmt.Sprintf("[%s]", sourceID), singer.Instance.LogWriter)} command := path.Join(singer.Instance.VenvDir, tap, "bin", tap) err := singer.Instance.ExecCmd(command, outWriter, dualStdErrWriter, "-c", configFilePath, "--discover") if err != nil { return "", "", fmt.Errorf("Error singer --discover: %v. 
%s", err, errStrWriter.String()) } catalog := &SingerRawCatalog{} if err := json.Unmarshal(outWriter.Bytes(), &catalog); err != nil { return "", "", fmt.Errorf("Error unmarshalling catalog %s output: %v", outWriter.String(), err) } blackListStreams, ok := blacklistStreamsByTap[tap] if !ok { blackListStreams = map[string]bool{} } for _, stream := range catalog.Streams { streamName, ok := stream["stream"] if ok { if _, ok := blackListStreams[fmt.Sprint(streamName)]; ok { continue } } else { logging.Warnf("Stream [%v] doesn't have 'stream' name", stream) } //put selected=true into 'schema' schemaStruct, ok := stream["schema"] if !ok { return "", "", fmt.Errorf("Malformed discovered catalog structure %s: key 'schema' doesn't exist", outWriter.String()) } schemaObj, ok := schemaStruct.(map[string]interface{}) if !ok { return "", "", fmt.Errorf("Malformed discovered catalog structure %s: value under key 'schema' must be object: %T", outWriter.String(), schemaStruct) } schemaObj["selected"] = true //put selected=true into every 'metadata' object metadataArrayIface, ok := stream["metadata"] if ok { metadataArray, ok := metadataArrayIface.([]interface{}) if ok { for _, metadata := range metadataArray { metadataObj, ok := metadata.(map[string]interface{}) if ok { innerMetadata, ok := metadataObj["metadata"] if ok { innerMetadataObj, ok := innerMetadata.(map[string]interface{}) if ok { innerMetadataObj["selected"] = true } } } } } } } b, _ := json.MarshalIndent(catalog, "", " ") //write singer catalog as file path catalogPath, err := parseJSONAsFile(path.Join(pathToConfigs, catalogFileName), string(b)) if err != nil { return "", "", fmt.Errorf("Error writing discovered singer catalog [%v]: %v", string(b), err) } //write singer properties as file path propertiesPath, err := parseJSONAsFile(path.Join(pathToConfigs, propertiesFileName), string(b)) if err != nil { return "", "", fmt.Errorf("Error writing discovered singer properties [%v]: %v", string(b), err) } return 
catalogPath, propertiesPath, nil } //parse value and write it to a json file //return path to created json file or return value if it is already path to json file //or empty string if value is nil func parseJSONAsFile(newPath string, value interface{}) (string, error) { if value == nil { return "", nil } switch value.(type) { case map[string]interface{}: payload := value.(map[string]interface{}) b, err := json.Marshal(payload) if err != nil { return "", fmt.Errorf("Malformed value: %v", err) } return newPath, ioutil.WriteFile(newPath, b, 0644) case string: payload := value.(string) if strings.HasPrefix(payload, "{") { return newPath, ioutil.WriteFile(newPath, []byte(payload), 0644) } //already file return payload, nil default: return "", errors.New("Unknown type. Value must be path to json file or raw json") } } func extractTableNamesMapping(catalogPath string) (map[string]string, error) { catalogBytes, err := ioutil.ReadFile(catalogPath) if err != nil { return nil, fmt.Errorf("Error reading catalog file: %v", err) } catalog := &SingerCatalog{} err = json.Unmarshal(catalogBytes, catalog) if err != nil { return nil, err } streamTableNamesMapping := map[string]string{} for _, stream := range catalog.Streams { if stream.DestinationTableName != "" { //add mapping stream if stream.Stream != "" { streamTableNamesMapping[stream.Stream] = stream.DestinationTableName } //add mapping tap_stream_id if stream.TapStreamID != "" { streamTableNamesMapping[stream.TapStreamID] = stream.DestinationTableName } } } return streamTableNamesMapping, nil }
init
identifier_name
singer.go
package singer import ( "context" "encoding/json" "errors" "fmt" "github.com/hashicorp/go-multierror" "github.com/jitsucom/jitsu/server/drivers/base" "github.com/jitsucom/jitsu/server/logging" "github.com/jitsucom/jitsu/server/safego" "github.com/jitsucom/jitsu/server/singer" "github.com/jitsucom/jitsu/server/uuid" "go.uber.org/atomic" "io" "io/ioutil" "os/exec" "path" "runtime/debug" "strings" "sync" "time" ) const ( stateFileName = "state.json" configFileName = "config.json" catalogFileName = "catalog.json" propertiesFileName = "properties.json" ) var ( blacklistStreamsByTap = map[string]map[string]bool{ "tap-slack": { "messages": true, }, } errNotReady = errors.New("Singer driver isn't ready yet. Tap is being installed..") ) type Singer struct { sync.RWMutex commands map[string]*exec.Cmd ctx context.Context sourceID string tap string configPath string catalogPath string propertiesPath string statePath string pathToConfigs string tableNamePrefix string streamTableNames map[string]string catalogDiscovered *atomic.Bool closed *atomic.Bool } func init() { base.RegisterDriver(base.SingerType, NewSinger) base.RegisterTestConnectionFunc(base.SingerType, TestSinger) } //NewSinger returns Singer driver and //1. writes json files (config, catalog, properties, state) if string/raw json was provided //2. runs discover and collects catalog.json //2. creates venv //3. 
in another goroutine: updates pip, install singer tap func NewSinger(ctx context.Context, sourceConfig *base.SourceConfig, collection *base.Collection) (base.Driver, error) { config := &SingerConfig{} err := base.UnmarshalConfig(sourceConfig.Config, config) if err != nil { return nil, err } if err := config.Validate(); err != nil { return nil, err } if singer.Instance == nil { return nil, errors.New("singer-bridge must be configured") } pathToConfigs := path.Join(singer.Instance.VenvDir, sourceConfig.SourceID, config.Tap) if err := logging.EnsureDir(pathToConfigs); err != nil { return nil, fmt.Errorf("Error creating singer venv config dir: %v", err) } //parse singer config as file path configPath, err := parseJSONAsFile(path.Join(pathToConfigs, configFileName), config.Config) if err != nil { return nil, fmt.Errorf("Error parsing singer config [%v]: %v", config.Config, err) } //parse singer catalog as file path catalogPath, err := parseJSONAsFile(path.Join(pathToConfigs, catalogFileName), config.Catalog) if err != nil { return nil, fmt.Errorf("Error parsing singer catalog [%v]: %v", config.Catalog, err) } // ** Table names mapping ** tableNameMappings := config.StreamTableNames if catalogPath != "" { //extract table names mapping from catalog.json tableNameMappingsFromCatalog, err := extractTableNamesMapping(catalogPath) if err != nil { logging.Errorf("[%s] Error parsing destination table names from Singer catalog.json: %v", sourceConfig.SourceID, err) } //override configuration for stream, tableName := range tableNameMappingsFromCatalog { tableNameMappings[stream] = tableName } } if len(tableNameMappings) > 0 { b, _ := json.MarshalIndent(tableNameMappings, "", " ") logging.Infof("[%s] configured Singer stream - table names mapping: %s", sourceConfig.SourceID, string(b)) } //parse singer properties as file path propertiesPath, err := parseJSONAsFile(path.Join(pathToConfigs, propertiesFileName), config.Properties) if err != nil { return nil, fmt.Errorf("Error parsing 
singer properties [%v]: %v", config.Properties, err) } //parse singer state as file path statePath, err := parseJSONAsFile(path.Join(pathToConfigs, stateFileName), config.InitialState) if err != nil { return nil, fmt.Errorf("Error parsing singer initial state [%v]: %v", config.InitialState, err) } catalogDiscovered := atomic.NewBool(false) if catalogPath != "" || propertiesPath != "" { catalogDiscovered.Store(true) } s := &Singer{ ctx: ctx, commands: map[string]*exec.Cmd{}, sourceID: sourceConfig.SourceID, tap: config.Tap, configPath: configPath, catalogPath: catalogPath, propertiesPath: propertiesPath, statePath: statePath, tableNamePrefix: config.StreamTableNamesPrefix, pathToConfigs: pathToConfigs, streamTableNames: tableNameMappings, catalogDiscovered: catalogDiscovered, closed: atomic.NewBool(false), } safego.Run(s.EnsureTapAndCatalog) return s, nil } //TestSinger tests singer connection (runs discover) if tap has been installed otherwise returns nil func TestSinger(sourceConfig *base.SourceConfig) error { driver, err := NewSinger(context.Background(), sourceConfig, nil) if err != nil { return err } defer driver.Close() singerDriver, _ := driver.(*Singer) ready, _ := singerDriver.Ready() if !ready { return nil } outWriter := logging.NewStringWriter() errWriter := logging.NewStringWriter() command := path.Join(singer.Instance.VenvDir, singerDriver.tap, "bin", singerDriver.tap) err = singer.Instance.ExecCmd(command, outWriter, errWriter, "-c", singerDriver.configPath, "--discover") if err != nil { return fmt.Errorf("Error singer --discover: %v. 
%s", err, errWriter.String()) } return nil } //EnsureTapAndCatalog ensures Singer tap via singer.Instance // and does discover if catalog wasn't provided func (s *Singer) EnsureTapAndCatalog() { singer.Instance.EnsureTap(s.tap) for { if s.closed.Load() { break } if s.catalogDiscovered.Load() { break } if !singer.Instance.IsTapReady(s.tap) { time.Sleep(time.Second) continue } catalogPath, propertiesPath, err := doDiscover(s.sourceID, s.tap, s.pathToConfigs, s.configPath) if err != nil { logging.Errorf("[%s] Error configuring Singer: %v", s.sourceID, err) time.Sleep(time.Minute) continue } s.catalogPath = catalogPath s.propertiesPath = propertiesPath s.catalogDiscovered.Store(true) return } } //GetTableNamePrefix returns stream table name prefix or sourceID_ func (s *Singer) GetTableNamePrefix() string { //put as prefix + stream if prefix exist if s.tableNamePrefix != "" { return s.tableNamePrefix } return s.sourceID + "_" } //GetCollectionTable unsupported func (s *Singer) GetCollectionTable() string { return "" } func (s *Singer) GetCollectionMetaKey() string { return s.tap } //GetAllAvailableIntervals unsupported func (s *Singer) GetAllAvailableIntervals() ([]*base.TimeInterval, error) { return nil, errors.New("Singer driver doesn't support GetAllAvailableIntervals() func. Please use SingerTask") } //GetObjectsFor unsupported func (s *Singer) GetObjectsFor(interval *base.TimeInterval) ([]map[string]interface{}, error) { return nil, errors.New("Singer driver doesn't support GetObjectsFor() func. 
Please use SingerTask") } //Ready returns true if catalog is discovered and tap is installed func (s *Singer) Ready() (bool, error) { if s.catalogDiscovered.Load() && singer.Instance.IsTapReady(s.tap) { return true, nil } return false, errNotReady } func (s *Singer) GetTap() string { return s.tap } func (s *Singer) Load(state string, taskLogger logging.TaskLogger, portionConsumer singer.PortionConsumer) error { if s.closed.Load() { return errors.New("Singer has already been closed") } ready, readyErr := s.Ready() if !ready { return readyErr } //update tap if err := singer.Instance.UpdateTap(s.tap); err != nil { return fmt.Errorf("Error updating singer tap [%s]: %v", s.tap, err) } //override initial state with existing one and put it to a file var statePath string var err error if state != "" { statePath, err = parseJSONAsFile(path.Join(singer.Instance.VenvDir, s.sourceID, s.tap, stateFileName), state) if err != nil { return fmt.Errorf("Error parsing singer state %s: %v", state, err) } } else { //put initial state statePath = s.statePath } args := []string{"-c", s.configPath} if s.catalogPath != "" { args = append(args, "--catalog", s.catalogPath) } if s.propertiesPath != "" { args = append(args, "-p", s.propertiesPath) } if statePath != "" { args = append(args, "--state", statePath) } command := path.Join(singer.Instance.VenvDir, s.tap, "bin", s.tap) taskLogger.INFO("exec singer %s %s", command, strings.Join(args, " ")) //exec cmd and analyze response from stdout & stderr syncCmd := exec.Command(command, args...) 
stdout, _ := syncCmd.StdoutPipe() defer stdout.Close() stderr, _ := syncCmd.StderrPipe() defer stderr.Close() commandID := uuid.New() s.Lock() s.commands[commandID] = syncCmd s.Unlock() defer func() { s.Lock() delete(s.commands, commandID) s.Unlock() }() err = syncCmd.Start() if err != nil { return err } var wg sync.WaitGroup var parsingErr error //writing result (singer writes result to stdout) wg.Add(1) safego.Run(func() { defer wg.Done() defer func() { if r := recover(); r != nil { logging.Error("panic in singer task") logging.Error(string(debug.Stack())) s.logAndKill(taskLogger, syncCmd, r) return } }() parsingErr = singer.StreamParseOutput(stdout, portionConsumer, taskLogger) if parsingErr != nil { s.logAndKill(taskLogger, syncCmd, parsingErr) } }) dualWriter := logging.Dual{FileWriter: taskLogger, Stdout: logging.NewPrefixDateTimeProxy(fmt.Sprintf("[%s]", s.sourceID), singer.Instance.LogWriter)} //writing process logs (singer writes process logs to stderr) wg.Add(1) safego.Run(func() { defer wg.Done() io.Copy(dualWriter, stderr) }) wg.Wait() err = syncCmd.Wait() if err != nil { return err } if parsingErr != nil { return parsingErr } return nil } func (s *Singer) Type() string { return base.SingerType } func (s *Singer) Close() (multiErr error) { s.closed.Store(true) s.Lock() for _, command := range s.commands { logging.Infof("[%s] killing process: %s", s.sourceID, command.String()) if err := command.Process.Kill(); err != nil { multiErr = multierror.Append(multiErr, fmt.Errorf("[%s] Error killing singer sync command: %v", s.sourceID, err)) } } s.Unlock() return multiErr } func (s *Singer) GetStreamTableNameMapping() map[string]string { result := map[string]string{} for name, value := range s.streamTableNames { result[name] = value } return result } func (s *Singer) logAndKill(taskLogger logging.TaskLogger, syncCmd *exec.Cmd, parsingErr interface{}) { taskLogger.ERROR("Parse output error: %v. 
Process will be killed", parsingErr) logging.Errorf("[%s_%s] parse output error: %v. Process will be killed", s.sourceID, s.tap, parsingErr) killErr := syncCmd.Process.Kill() if killErr != nil { taskLogger.ERROR("Error killing process: %v", killErr) logging.Errorf("[%s_%s] error killing process: %v", s.sourceID, s.tap, killErr) } } //doDiscover discovers tap catalog and returns catalog and properties paths //applies blacklist streams to taps and make other streams {"selected": true} func doDiscover(sourceID, tap, pathToConfigs, configFilePath string) (string, string, error) { if !singer.Instance.IsTapReady(tap) { return "", "", errNotReady } outWriter := logging.NewStringWriter() errStrWriter := logging.NewStringWriter() dualStdErrWriter := logging.Dual{FileWriter: errStrWriter, Stdout: logging.NewPrefixDateTimeProxy(fmt.Sprintf("[%s]", sourceID), singer.Instance.LogWriter)} command := path.Join(singer.Instance.VenvDir, tap, "bin", tap) err := singer.Instance.ExecCmd(command, outWriter, dualStdErrWriter, "-c", configFilePath, "--discover") if err != nil { return "", "", fmt.Errorf("Error singer --discover: %v. 
%s", err, errStrWriter.String()) } catalog := &SingerRawCatalog{} if err := json.Unmarshal(outWriter.Bytes(), &catalog); err != nil { return "", "", fmt.Errorf("Error unmarshalling catalog %s output: %v", outWriter.String(), err) } blackListStreams, ok := blacklistStreamsByTap[tap] if !ok { blackListStreams = map[string]bool{} } for _, stream := range catalog.Streams { streamName, ok := stream["stream"] if ok { if _, ok := blackListStreams[fmt.Sprint(streamName)]; ok { continue } } else { logging.Warnf("Stream [%v] doesn't have 'stream' name", stream) } //put selected=true into 'schema' schemaStruct, ok := stream["schema"] if !ok { return "", "", fmt.Errorf("Malformed discovered catalog structure %s: key 'schema' doesn't exist", outWriter.String()) } schemaObj, ok := schemaStruct.(map[string]interface{}) if !ok { return "", "", fmt.Errorf("Malformed discovered catalog structure %s: value under key 'schema' must be object: %T", outWriter.String(), schemaStruct) } schemaObj["selected"] = true //put selected=true into every 'metadata' object metadataArrayIface, ok := stream["metadata"] if ok { metadataArray, ok := metadataArrayIface.([]interface{}) if ok { for _, metadata := range metadataArray { metadataObj, ok := metadata.(map[string]interface{}) if ok { innerMetadata, ok := metadataObj["metadata"] if ok { innerMetadataObj, ok := innerMetadata.(map[string]interface{}) if ok { innerMetadataObj["selected"] = true } } } } } } } b, _ := json.MarshalIndent(catalog, "", " ") //write singer catalog as file path catalogPath, err := parseJSONAsFile(path.Join(pathToConfigs, catalogFileName), string(b)) if err != nil { return "", "", fmt.Errorf("Error writing discovered singer catalog [%v]: %v", string(b), err) } //write singer properties as file path propertiesPath, err := parseJSONAsFile(path.Join(pathToConfigs, propertiesFileName), string(b)) if err != nil { return "", "", fmt.Errorf("Error writing discovered singer properties [%v]: %v", string(b), err) } return 
catalogPath, propertiesPath, nil } //parse value and write it to a json file //return path to created json file or return value if it is already path to json file //or empty string if value is nil func parseJSONAsFile(newPath string, value interface{}) (string, error) { if value == nil { return "", nil } switch value.(type) { case map[string]interface{}: payload := value.(map[string]interface{}) b, err := json.Marshal(payload) if err != nil { return "", fmt.Errorf("Malformed value: %v", err) } return newPath, ioutil.WriteFile(newPath, b, 0644) case string: payload := value.(string) if strings.HasPrefix(payload, "{") { return newPath, ioutil.WriteFile(newPath, []byte(payload), 0644) } //already file return payload, nil default: return "", errors.New("Unknown type. Value must be path to json file or raw json") } } func extractTableNamesMapping(catalogPath string) (map[string]string, error)
{ catalogBytes, err := ioutil.ReadFile(catalogPath) if err != nil { return nil, fmt.Errorf("Error reading catalog file: %v", err) } catalog := &SingerCatalog{} err = json.Unmarshal(catalogBytes, catalog) if err != nil { return nil, err } streamTableNamesMapping := map[string]string{} for _, stream := range catalog.Streams { if stream.DestinationTableName != "" { //add mapping stream if stream.Stream != "" { streamTableNamesMapping[stream.Stream] = stream.DestinationTableName } //add mapping tap_stream_id if stream.TapStreamID != "" { streamTableNamesMapping[stream.TapStreamID] = stream.DestinationTableName } } } return streamTableNamesMapping, nil }
identifier_body
singer.go
package singer import ( "context" "encoding/json" "errors" "fmt" "github.com/hashicorp/go-multierror" "github.com/jitsucom/jitsu/server/drivers/base" "github.com/jitsucom/jitsu/server/logging" "github.com/jitsucom/jitsu/server/safego" "github.com/jitsucom/jitsu/server/singer" "github.com/jitsucom/jitsu/server/uuid" "go.uber.org/atomic" "io" "io/ioutil" "os/exec" "path" "runtime/debug" "strings" "sync" "time" ) const ( stateFileName = "state.json" configFileName = "config.json" catalogFileName = "catalog.json" propertiesFileName = "properties.json" ) var ( blacklistStreamsByTap = map[string]map[string]bool{ "tap-slack": { "messages": true, }, } errNotReady = errors.New("Singer driver isn't ready yet. Tap is being installed..") ) type Singer struct { sync.RWMutex commands map[string]*exec.Cmd ctx context.Context sourceID string tap string configPath string catalogPath string propertiesPath string statePath string pathToConfigs string tableNamePrefix string streamTableNames map[string]string catalogDiscovered *atomic.Bool closed *atomic.Bool } func init() { base.RegisterDriver(base.SingerType, NewSinger) base.RegisterTestConnectionFunc(base.SingerType, TestSinger) } //NewSinger returns Singer driver and //1. writes json files (config, catalog, properties, state) if string/raw json was provided //2. runs discover and collects catalog.json //2. creates venv //3. 
in another goroutine: updates pip, install singer tap func NewSinger(ctx context.Context, sourceConfig *base.SourceConfig, collection *base.Collection) (base.Driver, error) { config := &SingerConfig{} err := base.UnmarshalConfig(sourceConfig.Config, config) if err != nil { return nil, err } if err := config.Validate(); err != nil { return nil, err } if singer.Instance == nil { return nil, errors.New("singer-bridge must be configured") } pathToConfigs := path.Join(singer.Instance.VenvDir, sourceConfig.SourceID, config.Tap) if err := logging.EnsureDir(pathToConfigs); err != nil { return nil, fmt.Errorf("Error creating singer venv config dir: %v", err) } //parse singer config as file path configPath, err := parseJSONAsFile(path.Join(pathToConfigs, configFileName), config.Config) if err != nil { return nil, fmt.Errorf("Error parsing singer config [%v]: %v", config.Config, err) } //parse singer catalog as file path catalogPath, err := parseJSONAsFile(path.Join(pathToConfigs, catalogFileName), config.Catalog) if err != nil { return nil, fmt.Errorf("Error parsing singer catalog [%v]: %v", config.Catalog, err) } // ** Table names mapping ** tableNameMappings := config.StreamTableNames if catalogPath != "" { //extract table names mapping from catalog.json tableNameMappingsFromCatalog, err := extractTableNamesMapping(catalogPath) if err != nil { logging.Errorf("[%s] Error parsing destination table names from Singer catalog.json: %v", sourceConfig.SourceID, err) } //override configuration for stream, tableName := range tableNameMappingsFromCatalog { tableNameMappings[stream] = tableName } } if len(tableNameMappings) > 0 { b, _ := json.MarshalIndent(tableNameMappings, "", " ") logging.Infof("[%s] configured Singer stream - table names mapping: %s", sourceConfig.SourceID, string(b)) } //parse singer properties as file path propertiesPath, err := parseJSONAsFile(path.Join(pathToConfigs, propertiesFileName), config.Properties) if err != nil { return nil, fmt.Errorf("Error parsing 
singer properties [%v]: %v", config.Properties, err) } //parse singer state as file path statePath, err := parseJSONAsFile(path.Join(pathToConfigs, stateFileName), config.InitialState) if err != nil { return nil, fmt.Errorf("Error parsing singer initial state [%v]: %v", config.InitialState, err) } catalogDiscovered := atomic.NewBool(false) if catalogPath != "" || propertiesPath != "" { catalogDiscovered.Store(true) } s := &Singer{ ctx: ctx, commands: map[string]*exec.Cmd{}, sourceID: sourceConfig.SourceID, tap: config.Tap, configPath: configPath, catalogPath: catalogPath, propertiesPath: propertiesPath, statePath: statePath, tableNamePrefix: config.StreamTableNamesPrefix, pathToConfigs: pathToConfigs, streamTableNames: tableNameMappings, catalogDiscovered: catalogDiscovered, closed: atomic.NewBool(false), } safego.Run(s.EnsureTapAndCatalog) return s, nil } //TestSinger tests singer connection (runs discover) if tap has been installed otherwise returns nil func TestSinger(sourceConfig *base.SourceConfig) error { driver, err := NewSinger(context.Background(), sourceConfig, nil) if err != nil { return err } defer driver.Close() singerDriver, _ := driver.(*Singer) ready, _ := singerDriver.Ready() if !ready { return nil } outWriter := logging.NewStringWriter() errWriter := logging.NewStringWriter() command := path.Join(singer.Instance.VenvDir, singerDriver.tap, "bin", singerDriver.tap) err = singer.Instance.ExecCmd(command, outWriter, errWriter, "-c", singerDriver.configPath, "--discover") if err != nil { return fmt.Errorf("Error singer --discover: %v. 
%s", err, errWriter.String()) } return nil } //EnsureTapAndCatalog ensures Singer tap via singer.Instance // and does discover if catalog wasn't provided func (s *Singer) EnsureTapAndCatalog() { singer.Instance.EnsureTap(s.tap) for { if s.closed.Load() { break } if s.catalogDiscovered.Load() { break } if !singer.Instance.IsTapReady(s.tap) { time.Sleep(time.Second) continue } catalogPath, propertiesPath, err := doDiscover(s.sourceID, s.tap, s.pathToConfigs, s.configPath) if err != nil { logging.Errorf("[%s] Error configuring Singer: %v", s.sourceID, err) time.Sleep(time.Minute)
s.catalogPath = catalogPath s.propertiesPath = propertiesPath s.catalogDiscovered.Store(true) return } } //GetTableNamePrefix returns stream table name prefix or sourceID_ func (s *Singer) GetTableNamePrefix() string { //put as prefix + stream if prefix exist if s.tableNamePrefix != "" { return s.tableNamePrefix } return s.sourceID + "_" } //GetCollectionTable unsupported func (s *Singer) GetCollectionTable() string { return "" } func (s *Singer) GetCollectionMetaKey() string { return s.tap } //GetAllAvailableIntervals unsupported func (s *Singer) GetAllAvailableIntervals() ([]*base.TimeInterval, error) { return nil, errors.New("Singer driver doesn't support GetAllAvailableIntervals() func. Please use SingerTask") } //GetObjectsFor unsupported func (s *Singer) GetObjectsFor(interval *base.TimeInterval) ([]map[string]interface{}, error) { return nil, errors.New("Singer driver doesn't support GetObjectsFor() func. Please use SingerTask") } //Ready returns true if catalog is discovered and tap is installed func (s *Singer) Ready() (bool, error) { if s.catalogDiscovered.Load() && singer.Instance.IsTapReady(s.tap) { return true, nil } return false, errNotReady } func (s *Singer) GetTap() string { return s.tap } func (s *Singer) Load(state string, taskLogger logging.TaskLogger, portionConsumer singer.PortionConsumer) error { if s.closed.Load() { return errors.New("Singer has already been closed") } ready, readyErr := s.Ready() if !ready { return readyErr } //update tap if err := singer.Instance.UpdateTap(s.tap); err != nil { return fmt.Errorf("Error updating singer tap [%s]: %v", s.tap, err) } //override initial state with existing one and put it to a file var statePath string var err error if state != "" { statePath, err = parseJSONAsFile(path.Join(singer.Instance.VenvDir, s.sourceID, s.tap, stateFileName), state) if err != nil { return fmt.Errorf("Error parsing singer state %s: %v", state, err) } } else { //put initial state statePath = s.statePath } args := 
[]string{"-c", s.configPath} if s.catalogPath != "" { args = append(args, "--catalog", s.catalogPath) } if s.propertiesPath != "" { args = append(args, "-p", s.propertiesPath) } if statePath != "" { args = append(args, "--state", statePath) } command := path.Join(singer.Instance.VenvDir, s.tap, "bin", s.tap) taskLogger.INFO("exec singer %s %s", command, strings.Join(args, " ")) //exec cmd and analyze response from stdout & stderr syncCmd := exec.Command(command, args...) stdout, _ := syncCmd.StdoutPipe() defer stdout.Close() stderr, _ := syncCmd.StderrPipe() defer stderr.Close() commandID := uuid.New() s.Lock() s.commands[commandID] = syncCmd s.Unlock() defer func() { s.Lock() delete(s.commands, commandID) s.Unlock() }() err = syncCmd.Start() if err != nil { return err } var wg sync.WaitGroup var parsingErr error //writing result (singer writes result to stdout) wg.Add(1) safego.Run(func() { defer wg.Done() defer func() { if r := recover(); r != nil { logging.Error("panic in singer task") logging.Error(string(debug.Stack())) s.logAndKill(taskLogger, syncCmd, r) return } }() parsingErr = singer.StreamParseOutput(stdout, portionConsumer, taskLogger) if parsingErr != nil { s.logAndKill(taskLogger, syncCmd, parsingErr) } }) dualWriter := logging.Dual{FileWriter: taskLogger, Stdout: logging.NewPrefixDateTimeProxy(fmt.Sprintf("[%s]", s.sourceID), singer.Instance.LogWriter)} //writing process logs (singer writes process logs to stderr) wg.Add(1) safego.Run(func() { defer wg.Done() io.Copy(dualWriter, stderr) }) wg.Wait() err = syncCmd.Wait() if err != nil { return err } if parsingErr != nil { return parsingErr } return nil } func (s *Singer) Type() string { return base.SingerType } func (s *Singer) Close() (multiErr error) { s.closed.Store(true) s.Lock() for _, command := range s.commands { logging.Infof("[%s] killing process: %s", s.sourceID, command.String()) if err := command.Process.Kill(); err != nil { multiErr = multierror.Append(multiErr, fmt.Errorf("[%s] Error 
killing singer sync command: %v", s.sourceID, err)) } } s.Unlock() return multiErr } func (s *Singer) GetStreamTableNameMapping() map[string]string { result := map[string]string{} for name, value := range s.streamTableNames { result[name] = value } return result } func (s *Singer) logAndKill(taskLogger logging.TaskLogger, syncCmd *exec.Cmd, parsingErr interface{}) { taskLogger.ERROR("Parse output error: %v. Process will be killed", parsingErr) logging.Errorf("[%s_%s] parse output error: %v. Process will be killed", s.sourceID, s.tap, parsingErr) killErr := syncCmd.Process.Kill() if killErr != nil { taskLogger.ERROR("Error killing process: %v", killErr) logging.Errorf("[%s_%s] error killing process: %v", s.sourceID, s.tap, killErr) } } //doDiscover discovers tap catalog and returns catalog and properties paths //applies blacklist streams to taps and make other streams {"selected": true} func doDiscover(sourceID, tap, pathToConfigs, configFilePath string) (string, string, error) { if !singer.Instance.IsTapReady(tap) { return "", "", errNotReady } outWriter := logging.NewStringWriter() errStrWriter := logging.NewStringWriter() dualStdErrWriter := logging.Dual{FileWriter: errStrWriter, Stdout: logging.NewPrefixDateTimeProxy(fmt.Sprintf("[%s]", sourceID), singer.Instance.LogWriter)} command := path.Join(singer.Instance.VenvDir, tap, "bin", tap) err := singer.Instance.ExecCmd(command, outWriter, dualStdErrWriter, "-c", configFilePath, "--discover") if err != nil { return "", "", fmt.Errorf("Error singer --discover: %v. 
%s", err, errStrWriter.String()) } catalog := &SingerRawCatalog{} if err := json.Unmarshal(outWriter.Bytes(), &catalog); err != nil { return "", "", fmt.Errorf("Error unmarshalling catalog %s output: %v", outWriter.String(), err) } blackListStreams, ok := blacklistStreamsByTap[tap] if !ok { blackListStreams = map[string]bool{} } for _, stream := range catalog.Streams { streamName, ok := stream["stream"] if ok { if _, ok := blackListStreams[fmt.Sprint(streamName)]; ok { continue } } else { logging.Warnf("Stream [%v] doesn't have 'stream' name", stream) } //put selected=true into 'schema' schemaStruct, ok := stream["schema"] if !ok { return "", "", fmt.Errorf("Malformed discovered catalog structure %s: key 'schema' doesn't exist", outWriter.String()) } schemaObj, ok := schemaStruct.(map[string]interface{}) if !ok { return "", "", fmt.Errorf("Malformed discovered catalog structure %s: value under key 'schema' must be object: %T", outWriter.String(), schemaStruct) } schemaObj["selected"] = true //put selected=true into every 'metadata' object metadataArrayIface, ok := stream["metadata"] if ok { metadataArray, ok := metadataArrayIface.([]interface{}) if ok { for _, metadata := range metadataArray { metadataObj, ok := metadata.(map[string]interface{}) if ok { innerMetadata, ok := metadataObj["metadata"] if ok { innerMetadataObj, ok := innerMetadata.(map[string]interface{}) if ok { innerMetadataObj["selected"] = true } } } } } } } b, _ := json.MarshalIndent(catalog, "", " ") //write singer catalog as file path catalogPath, err := parseJSONAsFile(path.Join(pathToConfigs, catalogFileName), string(b)) if err != nil { return "", "", fmt.Errorf("Error writing discovered singer catalog [%v]: %v", string(b), err) } //write singer properties as file path propertiesPath, err := parseJSONAsFile(path.Join(pathToConfigs, propertiesFileName), string(b)) if err != nil { return "", "", fmt.Errorf("Error writing discovered singer properties [%v]: %v", string(b), err) } return 
catalogPath, propertiesPath, nil } //parse value and write it to a json file //return path to created json file or return value if it is already path to json file //or empty string if value is nil func parseJSONAsFile(newPath string, value interface{}) (string, error) { if value == nil { return "", nil } switch value.(type) { case map[string]interface{}: payload := value.(map[string]interface{}) b, err := json.Marshal(payload) if err != nil { return "", fmt.Errorf("Malformed value: %v", err) } return newPath, ioutil.WriteFile(newPath, b, 0644) case string: payload := value.(string) if strings.HasPrefix(payload, "{") { return newPath, ioutil.WriteFile(newPath, []byte(payload), 0644) } //already file return payload, nil default: return "", errors.New("Unknown type. Value must be path to json file or raw json") } } func extractTableNamesMapping(catalogPath string) (map[string]string, error) { catalogBytes, err := ioutil.ReadFile(catalogPath) if err != nil { return nil, fmt.Errorf("Error reading catalog file: %v", err) } catalog := &SingerCatalog{} err = json.Unmarshal(catalogBytes, catalog) if err != nil { return nil, err } streamTableNamesMapping := map[string]string{} for _, stream := range catalog.Streams { if stream.DestinationTableName != "" { //add mapping stream if stream.Stream != "" { streamTableNamesMapping[stream.Stream] = stream.DestinationTableName } //add mapping tap_stream_id if stream.TapStreamID != "" { streamTableNamesMapping[stream.TapStreamID] = stream.DestinationTableName } } } return streamTableNamesMapping, nil }
continue }
random_line_split
singer.go
package singer import ( "context" "encoding/json" "errors" "fmt" "github.com/hashicorp/go-multierror" "github.com/jitsucom/jitsu/server/drivers/base" "github.com/jitsucom/jitsu/server/logging" "github.com/jitsucom/jitsu/server/safego" "github.com/jitsucom/jitsu/server/singer" "github.com/jitsucom/jitsu/server/uuid" "go.uber.org/atomic" "io" "io/ioutil" "os/exec" "path" "runtime/debug" "strings" "sync" "time" ) const ( stateFileName = "state.json" configFileName = "config.json" catalogFileName = "catalog.json" propertiesFileName = "properties.json" ) var ( blacklistStreamsByTap = map[string]map[string]bool{ "tap-slack": { "messages": true, }, } errNotReady = errors.New("Singer driver isn't ready yet. Tap is being installed..") ) type Singer struct { sync.RWMutex commands map[string]*exec.Cmd ctx context.Context sourceID string tap string configPath string catalogPath string propertiesPath string statePath string pathToConfigs string tableNamePrefix string streamTableNames map[string]string catalogDiscovered *atomic.Bool closed *atomic.Bool } func init() { base.RegisterDriver(base.SingerType, NewSinger) base.RegisterTestConnectionFunc(base.SingerType, TestSinger) } //NewSinger returns Singer driver and //1. writes json files (config, catalog, properties, state) if string/raw json was provided //2. runs discover and collects catalog.json //2. creates venv //3. 
in another goroutine: updates pip, install singer tap func NewSinger(ctx context.Context, sourceConfig *base.SourceConfig, collection *base.Collection) (base.Driver, error) { config := &SingerConfig{} err := base.UnmarshalConfig(sourceConfig.Config, config) if err != nil { return nil, err } if err := config.Validate(); err != nil { return nil, err } if singer.Instance == nil { return nil, errors.New("singer-bridge must be configured") } pathToConfigs := path.Join(singer.Instance.VenvDir, sourceConfig.SourceID, config.Tap) if err := logging.EnsureDir(pathToConfigs); err != nil { return nil, fmt.Errorf("Error creating singer venv config dir: %v", err) } //parse singer config as file path configPath, err := parseJSONAsFile(path.Join(pathToConfigs, configFileName), config.Config) if err != nil { return nil, fmt.Errorf("Error parsing singer config [%v]: %v", config.Config, err) } //parse singer catalog as file path catalogPath, err := parseJSONAsFile(path.Join(pathToConfigs, catalogFileName), config.Catalog) if err != nil { return nil, fmt.Errorf("Error parsing singer catalog [%v]: %v", config.Catalog, err) } // ** Table names mapping ** tableNameMappings := config.StreamTableNames if catalogPath != "" { //extract table names mapping from catalog.json tableNameMappingsFromCatalog, err := extractTableNamesMapping(catalogPath) if err != nil { logging.Errorf("[%s] Error parsing destination table names from Singer catalog.json: %v", sourceConfig.SourceID, err) } //override configuration for stream, tableName := range tableNameMappingsFromCatalog { tableNameMappings[stream] = tableName } } if len(tableNameMappings) > 0 { b, _ := json.MarshalIndent(tableNameMappings, "", " ") logging.Infof("[%s] configured Singer stream - table names mapping: %s", sourceConfig.SourceID, string(b)) } //parse singer properties as file path propertiesPath, err := parseJSONAsFile(path.Join(pathToConfigs, propertiesFileName), config.Properties) if err != nil { return nil, fmt.Errorf("Error parsing 
singer properties [%v]: %v", config.Properties, err) } //parse singer state as file path statePath, err := parseJSONAsFile(path.Join(pathToConfigs, stateFileName), config.InitialState) if err != nil { return nil, fmt.Errorf("Error parsing singer initial state [%v]: %v", config.InitialState, err) } catalogDiscovered := atomic.NewBool(false) if catalogPath != "" || propertiesPath != "" { catalogDiscovered.Store(true) } s := &Singer{ ctx: ctx, commands: map[string]*exec.Cmd{}, sourceID: sourceConfig.SourceID, tap: config.Tap, configPath: configPath, catalogPath: catalogPath, propertiesPath: propertiesPath, statePath: statePath, tableNamePrefix: config.StreamTableNamesPrefix, pathToConfigs: pathToConfigs, streamTableNames: tableNameMappings, catalogDiscovered: catalogDiscovered, closed: atomic.NewBool(false), } safego.Run(s.EnsureTapAndCatalog) return s, nil } //TestSinger tests singer connection (runs discover) if tap has been installed otherwise returns nil func TestSinger(sourceConfig *base.SourceConfig) error { driver, err := NewSinger(context.Background(), sourceConfig, nil) if err != nil { return err } defer driver.Close() singerDriver, _ := driver.(*Singer) ready, _ := singerDriver.Ready() if !ready { return nil } outWriter := logging.NewStringWriter() errWriter := logging.NewStringWriter() command := path.Join(singer.Instance.VenvDir, singerDriver.tap, "bin", singerDriver.tap) err = singer.Instance.ExecCmd(command, outWriter, errWriter, "-c", singerDriver.configPath, "--discover") if err != nil { return fmt.Errorf("Error singer --discover: %v. %s", err, errWriter.String()) } return nil } //EnsureTapAndCatalog ensures Singer tap via singer.Instance // and does discover if catalog wasn't provided func (s *Singer) EnsureTapAndCatalog() { singer.Instance.EnsureTap(s.tap) for { if s.closed.Load()
if s.catalogDiscovered.Load() { break } if !singer.Instance.IsTapReady(s.tap) { time.Sleep(time.Second) continue } catalogPath, propertiesPath, err := doDiscover(s.sourceID, s.tap, s.pathToConfigs, s.configPath) if err != nil { logging.Errorf("[%s] Error configuring Singer: %v", s.sourceID, err) time.Sleep(time.Minute) continue } s.catalogPath = catalogPath s.propertiesPath = propertiesPath s.catalogDiscovered.Store(true) return } } //GetTableNamePrefix returns stream table name prefix or sourceID_ func (s *Singer) GetTableNamePrefix() string { //put as prefix + stream if prefix exist if s.tableNamePrefix != "" { return s.tableNamePrefix } return s.sourceID + "_" } //GetCollectionTable unsupported func (s *Singer) GetCollectionTable() string { return "" } func (s *Singer) GetCollectionMetaKey() string { return s.tap } //GetAllAvailableIntervals unsupported func (s *Singer) GetAllAvailableIntervals() ([]*base.TimeInterval, error) { return nil, errors.New("Singer driver doesn't support GetAllAvailableIntervals() func. Please use SingerTask") } //GetObjectsFor unsupported func (s *Singer) GetObjectsFor(interval *base.TimeInterval) ([]map[string]interface{}, error) { return nil, errors.New("Singer driver doesn't support GetObjectsFor() func. 
Please use SingerTask") } //Ready returns true if catalog is discovered and tap is installed func (s *Singer) Ready() (bool, error) { if s.catalogDiscovered.Load() && singer.Instance.IsTapReady(s.tap) { return true, nil } return false, errNotReady } func (s *Singer) GetTap() string { return s.tap } func (s *Singer) Load(state string, taskLogger logging.TaskLogger, portionConsumer singer.PortionConsumer) error { if s.closed.Load() { return errors.New("Singer has already been closed") } ready, readyErr := s.Ready() if !ready { return readyErr } //update tap if err := singer.Instance.UpdateTap(s.tap); err != nil { return fmt.Errorf("Error updating singer tap [%s]: %v", s.tap, err) } //override initial state with existing one and put it to a file var statePath string var err error if state != "" { statePath, err = parseJSONAsFile(path.Join(singer.Instance.VenvDir, s.sourceID, s.tap, stateFileName), state) if err != nil { return fmt.Errorf("Error parsing singer state %s: %v", state, err) } } else { //put initial state statePath = s.statePath } args := []string{"-c", s.configPath} if s.catalogPath != "" { args = append(args, "--catalog", s.catalogPath) } if s.propertiesPath != "" { args = append(args, "-p", s.propertiesPath) } if statePath != "" { args = append(args, "--state", statePath) } command := path.Join(singer.Instance.VenvDir, s.tap, "bin", s.tap) taskLogger.INFO("exec singer %s %s", command, strings.Join(args, " ")) //exec cmd and analyze response from stdout & stderr syncCmd := exec.Command(command, args...) 
stdout, _ := syncCmd.StdoutPipe() defer stdout.Close() stderr, _ := syncCmd.StderrPipe() defer stderr.Close() commandID := uuid.New() s.Lock() s.commands[commandID] = syncCmd s.Unlock() defer func() { s.Lock() delete(s.commands, commandID) s.Unlock() }() err = syncCmd.Start() if err != nil { return err } var wg sync.WaitGroup var parsingErr error //writing result (singer writes result to stdout) wg.Add(1) safego.Run(func() { defer wg.Done() defer func() { if r := recover(); r != nil { logging.Error("panic in singer task") logging.Error(string(debug.Stack())) s.logAndKill(taskLogger, syncCmd, r) return } }() parsingErr = singer.StreamParseOutput(stdout, portionConsumer, taskLogger) if parsingErr != nil { s.logAndKill(taskLogger, syncCmd, parsingErr) } }) dualWriter := logging.Dual{FileWriter: taskLogger, Stdout: logging.NewPrefixDateTimeProxy(fmt.Sprintf("[%s]", s.sourceID), singer.Instance.LogWriter)} //writing process logs (singer writes process logs to stderr) wg.Add(1) safego.Run(func() { defer wg.Done() io.Copy(dualWriter, stderr) }) wg.Wait() err = syncCmd.Wait() if err != nil { return err } if parsingErr != nil { return parsingErr } return nil } func (s *Singer) Type() string { return base.SingerType } func (s *Singer) Close() (multiErr error) { s.closed.Store(true) s.Lock() for _, command := range s.commands { logging.Infof("[%s] killing process: %s", s.sourceID, command.String()) if err := command.Process.Kill(); err != nil { multiErr = multierror.Append(multiErr, fmt.Errorf("[%s] Error killing singer sync command: %v", s.sourceID, err)) } } s.Unlock() return multiErr } func (s *Singer) GetStreamTableNameMapping() map[string]string { result := map[string]string{} for name, value := range s.streamTableNames { result[name] = value } return result } func (s *Singer) logAndKill(taskLogger logging.TaskLogger, syncCmd *exec.Cmd, parsingErr interface{}) { taskLogger.ERROR("Parse output error: %v. 
Process will be killed", parsingErr) logging.Errorf("[%s_%s] parse output error: %v. Process will be killed", s.sourceID, s.tap, parsingErr) killErr := syncCmd.Process.Kill() if killErr != nil { taskLogger.ERROR("Error killing process: %v", killErr) logging.Errorf("[%s_%s] error killing process: %v", s.sourceID, s.tap, killErr) } } //doDiscover discovers tap catalog and returns catalog and properties paths //applies blacklist streams to taps and make other streams {"selected": true} func doDiscover(sourceID, tap, pathToConfigs, configFilePath string) (string, string, error) { if !singer.Instance.IsTapReady(tap) { return "", "", errNotReady } outWriter := logging.NewStringWriter() errStrWriter := logging.NewStringWriter() dualStdErrWriter := logging.Dual{FileWriter: errStrWriter, Stdout: logging.NewPrefixDateTimeProxy(fmt.Sprintf("[%s]", sourceID), singer.Instance.LogWriter)} command := path.Join(singer.Instance.VenvDir, tap, "bin", tap) err := singer.Instance.ExecCmd(command, outWriter, dualStdErrWriter, "-c", configFilePath, "--discover") if err != nil { return "", "", fmt.Errorf("Error singer --discover: %v. 
%s", err, errStrWriter.String()) } catalog := &SingerRawCatalog{} if err := json.Unmarshal(outWriter.Bytes(), &catalog); err != nil { return "", "", fmt.Errorf("Error unmarshalling catalog %s output: %v", outWriter.String(), err) } blackListStreams, ok := blacklistStreamsByTap[tap] if !ok { blackListStreams = map[string]bool{} } for _, stream := range catalog.Streams { streamName, ok := stream["stream"] if ok { if _, ok := blackListStreams[fmt.Sprint(streamName)]; ok { continue } } else { logging.Warnf("Stream [%v] doesn't have 'stream' name", stream) } //put selected=true into 'schema' schemaStruct, ok := stream["schema"] if !ok { return "", "", fmt.Errorf("Malformed discovered catalog structure %s: key 'schema' doesn't exist", outWriter.String()) } schemaObj, ok := schemaStruct.(map[string]interface{}) if !ok { return "", "", fmt.Errorf("Malformed discovered catalog structure %s: value under key 'schema' must be object: %T", outWriter.String(), schemaStruct) } schemaObj["selected"] = true //put selected=true into every 'metadata' object metadataArrayIface, ok := stream["metadata"] if ok { metadataArray, ok := metadataArrayIface.([]interface{}) if ok { for _, metadata := range metadataArray { metadataObj, ok := metadata.(map[string]interface{}) if ok { innerMetadata, ok := metadataObj["metadata"] if ok { innerMetadataObj, ok := innerMetadata.(map[string]interface{}) if ok { innerMetadataObj["selected"] = true } } } } } } } b, _ := json.MarshalIndent(catalog, "", " ") //write singer catalog as file path catalogPath, err := parseJSONAsFile(path.Join(pathToConfigs, catalogFileName), string(b)) if err != nil { return "", "", fmt.Errorf("Error writing discovered singer catalog [%v]: %v", string(b), err) } //write singer properties as file path propertiesPath, err := parseJSONAsFile(path.Join(pathToConfigs, propertiesFileName), string(b)) if err != nil { return "", "", fmt.Errorf("Error writing discovered singer properties [%v]: %v", string(b), err) } return 
catalogPath, propertiesPath, nil } //parse value and write it to a json file //return path to created json file or return value if it is already path to json file //or empty string if value is nil func parseJSONAsFile(newPath string, value interface{}) (string, error) { if value == nil { return "", nil } switch value.(type) { case map[string]interface{}: payload := value.(map[string]interface{}) b, err := json.Marshal(payload) if err != nil { return "", fmt.Errorf("Malformed value: %v", err) } return newPath, ioutil.WriteFile(newPath, b, 0644) case string: payload := value.(string) if strings.HasPrefix(payload, "{") { return newPath, ioutil.WriteFile(newPath, []byte(payload), 0644) } //already file return payload, nil default: return "", errors.New("Unknown type. Value must be path to json file or raw json") } } func extractTableNamesMapping(catalogPath string) (map[string]string, error) { catalogBytes, err := ioutil.ReadFile(catalogPath) if err != nil { return nil, fmt.Errorf("Error reading catalog file: %v", err) } catalog := &SingerCatalog{} err = json.Unmarshal(catalogBytes, catalog) if err != nil { return nil, err } streamTableNamesMapping := map[string]string{} for _, stream := range catalog.Streams { if stream.DestinationTableName != "" { //add mapping stream if stream.Stream != "" { streamTableNamesMapping[stream.Stream] = stream.DestinationTableName } //add mapping tap_stream_id if stream.TapStreamID != "" { streamTableNamesMapping[stream.TapStreamID] = stream.DestinationTableName } } } return streamTableNamesMapping, nil }
{ break }
conditional_block
mapper.rs
use std::{ borrow::Cow, cmp::{Ordering, Reverse}, collections::HashMap, sync::Arc, }; use bathbot_macros::{command, HasName, SlashCommand}; use bathbot_model::ScoreSlim; use bathbot_psql::model::configs::{ListSize, MinimizedPp}; use bathbot_util::{ constants::{GENERAL_ISSUE, OSU_API_ISSUE}, matcher, CowUtils, }; use eyre::{Report, Result}; use rosu_v2::{ prelude::{GameMode, Grade, OsuError, Score}, request::UserId, }; use twilight_interactions::command::{CommandModel, CreateCommand}; use twilight_model::id::{marker::UserMarker, Id}; use super::{require_link, user_not_found, ScoreOrder, TopEntry}; use crate::{ active::{impls::TopPagination, ActiveMessages}, commands::GameModeOption, core::commands::{prefix::Args, CommandOrigin}, manager::redis::{osu::UserArgs, RedisData}, util::{interaction::InteractionCommand, ChannelExt, InteractionCommandExt}, Context, }; #[derive(CommandModel, CreateCommand, HasName, SlashCommand)] #[command( name = "mapper", desc = "How often does the given mapper appear in top a user's top plays", help = "Count the top plays on maps of the given mapper.\n\ It will try to consider guest difficulties so that if a map was created by someone else \ but the given mapper made the guest diff, it will count.\n\ Similarly, if the given mapper created the mapset but someone else guest diff'd, \ it will not count.\n\ This does not always work perfectly, especially for older maps but it's what the api provides." )] pub struct Mapper<'a> { #[command(desc = "Specify a mapper username")] mapper: Cow<'a, str>, #[command(desc = "Specify a gamemode")] mode: Option<GameModeOption>, #[command(desc = "Specify a username")] name: Option<Cow<'a, str>>, #[command(desc = "Choose how the scores should be ordered")] sort: Option<ScoreOrder>, #[command( desc = "Specify a linked discord user", help = "Instead of specifying an osu! username with the `name` option, \ you can use this option to choose a discord user.\n\ Only works on users who have used the `/link` command." 
)] discord: Option<Id<UserMarker>>, #[command( desc = "Size of the embed", help = "Size of the embed.\n\ `Condensed` shows 10 scores, `Detailed` shows 5, and `Single` shows 1.\n\ The default can be set with the `/config` command." )] size: Option<ListSize>, } impl<'m> Mapper<'m> { fn args( mode: Option<GameModeOption>, mut args: Args<'m>, mapper: Option<&'static str>, ) -> Result<Self, &'static str> { let mapper = match mapper.or_else(|| args.next()) { Some(arg) => arg.into(), None => { let content = "You need to specify at least one osu! username for the mapper. \ If you're not linked, you must specify at least two names."; return Err(content); } }; let mut name = None; let mut discord = None; if let Some(arg) = args.next() { match matcher::get_mention_user(arg) { Some(id) => discord = Some(id), None => name = Some(arg.into()), } } Ok(Self { mapper, mode, name, sort: None, discord, size: None, }) } } #[command] #[desc("How many maps of a user's top100 are made by the given mapper?")] #[help( "Display the top plays of a user which were mapped by the given mapper.\n\ Specify the __mapper first__ and the __user second__." )] #[usage("[mapper] [user]")] #[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")] #[group(Osu)] async fn prefix_mapper(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(None, args, None) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } #[command] #[desc("How many maps of a mania user's top100 are made by the given mapper?")] #[help( "Display the top plays of a mania user which were mapped by the given mapper.\n\ Specify the __mapper first__ and the __user second__." 
)] #[usage("[mapper] [user]")] #[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")] #[alias("mapperm")] #[group(Mania)] pub async fn prefix_mappermania(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(Some(GameModeOption::Mania), args, None) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } #[command] #[desc("How many maps of a taiko user's top100 are made by the given mapper?")] #[help( "Display the top plays of a taiko user which were mapped by the given mapper.\n\ Specify the __mapper first__ and the __user second__." )] #[usage("[mapper] [user]")] #[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")] #[alias("mappert")] #[group(Taiko)] pub async fn prefix_mappertaiko(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(Some(GameModeOption::Taiko), args, None) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } #[command] #[desc("How many maps of a ctb user's top100 are made by the given mapper?")] #[help( "Display the top plays of a ctb user which were mapped by the given mapper.\n\ Specify the __mapper first__ and the __user second__." 
)] #[usage("[mapper] [user]")] #[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")] #[aliases("mapperc", "mappercatch")] #[group(Catch)] async fn prefix_mapperctb(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(Some(GameModeOption::Catch), args, None) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } #[command] #[desc("How many maps of a user's top100 are made by Sotarks?")] #[usage("[username]")] #[example("badewanne3")] #[group(Osu)] pub async fn prefix_sotarks(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(Some(GameModeOption::Osu), args, Some("sotarks")) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } async fn slash_mapper(ctx: Arc<Context>, mut command: InteractionCommand) -> Result<()> { let args = Mapper::from_interaction(command.input_data())?; mapper(ctx, (&mut command).into(), args).await } async fn mapper(ctx: Arc<Context>, orig: CommandOrigin<'_>, args: Mapper<'_>) -> Result<()> { let msg_owner = orig.user_id()?; let mut config = match ctx.user_config().with_osu_id(msg_owner).await { Ok(config) => config, Err(err) => { let _ = orig.error(&ctx, GENERAL_ISSUE).await; return Err(err); } }; let mode = args .mode .map(GameMode::from) .or(config.mode) .unwrap_or(GameMode::Osu); let user_id = match user_id!(ctx, orig, args) { Some(user_id) => user_id, None => match config.osu.take() { Some(user_id) => UserId::Id(user_id), None => return require_link(&ctx, &orig).await, }, }; let mapper = args.mapper.cow_to_ascii_lowercase(); let mapper_args = UserArgs::username(&ctx, mapper.as_ref()).await.mode(mode); let mapper_fut = ctx.redis().osu_user(mapper_args); // Retrieve the user and their top scores let user_args = UserArgs::rosu_id(&ctx, &user_id).await.mode(mode); let scores_fut = ctx.osu_scores().top().limit(100).exec_with_user(user_args); let 
(mapper, user, scores) = match tokio::join!(mapper_fut, scores_fut) { (Ok(mapper), Ok((user, scores))) => (mapper, user, scores), (Err(OsuError::NotFound), _) => { let content = format!("Mapper with username `{mapper}` was not found"); return orig.error(&ctx, content).await; } (_, Err(OsuError::NotFound)) => { let content = user_not_found(&ctx, user_id).await; return orig.error(&ctx, content).await; } (Err(err), _) | (_, Err(err)) => { let _ = orig.error(&ctx, OSU_API_ISSUE).await; let err = Report::new(err).wrap_err("failed to get mapper, user, or scores"); return Err(err); } }; let (mapper_name, mapper_id) = match &mapper { RedisData::Original(mapper) => (mapper.username.as_str(), mapper.user_id), RedisData::Archive(mapper) => (mapper.username.as_str(), mapper.user_id), }; let username = user.username(); let entries = match process_scores(&ctx, scores, mapper_id, args.sort).await { Ok(entries) => entries, Err(err) => { let _ = orig.error(&ctx, GENERAL_ISSUE).await; return Err(err.wrap_err("failed to process scores")); } }; // Accumulate all necessary data let content = match mapper_name { "Sotarks" => { let amount = entries.len(); let mut content = format!( "I found {amount} Sotarks map{plural} in `{username}`'s top100, ", amount = amount, plural = if amount != 1 { "s" } else { "" }, ); let to_push = match amount { 0 => "I'm proud \\:)", 1..=4 => "that's already too many...", 5..=8 => "kinda sad \\:/", 9..=15 => "pretty sad \\:(", 16..=25 => "this is so sad \\:((", 26..=35 => "this needs to stop", 36..=49 => "that's a serious problem...", 50 => "that's half. HALF.", 51..=79 => "how do you sleep at night...", 80..=99 => "i'm not even mad, that's just impressive", 100 => "you did it. 
\"Congrats\".", _ => "wait how did you do that", }; content.push_str(to_push); content } _ => format!( "{count} of `{username}`'{genitive} top score maps were mapped by `{mapper_name}`", count = entries.len(), genitive = if username.ends_with('s') { "" } else { "s" }, ), }; let sort_by = args.sort.unwrap_or(ScoreOrder::Pp).into(); let farm = HashMap::default(); let list_size = match args.size.or(config.list_size) { Some(size) => size, None => match orig.guild_id() { Some(guild_id) => ctx .guild_config() .peek(guild_id, |config| config.list_size) .await .unwrap_or_default(), None => ListSize::default(), }, }; let minimized_pp = match config.minimized_pp { Some(minimized_pp) => minimized_pp, None => match list_size { ListSize::Condensed | ListSize::Detailed => MinimizedPp::default(), ListSize::Single => match orig.guild_id() { Some(guild_id) => ctx .guild_config() .peek(guild_id, |config| config.minimized_pp) .await .unwrap_or_default(), None => MinimizedPp::default(), }, }, }; let pagination = TopPagination::builder() .user(user) .mode(mode) .entries(entries.into_boxed_slice()) .sort_by(sort_by) .farm(farm) .list_size(list_size) .minimized_pp(minimized_pp) .content(content.into_boxed_str()) .msg_owner(msg_owner) .build(); ActiveMessages::builder(pagination) .start_by_update(true) .begin(ctx, orig) .await } async fn process_scores( ctx: &Context, scores: Vec<Score>, mapper_id: u32, sort: Option<ScoreOrder>, ) -> Result<Vec<TopEntry>> { let mut entries = Vec::new(); let maps_id_checksum = scores .iter() .filter_map(|score| score.map.as_ref()) .filter(|map| map.creator_id == mapper_id) .map(|map| (map.map_id as i32, map.checksum.as_deref())) .collect(); let mut maps = ctx.osu_map().maps(&maps_id_checksum).await?; for (i, score) in scores.into_iter().enumerate() { let Some(mut map) = maps.remove(&score.map_id) else { continue }; map.convert_mut(score.mode); let mut calc = ctx.pp(&map).mode(score.mode).mods(score.mods.bits()); let attrs = calc.difficulty().await; let 
stars = attrs.stars() as f32; let max_combo = attrs.max_combo() as u32; let pp = score.pp.expect("missing pp"); let max_pp = match score .pp .filter(|_| score.grade.eq_letter(Grade::X) && score.mode != GameMode::Mania) { Some(pp) => pp, None => calc.performance().await.pp() as f32, }; let entry = TopEntry { original_idx: i, replay: score.replay, score: ScoreSlim::new(score, pp), map, max_pp, stars, max_combo, }; entries.push(entry); } match sort { None => {} Some(ScoreOrder::Acc) => entries.sort_by(|a, b| { b.score .accuracy .partial_cmp(&a.score.accuracy) .unwrap_or(Ordering::Equal) }), Some(ScoreOrder::Bpm) => entries.sort_by(|a, b| { b.map .bpm() .partial_cmp(&a.map.bpm()) .unwrap_or(Ordering::Equal) }), Some(ScoreOrder::Combo) => entries.sort_by_key(|entry| Reverse(entry.score.max_combo)), Some(ScoreOrder::Date) => entries.sort_by_key(|entry| Reverse(entry.score.ended_at)), Some(ScoreOrder::Length) => { entries.sort_by(|a, b| { let a_len = a.map.seconds_drain() as f32 / a.score.mods.clock_rate().unwrap_or(1.0); let b_len = b.map.seconds_drain() as f32 / b.score.mods.clock_rate().unwrap_or(1.0); b_len.partial_cmp(&a_len).unwrap_or(Ordering::Equal) }); } Some(ScoreOrder::Misses) => entries.sort_by(|a, b| { b.score .statistics
let hits_b = b.score.total_hits(); let ratio_a = a.score.statistics.count_miss as f32 / hits_a as f32; let ratio_b = b.score.statistics.count_miss as f32 / hits_b as f32; ratio_b .partial_cmp(&ratio_a) .unwrap_or(Ordering::Equal) .then_with(|| hits_b.cmp(&hits_a)) }) }), Some(ScoreOrder::Pp) => entries.sort_by(|a, b| { b.score .pp .partial_cmp(&a.score.pp) .unwrap_or(Ordering::Equal) }), Some(ScoreOrder::RankedDate) => { entries.sort_by_key(|entry| Reverse(entry.map.ranked_date())) } Some(ScoreOrder::Score) => entries.sort_by_key(|entry| Reverse(entry.score.score)), Some(ScoreOrder::Stars) => { entries.sort_by(|a, b| b.stars.partial_cmp(&a.stars).unwrap_or(Ordering::Equal)) } } Ok(entries) }
.count_miss .cmp(&a.score.statistics.count_miss) .then_with(|| { let hits_a = a.score.total_hits();
random_line_split
mapper.rs
use std::{ borrow::Cow, cmp::{Ordering, Reverse}, collections::HashMap, sync::Arc, }; use bathbot_macros::{command, HasName, SlashCommand}; use bathbot_model::ScoreSlim; use bathbot_psql::model::configs::{ListSize, MinimizedPp}; use bathbot_util::{ constants::{GENERAL_ISSUE, OSU_API_ISSUE}, matcher, CowUtils, }; use eyre::{Report, Result}; use rosu_v2::{ prelude::{GameMode, Grade, OsuError, Score}, request::UserId, }; use twilight_interactions::command::{CommandModel, CreateCommand}; use twilight_model::id::{marker::UserMarker, Id}; use super::{require_link, user_not_found, ScoreOrder, TopEntry}; use crate::{ active::{impls::TopPagination, ActiveMessages}, commands::GameModeOption, core::commands::{prefix::Args, CommandOrigin}, manager::redis::{osu::UserArgs, RedisData}, util::{interaction::InteractionCommand, ChannelExt, InteractionCommandExt}, Context, }; #[derive(CommandModel, CreateCommand, HasName, SlashCommand)] #[command( name = "mapper", desc = "How often does the given mapper appear in top a user's top plays", help = "Count the top plays on maps of the given mapper.\n\ It will try to consider guest difficulties so that if a map was created by someone else \ but the given mapper made the guest diff, it will count.\n\ Similarly, if the given mapper created the mapset but someone else guest diff'd, \ it will not count.\n\ This does not always work perfectly, especially for older maps but it's what the api provides." )] pub struct Mapper<'a> { #[command(desc = "Specify a mapper username")] mapper: Cow<'a, str>, #[command(desc = "Specify a gamemode")] mode: Option<GameModeOption>, #[command(desc = "Specify a username")] name: Option<Cow<'a, str>>, #[command(desc = "Choose how the scores should be ordered")] sort: Option<ScoreOrder>, #[command( desc = "Specify a linked discord user", help = "Instead of specifying an osu! username with the `name` option, \ you can use this option to choose a discord user.\n\ Only works on users who have used the `/link` command." 
)] discord: Option<Id<UserMarker>>, #[command( desc = "Size of the embed", help = "Size of the embed.\n\ `Condensed` shows 10 scores, `Detailed` shows 5, and `Single` shows 1.\n\ The default can be set with the `/config` command." )] size: Option<ListSize>, } impl<'m> Mapper<'m> { fn args( mode: Option<GameModeOption>, mut args: Args<'m>, mapper: Option<&'static str>, ) -> Result<Self, &'static str> { let mapper = match mapper.or_else(|| args.next()) { Some(arg) => arg.into(), None => { let content = "You need to specify at least one osu! username for the mapper. \ If you're not linked, you must specify at least two names."; return Err(content); } }; let mut name = None; let mut discord = None; if let Some(arg) = args.next() { match matcher::get_mention_user(arg) { Some(id) => discord = Some(id), None => name = Some(arg.into()), } } Ok(Self { mapper, mode, name, sort: None, discord, size: None, }) } } #[command] #[desc("How many maps of a user's top100 are made by the given mapper?")] #[help( "Display the top plays of a user which were mapped by the given mapper.\n\ Specify the __mapper first__ and the __user second__." )] #[usage("[mapper] [user]")] #[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")] #[group(Osu)] async fn prefix_mapper(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(None, args, None) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } #[command] #[desc("How many maps of a mania user's top100 are made by the given mapper?")] #[help( "Display the top plays of a mania user which were mapped by the given mapper.\n\ Specify the __mapper first__ and the __user second__." 
)] #[usage("[mapper] [user]")] #[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")] #[alias("mapperm")] #[group(Mania)] pub async fn prefix_mappermania(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(Some(GameModeOption::Mania), args, None) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } #[command] #[desc("How many maps of a taiko user's top100 are made by the given mapper?")] #[help( "Display the top plays of a taiko user which were mapped by the given mapper.\n\ Specify the __mapper first__ and the __user second__." )] #[usage("[mapper] [user]")] #[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")] #[alias("mappert")] #[group(Taiko)] pub async fn
(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(Some(GameModeOption::Taiko), args, None) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } #[command] #[desc("How many maps of a ctb user's top100 are made by the given mapper?")] #[help( "Display the top plays of a ctb user which were mapped by the given mapper.\n\ Specify the __mapper first__ and the __user second__." )] #[usage("[mapper] [user]")] #[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")] #[aliases("mapperc", "mappercatch")] #[group(Catch)] async fn prefix_mapperctb(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(Some(GameModeOption::Catch), args, None) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } #[command] #[desc("How many maps of a user's top100 are made by Sotarks?")] #[usage("[username]")] #[example("badewanne3")] #[group(Osu)] pub async fn prefix_sotarks(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(Some(GameModeOption::Osu), args, Some("sotarks")) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } async fn slash_mapper(ctx: Arc<Context>, mut command: InteractionCommand) -> Result<()> { let args = Mapper::from_interaction(command.input_data())?; mapper(ctx, (&mut command).into(), args).await } async fn mapper(ctx: Arc<Context>, orig: CommandOrigin<'_>, args: Mapper<'_>) -> Result<()> { let msg_owner = orig.user_id()?; let mut config = match ctx.user_config().with_osu_id(msg_owner).await { Ok(config) => config, Err(err) => { let _ = orig.error(&ctx, GENERAL_ISSUE).await; return Err(err); } }; let mode = args .mode .map(GameMode::from) .or(config.mode) .unwrap_or(GameMode::Osu); let user_id = match user_id!(ctx, orig, args) { Some(user_id) => user_id, None => match 
config.osu.take() { Some(user_id) => UserId::Id(user_id), None => return require_link(&ctx, &orig).await, }, }; let mapper = args.mapper.cow_to_ascii_lowercase(); let mapper_args = UserArgs::username(&ctx, mapper.as_ref()).await.mode(mode); let mapper_fut = ctx.redis().osu_user(mapper_args); // Retrieve the user and their top scores let user_args = UserArgs::rosu_id(&ctx, &user_id).await.mode(mode); let scores_fut = ctx.osu_scores().top().limit(100).exec_with_user(user_args); let (mapper, user, scores) = match tokio::join!(mapper_fut, scores_fut) { (Ok(mapper), Ok((user, scores))) => (mapper, user, scores), (Err(OsuError::NotFound), _) => { let content = format!("Mapper with username `{mapper}` was not found"); return orig.error(&ctx, content).await; } (_, Err(OsuError::NotFound)) => { let content = user_not_found(&ctx, user_id).await; return orig.error(&ctx, content).await; } (Err(err), _) | (_, Err(err)) => { let _ = orig.error(&ctx, OSU_API_ISSUE).await; let err = Report::new(err).wrap_err("failed to get mapper, user, or scores"); return Err(err); } }; let (mapper_name, mapper_id) = match &mapper { RedisData::Original(mapper) => (mapper.username.as_str(), mapper.user_id), RedisData::Archive(mapper) => (mapper.username.as_str(), mapper.user_id), }; let username = user.username(); let entries = match process_scores(&ctx, scores, mapper_id, args.sort).await { Ok(entries) => entries, Err(err) => { let _ = orig.error(&ctx, GENERAL_ISSUE).await; return Err(err.wrap_err("failed to process scores")); } }; // Accumulate all necessary data let content = match mapper_name { "Sotarks" => { let amount = entries.len(); let mut content = format!( "I found {amount} Sotarks map{plural} in `{username}`'s top100, ", amount = amount, plural = if amount != 1 { "s" } else { "" }, ); let to_push = match amount { 0 => "I'm proud \\:)", 1..=4 => "that's already too many...", 5..=8 => "kinda sad \\:/", 9..=15 => "pretty sad \\:(", 16..=25 => "this is so sad \\:((", 26..=35 => "this needs 
to stop", 36..=49 => "that's a serious problem...", 50 => "that's half. HALF.", 51..=79 => "how do you sleep at night...", 80..=99 => "i'm not even mad, that's just impressive", 100 => "you did it. \"Congrats\".", _ => "wait how did you do that", }; content.push_str(to_push); content } _ => format!( "{count} of `{username}`'{genitive} top score maps were mapped by `{mapper_name}`", count = entries.len(), genitive = if username.ends_with('s') { "" } else { "s" }, ), }; let sort_by = args.sort.unwrap_or(ScoreOrder::Pp).into(); let farm = HashMap::default(); let list_size = match args.size.or(config.list_size) { Some(size) => size, None => match orig.guild_id() { Some(guild_id) => ctx .guild_config() .peek(guild_id, |config| config.list_size) .await .unwrap_or_default(), None => ListSize::default(), }, }; let minimized_pp = match config.minimized_pp { Some(minimized_pp) => minimized_pp, None => match list_size { ListSize::Condensed | ListSize::Detailed => MinimizedPp::default(), ListSize::Single => match orig.guild_id() { Some(guild_id) => ctx .guild_config() .peek(guild_id, |config| config.minimized_pp) .await .unwrap_or_default(), None => MinimizedPp::default(), }, }, }; let pagination = TopPagination::builder() .user(user) .mode(mode) .entries(entries.into_boxed_slice()) .sort_by(sort_by) .farm(farm) .list_size(list_size) .minimized_pp(minimized_pp) .content(content.into_boxed_str()) .msg_owner(msg_owner) .build(); ActiveMessages::builder(pagination) .start_by_update(true) .begin(ctx, orig) .await } async fn process_scores( ctx: &Context, scores: Vec<Score>, mapper_id: u32, sort: Option<ScoreOrder>, ) -> Result<Vec<TopEntry>> { let mut entries = Vec::new(); let maps_id_checksum = scores .iter() .filter_map(|score| score.map.as_ref()) .filter(|map| map.creator_id == mapper_id) .map(|map| (map.map_id as i32, map.checksum.as_deref())) .collect(); let mut maps = ctx.osu_map().maps(&maps_id_checksum).await?; for (i, score) in scores.into_iter().enumerate() { let 
Some(mut map) = maps.remove(&score.map_id) else { continue }; map.convert_mut(score.mode); let mut calc = ctx.pp(&map).mode(score.mode).mods(score.mods.bits()); let attrs = calc.difficulty().await; let stars = attrs.stars() as f32; let max_combo = attrs.max_combo() as u32; let pp = score.pp.expect("missing pp"); let max_pp = match score .pp .filter(|_| score.grade.eq_letter(Grade::X) && score.mode != GameMode::Mania) { Some(pp) => pp, None => calc.performance().await.pp() as f32, }; let entry = TopEntry { original_idx: i, replay: score.replay, score: ScoreSlim::new(score, pp), map, max_pp, stars, max_combo, }; entries.push(entry); } match sort { None => {} Some(ScoreOrder::Acc) => entries.sort_by(|a, b| { b.score .accuracy .partial_cmp(&a.score.accuracy) .unwrap_or(Ordering::Equal) }), Some(ScoreOrder::Bpm) => entries.sort_by(|a, b| { b.map .bpm() .partial_cmp(&a.map.bpm()) .unwrap_or(Ordering::Equal) }), Some(ScoreOrder::Combo) => entries.sort_by_key(|entry| Reverse(entry.score.max_combo)), Some(ScoreOrder::Date) => entries.sort_by_key(|entry| Reverse(entry.score.ended_at)), Some(ScoreOrder::Length) => { entries.sort_by(|a, b| { let a_len = a.map.seconds_drain() as f32 / a.score.mods.clock_rate().unwrap_or(1.0); let b_len = b.map.seconds_drain() as f32 / b.score.mods.clock_rate().unwrap_or(1.0); b_len.partial_cmp(&a_len).unwrap_or(Ordering::Equal) }); } Some(ScoreOrder::Misses) => entries.sort_by(|a, b| { b.score .statistics .count_miss .cmp(&a.score.statistics.count_miss) .then_with(|| { let hits_a = a.score.total_hits(); let hits_b = b.score.total_hits(); let ratio_a = a.score.statistics.count_miss as f32 / hits_a as f32; let ratio_b = b.score.statistics.count_miss as f32 / hits_b as f32; ratio_b .partial_cmp(&ratio_a) .unwrap_or(Ordering::Equal) .then_with(|| hits_b.cmp(&hits_a)) }) }), Some(ScoreOrder::Pp) => entries.sort_by(|a, b| { b.score .pp .partial_cmp(&a.score.pp) .unwrap_or(Ordering::Equal) }), Some(ScoreOrder::RankedDate) => { 
entries.sort_by_key(|entry| Reverse(entry.map.ranked_date())) } Some(ScoreOrder::Score) => entries.sort_by_key(|entry| Reverse(entry.score.score)), Some(ScoreOrder::Stars) => { entries.sort_by(|a, b| b.stars.partial_cmp(&a.stars).unwrap_or(Ordering::Equal)) } } Ok(entries) }
prefix_mappertaiko
identifier_name
mapper.rs
use std::{ borrow::Cow, cmp::{Ordering, Reverse}, collections::HashMap, sync::Arc, }; use bathbot_macros::{command, HasName, SlashCommand}; use bathbot_model::ScoreSlim; use bathbot_psql::model::configs::{ListSize, MinimizedPp}; use bathbot_util::{ constants::{GENERAL_ISSUE, OSU_API_ISSUE}, matcher, CowUtils, }; use eyre::{Report, Result}; use rosu_v2::{ prelude::{GameMode, Grade, OsuError, Score}, request::UserId, }; use twilight_interactions::command::{CommandModel, CreateCommand}; use twilight_model::id::{marker::UserMarker, Id}; use super::{require_link, user_not_found, ScoreOrder, TopEntry}; use crate::{ active::{impls::TopPagination, ActiveMessages}, commands::GameModeOption, core::commands::{prefix::Args, CommandOrigin}, manager::redis::{osu::UserArgs, RedisData}, util::{interaction::InteractionCommand, ChannelExt, InteractionCommandExt}, Context, }; #[derive(CommandModel, CreateCommand, HasName, SlashCommand)] #[command( name = "mapper", desc = "How often does the given mapper appear in top a user's top plays", help = "Count the top plays on maps of the given mapper.\n\ It will try to consider guest difficulties so that if a map was created by someone else \ but the given mapper made the guest diff, it will count.\n\ Similarly, if the given mapper created the mapset but someone else guest diff'd, \ it will not count.\n\ This does not always work perfectly, especially for older maps but it's what the api provides." )] pub struct Mapper<'a> { #[command(desc = "Specify a mapper username")] mapper: Cow<'a, str>, #[command(desc = "Specify a gamemode")] mode: Option<GameModeOption>, #[command(desc = "Specify a username")] name: Option<Cow<'a, str>>, #[command(desc = "Choose how the scores should be ordered")] sort: Option<ScoreOrder>, #[command( desc = "Specify a linked discord user", help = "Instead of specifying an osu! username with the `name` option, \ you can use this option to choose a discord user.\n\ Only works on users who have used the `/link` command." 
)] discord: Option<Id<UserMarker>>, #[command( desc = "Size of the embed", help = "Size of the embed.\n\ `Condensed` shows 10 scores, `Detailed` shows 5, and `Single` shows 1.\n\ The default can be set with the `/config` command." )] size: Option<ListSize>, } impl<'m> Mapper<'m> { fn args( mode: Option<GameModeOption>, mut args: Args<'m>, mapper: Option<&'static str>, ) -> Result<Self, &'static str> { let mapper = match mapper.or_else(|| args.next()) { Some(arg) => arg.into(), None => { let content = "You need to specify at least one osu! username for the mapper. \ If you're not linked, you must specify at least two names."; return Err(content); } }; let mut name = None; let mut discord = None; if let Some(arg) = args.next() { match matcher::get_mention_user(arg) { Some(id) => discord = Some(id), None => name = Some(arg.into()), } } Ok(Self { mapper, mode, name, sort: None, discord, size: None, }) } } #[command] #[desc("How many maps of a user's top100 are made by the given mapper?")] #[help( "Display the top plays of a user which were mapped by the given mapper.\n\ Specify the __mapper first__ and the __user second__." )] #[usage("[mapper] [user]")] #[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")] #[group(Osu)] async fn prefix_mapper(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(None, args, None) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } #[command] #[desc("How many maps of a mania user's top100 are made by the given mapper?")] #[help( "Display the top plays of a mania user which were mapped by the given mapper.\n\ Specify the __mapper first__ and the __user second__." 
)] #[usage("[mapper] [user]")] #[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")] #[alias("mapperm")] #[group(Mania)] pub async fn prefix_mappermania(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(Some(GameModeOption::Mania), args, None) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } #[command] #[desc("How many maps of a taiko user's top100 are made by the given mapper?")] #[help( "Display the top plays of a taiko user which were mapped by the given mapper.\n\ Specify the __mapper first__ and the __user second__." )] #[usage("[mapper] [user]")] #[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")] #[alias("mappert")] #[group(Taiko)] pub async fn prefix_mappertaiko(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(Some(GameModeOption::Taiko), args, None) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } #[command] #[desc("How many maps of a ctb user's top100 are made by the given mapper?")] #[help( "Display the top plays of a ctb user which were mapped by the given mapper.\n\ Specify the __mapper first__ and the __user second__." 
)] #[usage("[mapper] [user]")] #[example("\"Hishiro Chizuru\" badewanne3", "monstrata monstrata")] #[aliases("mapperc", "mappercatch")] #[group(Catch)] async fn prefix_mapperctb(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(Some(GameModeOption::Catch), args, None) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } #[command] #[desc("How many maps of a user's top100 are made by Sotarks?")] #[usage("[username]")] #[example("badewanne3")] #[group(Osu)] pub async fn prefix_sotarks(ctx: Arc<Context>, msg: &Message, args: Args<'_>) -> Result<()> { match Mapper::args(Some(GameModeOption::Osu), args, Some("sotarks")) { Ok(args) => mapper(ctx, msg.into(), args).await, Err(content) => { msg.error(&ctx, content).await?; Ok(()) } } } async fn slash_mapper(ctx: Arc<Context>, mut command: InteractionCommand) -> Result<()> { let args = Mapper::from_interaction(command.input_data())?; mapper(ctx, (&mut command).into(), args).await } async fn mapper(ctx: Arc<Context>, orig: CommandOrigin<'_>, args: Mapper<'_>) -> Result<()>
/// Turns the user's raw top scores into sorted [`TopEntry`]s, keeping only
/// scores whose map was authored by `mapper_id`.
///
/// For each retained score the map's difficulty attributes are calculated
/// (stars, max combo) and a max-pp value is resolved; entries are then
/// optionally sorted according to `sort` (descending for every criterion).
///
/// # Errors
///
/// Fails if the map lookup (`ctx.osu_map().maps`) fails.
async fn process_scores(
    ctx: &Context,
    scores: Vec<Score>,
    mapper_id: u32,
    sort: Option<ScoreOrder>,
) -> Result<Vec<TopEntry>> {
    let mut entries = Vec::new();

    // Collect (map_id, checksum) pairs for maps made by the requested mapper;
    // scores without an attached map or by other mappers are dropped here.
    let maps_id_checksum = scores
        .iter()
        .filter_map(|score| score.map.as_ref())
        .filter(|map| map.creator_id == mapper_id)
        .map(|map| (map.map_id as i32, map.checksum.as_deref()))
        .collect();

    let mut maps = ctx.osu_map().maps(&maps_id_checksum).await?;

    for (i, score) in scores.into_iter().enumerate() {
        // `maps` only contains the mapper's maps, so this also acts as the
        // per-score filter: scores on other maps are skipped.
        let Some(mut map) = maps.remove(&score.map_id) else { continue };

        // Convert the map in place to the score's mode (e.g. osu! -> convert).
        map.convert_mut(score.mode);

        let mut calc = ctx.pp(&map).mode(score.mode).mods(score.mods.bits());
        let attrs = calc.difficulty().await;
        let stars = attrs.stars() as f32;
        let max_combo = attrs.max_combo() as u32;

        // Top scores are expected to always carry a pp value; a missing one
        // is treated as a bug upstream.
        let pp = score.pp.expect("missing pp");

        // For SS (grade X) non-mania scores the score's own pp already equals
        // the map's max pp, so the (more expensive) performance calculation is
        // only run otherwise. NOTE(review): mania is excluded here —
        // presumably because mania SS pp can still differ from max pp; confirm.
        let max_pp = match score
            .pp
            .filter(|_| score.grade.eq_letter(Grade::X) && score.mode != GameMode::Mania)
        {
            Some(pp) => pp,
            None => calc.performance().await.pp() as f32,
        };

        // Field order matters: `score.replay` is moved out before the rest of
        // `score` is consumed by `ScoreSlim::new`.
        let entry = TopEntry {
            original_idx: i,
            replay: score.replay,
            score: ScoreSlim::new(score, pp),
            map,
            max_pp,
            stars,
            max_combo,
        };

        entries.push(entry);
    }

    // Every ordering below sorts descending (best/largest first); `None`
    // keeps the original API order (by `original_idx`).
    match sort {
        None => {}
        Some(ScoreOrder::Acc) => entries.sort_by(|a, b| {
            b.score
                .accuracy
                .partial_cmp(&a.score.accuracy)
                .unwrap_or(Ordering::Equal)
        }),
        Some(ScoreOrder::Bpm) => entries.sort_by(|a, b| {
            b.map
                .bpm()
                .partial_cmp(&a.map.bpm())
                .unwrap_or(Ordering::Equal)
        }),
        Some(ScoreOrder::Combo) => entries.sort_by_key(|entry| Reverse(entry.score.max_combo)),
        Some(ScoreOrder::Date) => entries.sort_by_key(|entry| Reverse(entry.score.ended_at)),
        Some(ScoreOrder::Length) => {
            // Effective length: drain time shortened/lengthened by the mods'
            // clock rate (DT/HT); mods without a rate default to 1.0.
            entries.sort_by(|a, b| {
                let a_len = a.map.seconds_drain() as f32 / a.score.mods.clock_rate().unwrap_or(1.0);
                let b_len = b.map.seconds_drain() as f32 / b.score.mods.clock_rate().unwrap_or(1.0);

                b_len.partial_cmp(&a_len).unwrap_or(Ordering::Equal)
            });
        }
        Some(ScoreOrder::Misses) => entries.sort_by(|a, b| {
            // Primary: raw miss count; tie-break: miss ratio (misses / total
            // hits), then total hits.
            b.score
                .statistics
                .count_miss
                .cmp(&a.score.statistics.count_miss)
                .then_with(|| {
                    let hits_a = a.score.total_hits();
                    let hits_b = b.score.total_hits();

                    let ratio_a = a.score.statistics.count_miss as f32 / hits_a as f32;
                    let ratio_b = b.score.statistics.count_miss as f32 / hits_b as f32;

                    ratio_b
                        .partial_cmp(&ratio_a)
                        .unwrap_or(Ordering::Equal)
                        .then_with(|| hits_b.cmp(&hits_a))
                })
        }),
        Some(ScoreOrder::Pp) => entries.sort_by(|a, b| {
            b.score
                .pp
                .partial_cmp(&a.score.pp)
                .unwrap_or(Ordering::Equal)
        }),
        Some(ScoreOrder::RankedDate) => {
            entries.sort_by_key(|entry| Reverse(entry.map.ranked_date()))
        }
        Some(ScoreOrder::Score) => entries.sort_by_key(|entry| Reverse(entry.score.score)),
        Some(ScoreOrder::Stars) => {
            entries.sort_by(|a, b| b.stars.partial_cmp(&a.stars).unwrap_or(Ordering::Equal))
        }
    }

    Ok(entries)
}
{ let msg_owner = orig.user_id()?; let mut config = match ctx.user_config().with_osu_id(msg_owner).await { Ok(config) => config, Err(err) => { let _ = orig.error(&ctx, GENERAL_ISSUE).await; return Err(err); } }; let mode = args .mode .map(GameMode::from) .or(config.mode) .unwrap_or(GameMode::Osu); let user_id = match user_id!(ctx, orig, args) { Some(user_id) => user_id, None => match config.osu.take() { Some(user_id) => UserId::Id(user_id), None => return require_link(&ctx, &orig).await, }, }; let mapper = args.mapper.cow_to_ascii_lowercase(); let mapper_args = UserArgs::username(&ctx, mapper.as_ref()).await.mode(mode); let mapper_fut = ctx.redis().osu_user(mapper_args); // Retrieve the user and their top scores let user_args = UserArgs::rosu_id(&ctx, &user_id).await.mode(mode); let scores_fut = ctx.osu_scores().top().limit(100).exec_with_user(user_args); let (mapper, user, scores) = match tokio::join!(mapper_fut, scores_fut) { (Ok(mapper), Ok((user, scores))) => (mapper, user, scores), (Err(OsuError::NotFound), _) => { let content = format!("Mapper with username `{mapper}` was not found"); return orig.error(&ctx, content).await; } (_, Err(OsuError::NotFound)) => { let content = user_not_found(&ctx, user_id).await; return orig.error(&ctx, content).await; } (Err(err), _) | (_, Err(err)) => { let _ = orig.error(&ctx, OSU_API_ISSUE).await; let err = Report::new(err).wrap_err("failed to get mapper, user, or scores"); return Err(err); } }; let (mapper_name, mapper_id) = match &mapper { RedisData::Original(mapper) => (mapper.username.as_str(), mapper.user_id), RedisData::Archive(mapper) => (mapper.username.as_str(), mapper.user_id), }; let username = user.username(); let entries = match process_scores(&ctx, scores, mapper_id, args.sort).await { Ok(entries) => entries, Err(err) => { let _ = orig.error(&ctx, GENERAL_ISSUE).await; return Err(err.wrap_err("failed to process scores")); } }; // Accumulate all necessary data let content = match mapper_name { "Sotarks" => { let 
amount = entries.len(); let mut content = format!( "I found {amount} Sotarks map{plural} in `{username}`'s top100, ", amount = amount, plural = if amount != 1 { "s" } else { "" }, ); let to_push = match amount { 0 => "I'm proud \\:)", 1..=4 => "that's already too many...", 5..=8 => "kinda sad \\:/", 9..=15 => "pretty sad \\:(", 16..=25 => "this is so sad \\:((", 26..=35 => "this needs to stop", 36..=49 => "that's a serious problem...", 50 => "that's half. HALF.", 51..=79 => "how do you sleep at night...", 80..=99 => "i'm not even mad, that's just impressive", 100 => "you did it. \"Congrats\".", _ => "wait how did you do that", }; content.push_str(to_push); content } _ => format!( "{count} of `{username}`'{genitive} top score maps were mapped by `{mapper_name}`", count = entries.len(), genitive = if username.ends_with('s') { "" } else { "s" }, ), }; let sort_by = args.sort.unwrap_or(ScoreOrder::Pp).into(); let farm = HashMap::default(); let list_size = match args.size.or(config.list_size) { Some(size) => size, None => match orig.guild_id() { Some(guild_id) => ctx .guild_config() .peek(guild_id, |config| config.list_size) .await .unwrap_or_default(), None => ListSize::default(), }, }; let minimized_pp = match config.minimized_pp { Some(minimized_pp) => minimized_pp, None => match list_size { ListSize::Condensed | ListSize::Detailed => MinimizedPp::default(), ListSize::Single => match orig.guild_id() { Some(guild_id) => ctx .guild_config() .peek(guild_id, |config| config.minimized_pp) .await .unwrap_or_default(), None => MinimizedPp::default(), }, }, }; let pagination = TopPagination::builder() .user(user) .mode(mode) .entries(entries.into_boxed_slice()) .sort_by(sort_by) .farm(farm) .list_size(list_size) .minimized_pp(minimized_pp) .content(content.into_boxed_str()) .msg_owner(msg_owner) .build(); ActiveMessages::builder(pagination) .start_by_update(true) .begin(ctx, orig) .await }
identifier_body
linked-data-provider.service.ts
import { Injectable, Inject } from '@angular/core'; import { ISchemaAgent, IRelatableSchemaAgent, IdentityValue, SchemaNavigator, JsonSchema, ExtendedFieldDescriptor } from 'json-schema-services'; import { FieldContextProvider } from './field-context-provider.service'; import { FormField } from './models/form-field'; import { fieldComponentContextToken, FieldComponentContext } from './models/form-field-context'; import { LinkedDataCache } from "./linked-data-cache.service"; import * as pointer from 'json-pointer'; import * as debuglib from 'debug'; var debug = debuglib('schema-ui:linked-data-provider'); /** * Class that helps with resolving linked field data. * * This class is used by some fields. */ @Injectable() export class LinkedDataProvider { /** * Cached promise so we dont fetch the info twice. */ private data: Promise<any[]>; /** * Cached schema agent for the linked resource. */ private linkedAgent: Promise<ISchemaAgent>; public constructor( @Inject('ISchemaAgent') private agent: IRelatableSchemaAgent, @Inject(fieldComponentContextToken) private field: FieldComponentContext<FormField<any>>, @Inject(FieldContextProvider) private context: FieldContextProvider, @Inject(LinkedDataCache) private cache: LinkedDataCache ) { } /** * Get an linked resource as simplified data. * * @param context The context of the form (e.g. other form values) */ public resolveLinkedData(context?: any, forceReload?: boolean): Promise<any[]> { if (!this.field.meta.field) { return Promise.reject(new Error('MultiSelectField: Field-data not set.')); } if (!this.field.meta.field.link) { return Promise.reject(new Error('MultiSelectField: Field-data does not contain a link! Without a set hyperlink we cannot load the filter values.')); } if (forceReload !== true) { // Check if we already cached it locally. if (this.data != null) { return this.data; } // Fetch state from cache. 
var state = this.cache.fetch(this.agent.schema.schemaId, this.field.meta.field.link as string); if (state !== null) { return Promise.resolve(state); } } if (this.agent.schema.hasLink(this.field.meta.field.link)) { this.linkedAgent = null; return this.data = this.chooseAppropriateContext(context) .then(ctx => { if (startsWith(this.field.meta.field.link as string, 'list')) { return this.agent .list(1, 1000, this.field.meta.field.link as any, ctx) .then(cursor => cursor.all()); } else if (startsWith(this.field.meta.field.link as string, 'read')) { return this.agent .read<any>(ctx, this.field.meta.field.link as any)
return pointer.get(item, this.field.meta.field.data['pointer'] || '/'); } catch (e) { debug(`[warn] unable to get the data for pointer "${this.field.meta.field.data['pointer']}"`); } return []; }); } else { throw new Error('I cannot resolve this link, tip: prefix the link name with "read-" or "list-" so we know what to do.'); } }) .then(state => { this.cache.push(this.agent.schema.schemaId, this.field.meta.field.link as string, [], state); return state; }); } else { return Promise.reject(new Error('MultiSelectField: Field link is not a valid hyperlink: it doesnt exist on the current schema.')); } } /** * Get an linked resource as simplified data. * * @param context The context of the form (e.g. other form values) */ public resolveSimplifiedLinkedData(context?: any, forceReload?: boolean, includeOriginal?: boolean): Promise<SimplifiedLinkedResource[]> { return this.chooseAppropriateContext(context).then(ctx => this.resolveLinkedData(ctx, forceReload).then(items => this.mapLinkedData(items, ctx, forceReload, includeOriginal))); } /** * Convert already received values to the simplified format. * * @param items The data to convert/map. * @param context The context of the form (e.g. other form values) */ public mapLinkedData(items: any[], context?: any, forceReload?: boolean, includeOriginal?: boolean): Promise<SimplifiedLinkedResource[]> { return this.chooseAppropriateContext(context).then(ctx => this.mapMultipleChoice(items, ctx, forceReload, includeOriginal)); } /** * Convert the given list of simplified values to a list of the actual linked objects. * * @param items The simplified linked resource items. 
*/ public getLinkedDataFromSimplifiedLinkedData(items: SimplifiedLinkedResource[]): Promise<any | IdentityValue[]> { if (typeof this.field.meta.items === 'object') { if ((this.field.meta.items as JsonSchema).type === 'object') { return Promise.all([this.resolveLinkedData(), this.createLinkedAgent()]) .then(([data, agent]) => items.map(item => data.find(x => item.value === x[agent.schema.identityProperty]))); } else { return Promise.resolve(items.map(x => x.value)); } } else { return Promise.reject(new Error('The given field cannot be converted to linked items, indexed array schemas are unsupported.')); } } /** * Create an linked schema agent to perform requests on the linked resource. * * @param context The context of the form (e.g. other form values) */ public createLinkedAgent(context?: any, forceReload?: boolean): Promise<ISchemaAgent> { if (forceReload !== true && this.linkedAgent != null) { return this.linkedAgent; } return this.linkedAgent = this.chooseAppropriateContext(context).then(ctx => this.agent.createChildByLink(this.field.meta.field.link as string, ctx)); } /** * Map an entity object to a multiple choice item. * * @param linkName The link that you want to map the choices for. * @param items The items that should be mapped. * @param forceReload * @param includeOriginal Whether or not original. */ private mapMultipleChoice(items: any[], context: any, forceReload?: boolean, includeOriginal?: boolean): Promise<SimplifiedLinkedResource[]> { return this.createLinkedAgent(context, forceReload).then(agent => items .map(item => ({ name: getDisplayName(this.field.meta, items, item), description: getDescription(this.field.meta, item), order: getOrder(this.field.meta, item), value: getIdentityValue(agent.schema, this.field.meta, item), parent: getParentValue(this.field.meta, item), original: includeOriginal ? 
item : null } as SimplifiedLinkedResource)) .sort((a: SimplifiedLinkedResource, b: SimplifiedLinkedResource) => { if (a.order === b.order) { return String(a.name).localeCompare(String(b.name)); } if (a.order > b.order) { return 1; } return -1; })); } /** * Chooses between the ambient context and the one given, and returns the correct one (eventually). */ private chooseAppropriateContext(context?: any): Promise<any> { if (context != null && Object.keys(context).length) { return Promise.resolve(context); } return Promise.resolve(this.context.getData(false)); } } function getDisplayName(field: ExtendedFieldDescriptor, items: any[], item: any): string { if (field.field.data == null || field.field.data.label == null) { return item['name'] || item['displayName'] || item['internalName'] || item['entity']; } if (field.field.data.parent && field.field.data.mergeLabelWithParents === true) { return getDisplayNameForParent(field, items, item, item[field.field.data.label]); } return item[field.field.data.label]; } function getDisplayNameForParent(field: ExtendedFieldDescriptor, items: any[], item: any, label: string): string { if (item[field.field.data.parent] != null) { var parent = (items || []).find(x => x[field.field.data.value] === item[field.field.data.parent]); if (parent != null) { label = parent[field.field.data.label] + ' › ' + label; if (parent[field.field.data.parent] != null) { label = getDisplayNameForParent(field, items, parent, label); } } } return label; } function getDescription(field: ExtendedFieldDescriptor, item: any): string { if (field.field.data == null || field.field.data.description == null) { return item['description']; } return item[field.field.data.description]; } function getOrder(field: ExtendedFieldDescriptor, item: any): number { if (field.field.data == null || field.field.data.order == null) { return parseInt(item['order'], 10) || 0; } return parseInt(item[field.field.data.order], 10); } function getIdentityValue(schema: SchemaNavigator, field: 
ExtendedFieldDescriptor, item: any): IdentityValue { if (field.field.data == null || field.field.data.label == null) { try { // This will only work with listed properties (not with sub properties) return schema.getIdentityValue(item); } catch (e) { debug(`[warn] I cannot fetch the identity property for ${schema.entity}, please set it manually using the "value" data-property!`); return item['id'] || item['name'] || item['entity']; } } return item[field.field.data.value]; } function getParentValue(field: ExtendedFieldDescriptor, item: any): IdentityValue { if (field.field.data == null || field.field.data.parent == null) { return item['parent']; } return item[field.field.data.parent]; } function startsWith(str: string, target: string) { return String(str).slice(0, target.length) == String(target); } /** * Standardized object that represents the linked object, but only contains the values necesarry to display/choose the linked resource. */ export interface SimplifiedLinkedResource { /** * A short name that can be used for short displays. */ name: string; /** * An completer description. */ description: string; /** * The ordering number if applicable or 0. */ order: number; /** * Whether or not this item is disabled. */ disabled?: boolean; /** * The actual identity value(s) that link the two objects. */ value: IdentityValue; /** * The identity value of the parent item, if set in the schema. */ parent?: IdentityValue; /** * Original unmapped item. */ original?: any; }
.then(item => { try {
random_line_split
linked-data-provider.service.ts
import { Injectable, Inject } from '@angular/core'; import { ISchemaAgent, IRelatableSchemaAgent, IdentityValue, SchemaNavigator, JsonSchema, ExtendedFieldDescriptor } from 'json-schema-services'; import { FieldContextProvider } from './field-context-provider.service'; import { FormField } from './models/form-field'; import { fieldComponentContextToken, FieldComponentContext } from './models/form-field-context'; import { LinkedDataCache } from "./linked-data-cache.service"; import * as pointer from 'json-pointer'; import * as debuglib from 'debug'; var debug = debuglib('schema-ui:linked-data-provider'); /** * Class that helps with resolving linked field data. * * This class is used by some fields. */ @Injectable() export class LinkedDataProvider { /** * Cached promise so we dont fetch the info twice. */ private data: Promise<any[]>; /** * Cached schema agent for the linked resource. */ private linkedAgent: Promise<ISchemaAgent>; public constructor( @Inject('ISchemaAgent') private agent: IRelatableSchemaAgent, @Inject(fieldComponentContextToken) private field: FieldComponentContext<FormField<any>>, @Inject(FieldContextProvider) private context: FieldContextProvider, @Inject(LinkedDataCache) private cache: LinkedDataCache ) { } /** * Get an linked resource as simplified data. * * @param context The context of the form (e.g. other form values) */ public resolveLinkedData(context?: any, forceReload?: boolean): Promise<any[]> { if (!this.field.meta.field) { return Promise.reject(new Error('MultiSelectField: Field-data not set.')); } if (!this.field.meta.field.link) { return Promise.reject(new Error('MultiSelectField: Field-data does not contain a link! Without a set hyperlink we cannot load the filter values.')); } if (forceReload !== true) { // Check if we already cached it locally. if (this.data != null) { return this.data; } // Fetch state from cache. 
var state = this.cache.fetch(this.agent.schema.schemaId, this.field.meta.field.link as string); if (state !== null) { return Promise.resolve(state); } } if (this.agent.schema.hasLink(this.field.meta.field.link)) { this.linkedAgent = null; return this.data = this.chooseAppropriateContext(context) .then(ctx => { if (startsWith(this.field.meta.field.link as string, 'list')) { return this.agent .list(1, 1000, this.field.meta.field.link as any, ctx) .then(cursor => cursor.all()); } else if (startsWith(this.field.meta.field.link as string, 'read')) { return this.agent .read<any>(ctx, this.field.meta.field.link as any) .then(item => { try { return pointer.get(item, this.field.meta.field.data['pointer'] || '/'); } catch (e) { debug(`[warn] unable to get the data for pointer "${this.field.meta.field.data['pointer']}"`); } return []; }); } else { throw new Error('I cannot resolve this link, tip: prefix the link name with "read-" or "list-" so we know what to do.'); } }) .then(state => { this.cache.push(this.agent.schema.schemaId, this.field.meta.field.link as string, [], state); return state; }); } else { return Promise.reject(new Error('MultiSelectField: Field link is not a valid hyperlink: it doesnt exist on the current schema.')); } } /** * Get an linked resource as simplified data. * * @param context The context of the form (e.g. other form values) */ public resolveSimplifiedLinkedData(context?: any, forceReload?: boolean, includeOriginal?: boolean): Promise<SimplifiedLinkedResource[]> { return this.chooseAppropriateContext(context).then(ctx => this.resolveLinkedData(ctx, forceReload).then(items => this.mapLinkedData(items, ctx, forceReload, includeOriginal))); } /** * Convert already received values to the simplified format. * * @param items The data to convert/map. * @param context The context of the form (e.g. 
other form values) */ public mapLinkedData(items: any[], context?: any, forceReload?: boolean, includeOriginal?: boolean): Promise<SimplifiedLinkedResource[]> { return this.chooseAppropriateContext(context).then(ctx => this.mapMultipleChoice(items, ctx, forceReload, includeOriginal)); } /** * Convert the given list of simplified values to a list of the actual linked objects. * * @param items The simplified linked resource items. */ public getLinkedDataFromSimplifiedLinkedData(items: SimplifiedLinkedResource[]): Promise<any | IdentityValue[]> { if (typeof this.field.meta.items === 'object') { if ((this.field.meta.items as JsonSchema).type === 'object') { return Promise.all([this.resolveLinkedData(), this.createLinkedAgent()]) .then(([data, agent]) => items.map(item => data.find(x => item.value === x[agent.schema.identityProperty]))); } else { return Promise.resolve(items.map(x => x.value)); } } else { return Promise.reject(new Error('The given field cannot be converted to linked items, indexed array schemas are unsupported.')); } } /** * Create an linked schema agent to perform requests on the linked resource. * * @param context The context of the form (e.g. other form values) */ public createLinkedAgent(context?: any, forceReload?: boolean): Promise<ISchemaAgent> { if (forceReload !== true && this.linkedAgent != null) { return this.linkedAgent; } return this.linkedAgent = this.chooseAppropriateContext(context).then(ctx => this.agent.createChildByLink(this.field.meta.field.link as string, ctx)); } /** * Map an entity object to a multiple choice item. * * @param linkName The link that you want to map the choices for. * @param items The items that should be mapped. * @param forceReload * @param includeOriginal Whether or not original. 
*/ private mapMultipleChoice(items: any[], context: any, forceReload?: boolean, includeOriginal?: boolean): Promise<SimplifiedLinkedResource[]> { return this.createLinkedAgent(context, forceReload).then(agent => items .map(item => ({ name: getDisplayName(this.field.meta, items, item), description: getDescription(this.field.meta, item), order: getOrder(this.field.meta, item), value: getIdentityValue(agent.schema, this.field.meta, item), parent: getParentValue(this.field.meta, item), original: includeOriginal ? item : null } as SimplifiedLinkedResource)) .sort((a: SimplifiedLinkedResource, b: SimplifiedLinkedResource) => { if (a.order === b.order)
if (a.order > b.order) { return 1; } return -1; })); } /** * Chooses between the ambient context and the one given, and returns the correct one (eventually). */ private chooseAppropriateContext(context?: any): Promise<any> { if (context != null && Object.keys(context).length) { return Promise.resolve(context); } return Promise.resolve(this.context.getData(false)); } } function getDisplayName(field: ExtendedFieldDescriptor, items: any[], item: any): string { if (field.field.data == null || field.field.data.label == null) { return item['name'] || item['displayName'] || item['internalName'] || item['entity']; } if (field.field.data.parent && field.field.data.mergeLabelWithParents === true) { return getDisplayNameForParent(field, items, item, item[field.field.data.label]); } return item[field.field.data.label]; } function getDisplayNameForParent(field: ExtendedFieldDescriptor, items: any[], item: any, label: string): string { if (item[field.field.data.parent] != null) { var parent = (items || []).find(x => x[field.field.data.value] === item[field.field.data.parent]); if (parent != null) { label = parent[field.field.data.label] + ' › ' + label; if (parent[field.field.data.parent] != null) { label = getDisplayNameForParent(field, items, parent, label); } } } return label; } function getDescription(field: ExtendedFieldDescriptor, item: any): string { if (field.field.data == null || field.field.data.description == null) { return item['description']; } return item[field.field.data.description]; } function getOrder(field: ExtendedFieldDescriptor, item: any): number { if (field.field.data == null || field.field.data.order == null) { return parseInt(item['order'], 10) || 0; } return parseInt(item[field.field.data.order], 10); } function getIdentityValue(schema: SchemaNavigator, field: ExtendedFieldDescriptor, item: any): IdentityValue { if (field.field.data == null || field.field.data.label == null) { try { // This will only work with listed properties (not with sub 
properties) return schema.getIdentityValue(item); } catch (e) { debug(`[warn] I cannot fetch the identity property for ${schema.entity}, please set it manually using the "value" data-property!`); return item['id'] || item['name'] || item['entity']; } } return item[field.field.data.value]; } function getParentValue(field: ExtendedFieldDescriptor, item: any): IdentityValue { if (field.field.data == null || field.field.data.parent == null) { return item['parent']; } return item[field.field.data.parent]; } function startsWith(str: string, target: string) { return String(str).slice(0, target.length) == String(target); } /** * Standardized object that represents the linked object, but only contains the values necesarry to display/choose the linked resource. */ export interface SimplifiedLinkedResource { /** * A short name that can be used for short displays. */ name: string; /** * An completer description. */ description: string; /** * The ordering number if applicable or 0. */ order: number; /** * Whether or not this item is disabled. */ disabled?: boolean; /** * The actual identity value(s) that link the two objects. */ value: IdentityValue; /** * The identity value of the parent item, if set in the schema. */ parent?: IdentityValue; /** * Original unmapped item. */ original?: any; }
{ return String(a.name).localeCompare(String(b.name)); }
conditional_block
linked-data-provider.service.ts
import { Injectable, Inject } from '@angular/core'; import { ISchemaAgent, IRelatableSchemaAgent, IdentityValue, SchemaNavigator, JsonSchema, ExtendedFieldDescriptor } from 'json-schema-services'; import { FieldContextProvider } from './field-context-provider.service'; import { FormField } from './models/form-field'; import { fieldComponentContextToken, FieldComponentContext } from './models/form-field-context'; import { LinkedDataCache } from "./linked-data-cache.service"; import * as pointer from 'json-pointer'; import * as debuglib from 'debug'; var debug = debuglib('schema-ui:linked-data-provider'); /** * Class that helps with resolving linked field data. * * This class is used by some fields. */ @Injectable() export class LinkedDataProvider { /** * Cached promise so we dont fetch the info twice. */ private data: Promise<any[]>; /** * Cached schema agent for the linked resource. */ private linkedAgent: Promise<ISchemaAgent>; public constructor( @Inject('ISchemaAgent') private agent: IRelatableSchemaAgent, @Inject(fieldComponentContextToken) private field: FieldComponentContext<FormField<any>>, @Inject(FieldContextProvider) private context: FieldContextProvider, @Inject(LinkedDataCache) private cache: LinkedDataCache ) { } /** * Get an linked resource as simplified data. * * @param context The context of the form (e.g. other form values) */ public resolveLinkedData(context?: any, forceReload?: boolean): Promise<any[]> { if (!this.field.meta.field) { return Promise.reject(new Error('MultiSelectField: Field-data not set.')); } if (!this.field.meta.field.link) { return Promise.reject(new Error('MultiSelectField: Field-data does not contain a link! Without a set hyperlink we cannot load the filter values.')); } if (forceReload !== true) { // Check if we already cached it locally. if (this.data != null) { return this.data; } // Fetch state from cache. 
var state = this.cache.fetch(this.agent.schema.schemaId, this.field.meta.field.link as string); if (state !== null) { return Promise.resolve(state); } } if (this.agent.schema.hasLink(this.field.meta.field.link)) { this.linkedAgent = null; return this.data = this.chooseAppropriateContext(context) .then(ctx => { if (startsWith(this.field.meta.field.link as string, 'list')) { return this.agent .list(1, 1000, this.field.meta.field.link as any, ctx) .then(cursor => cursor.all()); } else if (startsWith(this.field.meta.field.link as string, 'read')) { return this.agent .read<any>(ctx, this.field.meta.field.link as any) .then(item => { try { return pointer.get(item, this.field.meta.field.data['pointer'] || '/'); } catch (e) { debug(`[warn] unable to get the data for pointer "${this.field.meta.field.data['pointer']}"`); } return []; }); } else { throw new Error('I cannot resolve this link, tip: prefix the link name with "read-" or "list-" so we know what to do.'); } }) .then(state => { this.cache.push(this.agent.schema.schemaId, this.field.meta.field.link as string, [], state); return state; }); } else { return Promise.reject(new Error('MultiSelectField: Field link is not a valid hyperlink: it doesnt exist on the current schema.')); } } /** * Get an linked resource as simplified data. * * @param context The context of the form (e.g. other form values) */ public resolveSimplifiedLinkedData(context?: any, forceReload?: boolean, includeOriginal?: boolean): Promise<SimplifiedLinkedResource[]> { return this.chooseAppropriateContext(context).then(ctx => this.resolveLinkedData(ctx, forceReload).then(items => this.mapLinkedData(items, ctx, forceReload, includeOriginal))); } /** * Convert already received values to the simplified format. * * @param items The data to convert/map. * @param context The context of the form (e.g. other form values) */ public mapLinkedData(items: any[], context?: any, forceReload?: boolean, includeOriginal?: boolean): Promise<SimplifiedLinkedResource[]>
/** * Convert the given list of simplified values to a list of the actual linked objects. * * @param items The simplified linked resource items. */ public getLinkedDataFromSimplifiedLinkedData(items: SimplifiedLinkedResource[]): Promise<any | IdentityValue[]> { if (typeof this.field.meta.items === 'object') { if ((this.field.meta.items as JsonSchema).type === 'object') { return Promise.all([this.resolveLinkedData(), this.createLinkedAgent()]) .then(([data, agent]) => items.map(item => data.find(x => item.value === x[agent.schema.identityProperty]))); } else { return Promise.resolve(items.map(x => x.value)); } } else { return Promise.reject(new Error('The given field cannot be converted to linked items, indexed array schemas are unsupported.')); } } /** * Create an linked schema agent to perform requests on the linked resource. * * @param context The context of the form (e.g. other form values) */ public createLinkedAgent(context?: any, forceReload?: boolean): Promise<ISchemaAgent> { if (forceReload !== true && this.linkedAgent != null) { return this.linkedAgent; } return this.linkedAgent = this.chooseAppropriateContext(context).then(ctx => this.agent.createChildByLink(this.field.meta.field.link as string, ctx)); } /** * Map an entity object to a multiple choice item. * * @param linkName The link that you want to map the choices for. * @param items The items that should be mapped. * @param forceReload * @param includeOriginal Whether or not original. */ private mapMultipleChoice(items: any[], context: any, forceReload?: boolean, includeOriginal?: boolean): Promise<SimplifiedLinkedResource[]> { return this.createLinkedAgent(context, forceReload).then(agent => items .map(item => ({ name: getDisplayName(this.field.meta, items, item), description: getDescription(this.field.meta, item), order: getOrder(this.field.meta, item), value: getIdentityValue(agent.schema, this.field.meta, item), parent: getParentValue(this.field.meta, item), original: includeOriginal ? 
item : null } as SimplifiedLinkedResource)) .sort((a: SimplifiedLinkedResource, b: SimplifiedLinkedResource) => { if (a.order === b.order) { return String(a.name).localeCompare(String(b.name)); } if (a.order > b.order) { return 1; } return -1; })); } /** * Chooses between the ambient context and the one given, and returns the correct one (eventually). */ private chooseAppropriateContext(context?: any): Promise<any> { if (context != null && Object.keys(context).length) { return Promise.resolve(context); } return Promise.resolve(this.context.getData(false)); } } function getDisplayName(field: ExtendedFieldDescriptor, items: any[], item: any): string { if (field.field.data == null || field.field.data.label == null) { return item['name'] || item['displayName'] || item['internalName'] || item['entity']; } if (field.field.data.parent && field.field.data.mergeLabelWithParents === true) { return getDisplayNameForParent(field, items, item, item[field.field.data.label]); } return item[field.field.data.label]; } function getDisplayNameForParent(field: ExtendedFieldDescriptor, items: any[], item: any, label: string): string { if (item[field.field.data.parent] != null) { var parent = (items || []).find(x => x[field.field.data.value] === item[field.field.data.parent]); if (parent != null) { label = parent[field.field.data.label] + ' › ' + label; if (parent[field.field.data.parent] != null) { label = getDisplayNameForParent(field, items, parent, label); } } } return label; } function getDescription(field: ExtendedFieldDescriptor, item: any): string { if (field.field.data == null || field.field.data.description == null) { return item['description']; } return item[field.field.data.description]; } function getOrder(field: ExtendedFieldDescriptor, item: any): number { if (field.field.data == null || field.field.data.order == null) { return parseInt(item['order'], 10) || 0; } return parseInt(item[field.field.data.order], 10); } function getIdentityValue(schema: SchemaNavigator, field: 
ExtendedFieldDescriptor, item: any): IdentityValue { if (field.field.data == null || field.field.data.label == null) { try { // This will only work with listed properties (not with sub properties) return schema.getIdentityValue(item); } catch (e) { debug(`[warn] I cannot fetch the identity property for ${schema.entity}, please set it manually using the "value" data-property!`); return item['id'] || item['name'] || item['entity']; } } return item[field.field.data.value]; } function getParentValue(field: ExtendedFieldDescriptor, item: any): IdentityValue { if (field.field.data == null || field.field.data.parent == null) { return item['parent']; } return item[field.field.data.parent]; } function startsWith(str: string, target: string) { return String(str).slice(0, target.length) == String(target); } /** * Standardized object that represents the linked object, but only contains the values necesarry to display/choose the linked resource. */ export interface SimplifiedLinkedResource { /** * A short name that can be used for short displays. */ name: string; /** * An completer description. */ description: string; /** * The ordering number if applicable or 0. */ order: number; /** * Whether or not this item is disabled. */ disabled?: boolean; /** * The actual identity value(s) that link the two objects. */ value: IdentityValue; /** * The identity value of the parent item, if set in the schema. */ parent?: IdentityValue; /** * Original unmapped item. */ original?: any; }
{ return this.chooseAppropriateContext(context).then(ctx => this.mapMultipleChoice(items, ctx, forceReload, includeOriginal)); }
identifier_body
linked-data-provider.service.ts
import { Injectable, Inject } from '@angular/core'; import { ISchemaAgent, IRelatableSchemaAgent, IdentityValue, SchemaNavigator, JsonSchema, ExtendedFieldDescriptor } from 'json-schema-services'; import { FieldContextProvider } from './field-context-provider.service'; import { FormField } from './models/form-field'; import { fieldComponentContextToken, FieldComponentContext } from './models/form-field-context'; import { LinkedDataCache } from "./linked-data-cache.service"; import * as pointer from 'json-pointer'; import * as debuglib from 'debug'; var debug = debuglib('schema-ui:linked-data-provider'); /** * Class that helps with resolving linked field data. * * This class is used by some fields. */ @Injectable() export class LinkedDataProvider { /** * Cached promise so we dont fetch the info twice. */ private data: Promise<any[]>; /** * Cached schema agent for the linked resource. */ private linkedAgent: Promise<ISchemaAgent>; public constructor( @Inject('ISchemaAgent') private agent: IRelatableSchemaAgent, @Inject(fieldComponentContextToken) private field: FieldComponentContext<FormField<any>>, @Inject(FieldContextProvider) private context: FieldContextProvider, @Inject(LinkedDataCache) private cache: LinkedDataCache ) { } /** * Get an linked resource as simplified data. * * @param context The context of the form (e.g. other form values) */ public resolveLinkedData(context?: any, forceReload?: boolean): Promise<any[]> { if (!this.field.meta.field) { return Promise.reject(new Error('MultiSelectField: Field-data not set.')); } if (!this.field.meta.field.link) { return Promise.reject(new Error('MultiSelectField: Field-data does not contain a link! Without a set hyperlink we cannot load the filter values.')); } if (forceReload !== true) { // Check if we already cached it locally. if (this.data != null) { return this.data; } // Fetch state from cache. 
var state = this.cache.fetch(this.agent.schema.schemaId, this.field.meta.field.link as string); if (state !== null) { return Promise.resolve(state); } } if (this.agent.schema.hasLink(this.field.meta.field.link)) { this.linkedAgent = null; return this.data = this.chooseAppropriateContext(context) .then(ctx => { if (startsWith(this.field.meta.field.link as string, 'list')) { return this.agent .list(1, 1000, this.field.meta.field.link as any, ctx) .then(cursor => cursor.all()); } else if (startsWith(this.field.meta.field.link as string, 'read')) { return this.agent .read<any>(ctx, this.field.meta.field.link as any) .then(item => { try { return pointer.get(item, this.field.meta.field.data['pointer'] || '/'); } catch (e) { debug(`[warn] unable to get the data for pointer "${this.field.meta.field.data['pointer']}"`); } return []; }); } else { throw new Error('I cannot resolve this link, tip: prefix the link name with "read-" or "list-" so we know what to do.'); } }) .then(state => { this.cache.push(this.agent.schema.schemaId, this.field.meta.field.link as string, [], state); return state; }); } else { return Promise.reject(new Error('MultiSelectField: Field link is not a valid hyperlink: it doesnt exist on the current schema.')); } } /** * Get an linked resource as simplified data. * * @param context The context of the form (e.g. other form values) */ public resolveSimplifiedLinkedData(context?: any, forceReload?: boolean, includeOriginal?: boolean): Promise<SimplifiedLinkedResource[]> { return this.chooseAppropriateContext(context).then(ctx => this.resolveLinkedData(ctx, forceReload).then(items => this.mapLinkedData(items, ctx, forceReload, includeOriginal))); } /** * Convert already received values to the simplified format. * * @param items The data to convert/map. * @param context The context of the form (e.g. 
other form values) */ public mapLinkedData(items: any[], context?: any, forceReload?: boolean, includeOriginal?: boolean): Promise<SimplifiedLinkedResource[]> { return this.chooseAppropriateContext(context).then(ctx => this.mapMultipleChoice(items, ctx, forceReload, includeOriginal)); } /** * Convert the given list of simplified values to a list of the actual linked objects. * * @param items The simplified linked resource items. */ public getLinkedDataFromSimplifiedLinkedData(items: SimplifiedLinkedResource[]): Promise<any | IdentityValue[]> { if (typeof this.field.meta.items === 'object') { if ((this.field.meta.items as JsonSchema).type === 'object') { return Promise.all([this.resolveLinkedData(), this.createLinkedAgent()]) .then(([data, agent]) => items.map(item => data.find(x => item.value === x[agent.schema.identityProperty]))); } else { return Promise.resolve(items.map(x => x.value)); } } else { return Promise.reject(new Error('The given field cannot be converted to linked items, indexed array schemas are unsupported.')); } } /** * Create an linked schema agent to perform requests on the linked resource. * * @param context The context of the form (e.g. other form values) */ public createLinkedAgent(context?: any, forceReload?: boolean): Promise<ISchemaAgent> { if (forceReload !== true && this.linkedAgent != null) { return this.linkedAgent; } return this.linkedAgent = this.chooseAppropriateContext(context).then(ctx => this.agent.createChildByLink(this.field.meta.field.link as string, ctx)); } /** * Map an entity object to a multiple choice item. * * @param linkName The link that you want to map the choices for. * @param items The items that should be mapped. * @param forceReload * @param includeOriginal Whether or not original. 
*/ private mapMultipleChoice(items: any[], context: any, forceReload?: boolean, includeOriginal?: boolean): Promise<SimplifiedLinkedResource[]> { return this.createLinkedAgent(context, forceReload).then(agent => items .map(item => ({ name: getDisplayName(this.field.meta, items, item), description: getDescription(this.field.meta, item), order: getOrder(this.field.meta, item), value: getIdentityValue(agent.schema, this.field.meta, item), parent: getParentValue(this.field.meta, item), original: includeOriginal ? item : null } as SimplifiedLinkedResource)) .sort((a: SimplifiedLinkedResource, b: SimplifiedLinkedResource) => { if (a.order === b.order) { return String(a.name).localeCompare(String(b.name)); } if (a.order > b.order) { return 1; } return -1; })); } /** * Chooses between the ambient context and the one given, and returns the correct one (eventually). */ private chooseAppropriateContext(context?: any): Promise<any> { if (context != null && Object.keys(context).length) { return Promise.resolve(context); } return Promise.resolve(this.context.getData(false)); } } function getDisplayName(field: ExtendedFieldDescriptor, items: any[], item: any): string { if (field.field.data == null || field.field.data.label == null) { return item['name'] || item['displayName'] || item['internalName'] || item['entity']; } if (field.field.data.parent && field.field.data.mergeLabelWithParents === true) { return getDisplayNameForParent(field, items, item, item[field.field.data.label]); } return item[field.field.data.label]; } function getDisplayNameForParent(field: ExtendedFieldDescriptor, items: any[], item: any, label: string): string { if (item[field.field.data.parent] != null) { var parent = (items || []).find(x => x[field.field.data.value] === item[field.field.data.parent]); if (parent != null) { label = parent[field.field.data.label] + ' › ' + label; if (parent[field.field.data.parent] != null) { label = getDisplayNameForParent(field, items, parent, label); } } } return label; } 
function getDescription(field: ExtendedFieldDescriptor, item: any): string { if (field.field.data == null || field.field.data.description == null) { return item['description']; } return item[field.field.data.description]; } function getOrder(field: ExtendedFieldDescriptor, item: any): number { if (field.field.data == null || field.field.data.order == null) { return parseInt(item['order'], 10) || 0; } return parseInt(item[field.field.data.order], 10); } function getIdentityValue(schema: SchemaNavigator, field: ExtendedFieldDescriptor, item: any): IdentityValue { if (field.field.data == null || field.field.data.label == null) { try { // This will only work with listed properties (not with sub properties) return schema.getIdentityValue(item); } catch (e) { debug(`[warn] I cannot fetch the identity property for ${schema.entity}, please set it manually using the "value" data-property!`); return item['id'] || item['name'] || item['entity']; } } return item[field.field.data.value]; } function ge
ield: ExtendedFieldDescriptor, item: any): IdentityValue { if (field.field.data == null || field.field.data.parent == null) { return item['parent']; } return item[field.field.data.parent]; } function startsWith(str: string, target: string) { return String(str).slice(0, target.length) == String(target); } /** * Standardized object that represents the linked object, but only contains the values necesarry to display/choose the linked resource. */ export interface SimplifiedLinkedResource { /** * A short name that can be used for short displays. */ name: string; /** * An completer description. */ description: string; /** * The ordering number if applicable or 0. */ order: number; /** * Whether or not this item is disabled. */ disabled?: boolean; /** * The actual identity value(s) that link the two objects. */ value: IdentityValue; /** * The identity value of the parent item, if set in the schema. */ parent?: IdentityValue; /** * Original unmapped item. */ original?: any; }
tParentValue(f
identifier_name
dp.rs
// 我们开始学习动态规划吧 use std::cmp::min; // https://leetcode-cn.com/problems/maximum-subarray // 最大子序各,好像看不出什么动态规则的意味,反而像滑动窗口 pub fn max_sub_array(nums: Vec<i32>) -> i32 { let mut sum = nums[0]; let mut ans = nums[0]; for i in 1..nums.len() { if sum > 0 { // add positive sum means larger sum += nums[i]; } else { // start from new one means larger sum = nums[i]; } // ans always store the largest sum ans = std::cmp::max(sum, ans); } ans } // https://leetcode-cn.com/problems/climbing-stairs/solution/ // basic dynamic programming pub fn climb_stairs(n: i32) -> i32 { if n == 0 || n == 1 { return 1; } // f(n) = f(n-1) + f(n-2) // iterative is harder than recursive let mut n_1 = 1; // f(n-1) let mut n_2 = 1; // f(n-2) let mut ans = 0; for _ in 1..n { ans = n_1 + n_2; n_1 = n_2; n_2 = ans; } ans } // https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock/solution/yi-ge-fang-fa-tuan-mie-6-dao-gu-piao-wen-ti-by-l-3/ // sell stock using state machine // this is the solution for infinite k pub fn max_profit_infinite(prices: Vec<i32>) -> i32 { let mut s_keep = std::i32::MIN; // you could not keep any stock on the very first day let mut s_empty = 0; for price in prices { s_keep = std::cmp::max(s_keep, s_empty - price); s_empty = std::cmp::max(s_empty, s_keep + price); } return s_empty; } // https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-cooldown/solution/zhuang-tai-ji-mo-xing-dp-by-acw_wangdh15/ // 用有限状态机的方式去解题 use std::i32; pub fn max_profit_cool(prices: Vec<i32>) -> i32 { let n = prices.len(); let mut dp = vec![vec![i32::MIN; 3]; n+1]; // 0 可以买入的状态,买入之后转移到状态1。可以原地保持状态,或者从冷冻态转过来 // 1 可以卖出的状态,卖出之后转移到状态2。可以原地保持状态,或者从状态0转过来 // 2 冷冻期,过了一天转入状态0。可以从状态1转过来。 // 0 明天可买入,要么今天不买,要么今天是冷冻期 // 1 明天可卖出:要么今天买,要么今天不卖 // 2 明天是冷冻,那就今天卖了吧 dp[0][0] = 0; for i in 0..n { dp[i+1][0] = dp[i][0].max(dp[i][2]); // 来自 0 和 2 的转移 dp[i+1][1] = dp[i][1].max(dp[i][0] - prices[i]); dp[i+1][2] = dp[i][1] + prices[i]; // println!("dp[i][0]: {}", dp[i][0]); // println!("dp[i][1]: {}", 
dp[i][1]); // println!("dp[i][2]: {}", dp[i][2]); } return dp[n][0].max(dp[n][2]); } pub fn max_profit_once(prices: Vec<i32>) -> i32 { // suffix 0 means no trade (buy or sell) happen // 1 means it happend // let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day let mut s_empty_0 = 0; let mut s_keep_1 = std::i32::MIN; let mut s_empty_1 = std::i32::MIN; for price in prices { s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price); s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price); } return std::cmp::max(s_empty_1, 0); } pub fn max_profit_twice(prices: Vec<i32>) -> i32 { // suffix 0 means no trade (buy or sell) happen // 1 means it happend // let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day let mut s_empty_0 = 0; let mut s_keep_1 = std::i32::MIN; let mut s_empty_1 = std::i32::MIN; let mut s_keep_2 = std::i32::MIN; let mut s_empty_2 = std::i32::MIN; for price in prices { s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price); s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price); s_keep_2 = std::cmp::max(s_keep_2, s_empty_1 - price); s_empty_2 = std::cmp::max(s_empty_2, s_keep_2 + price); } return std::cmp::max(s_empty_2, 0); } // this one works but consume too much memory pub fn max_profit_k_memory_consume(k: i32, prices: Vec<i32>) -> i32 { // from example above, we know the initial value is 0 // here, k become a variable, some we need a matrix to // store different status // how many status we have? 
// empty or keep => 2 // trade times => k // so we have 2k status let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep let mut s_times: Vec<[i32;2]> = Vec::new(); let k: usize = k as usize; for i in 0..k+1 { s_times.push(s_trade.clone()); } s_times[0][0] = 0; for price in prices { for j in 0..k { s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price); s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price); } } return std::cmp::max(0, s_times[k][0]); } // memory efficient version pub fn max_profit_k(k: i32, prices: Vec<i32>) -> i32 { // here if k in unreasonable large, switch to infinite version let k: usize = k as usize; if k > prices.len()/2 { return max_profit_infinite(prices); } let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep let mut s_times: Vec<[i32;2]> = Vec::new(); for i in 0..k+1 { s_times.push(s_trade.clone()); } s_times[0][0] = 0; for price in prices { for j in 0..k { s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price); s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price); } } return std::cmp::max(0, s_times[k][0]); } // shortest path // https://leetcode-cn.com/problems/minimum-path-sum/ // way: set grid value as the cost to get there // matrix: // 1 0 1 1 1 2 // 2 3 5 => 3 4 7 // 5 3 2 8 7 9 pub fn min_path_sum(grid: Vec<Vec<i32>>) -> i32 { let row = grid.len(); let col = grid[0].len(); let mut cost = grid.clone(); for r in 0..row { for c in 0..col { if r == 0 && c == 0 { cost[r][c] = grid[r][c]; } else if r == 0 { cost[r][c] = grid[r][c] + cost[r][c-1]; } else if c == 0 { cost[r][c] = grid[r][c] + cost[r-1][c]; } else { cost[r][c] = grid[r][c] + min(cost[r-1][c], cost[r][c-1]); } } } return cost[row-1][col-1]; } // https://leetcode-cn.com/problems/generate-parentheses/solution/ pub fn generate_parenthesis(n: i32) -> Vec<String> { if n == 0 { return Vec::new(); } let mut dp = 
vec![Vec::<String>::new(); (n+1) as usize]; dp[0] = vec![String::from("")]; for i in 1..=n { println!("Round {}", i); let mut cur = vec![]; for j in 0..i { let left = &dp[j as usize]; let right = &dp[(i-j-1) as usize]; for l in left { for r in right { let tmp = format!("({}){}", l, r); println!("new string {}", tmp); cur.push(tmp); } } } dp[i as usize] = cur; } let res = dp.pop().unwrap(); return res } // https://leetcode-cn.com/problems/unique-paths/ // 到达P[i][j]的路径数 = P[i-1][j] + P[i][j-1] pub fn unique_paths(m: i32, n: i32) -> i32 { if m == 1 || n == 1 { return 1; } else { return unique_paths(m - 1, n) + unique_paths(m, n - 1); } } pub fn unique_paths_iter(m: i32, n: i32) -> i32 { let m: usize = m as usize; let n: usize = n as usize; let mut cache = vec![vec![0; n]; m]; for i in 0..m { for j in 0..n { if i == 0 || j == 0 { cache[i][j] = 1; } else { cache[i][j] = cache[i-1][j] + cache[i][j-1]; } } } return cache[m-1][n-1] as i32; } // https://leetcode-cn.com/problems/unique-paths-ii/solution/ pub fn unique_paths_with_obstacles2(obstacle_grid: Vec<Vec<i32>>) -> i32 { let m = obstacle_grid.len(); let n = obstacle_grid[0].len(); let mut cache = vec![vec![0; n]; m]; for i in 0..m { for j in 0..n { if obstacle_grid[i][j] == 1 { cache[i][j] = 0; } else if i == 0 && j == 0 { cache[i][j] = 1; } else if i == 0 { cache[i][j] = cache[i][j-1]; } else if j == 0 { cache[i][j] = cache[i-1][j]; } else { cache[i][j] = cache[i-1][j] + cache[i][j-1]; } } } return cache[m-1][n-1]; } // https://leetcode-cn.com/problems/house-robber/submissions/ pub fn rob(nums: Vec<i32>) -> i32 { let len = nums.len(); if len == 0 { return 0; } else if len == 1 { return nums[0]; } else if len == 2 { return nums[0].max(nums[1]); } // else len > 2 let mut m1 = nums[0]; let mut m2 = nums[1].max(m1); for i in 2..nums.len() { println!("m1 {} m2 {}", m1, m2); m1 = (m1 + nums[i]).max(m2); let temp = m2; m2 = m1; m1 = temp; } println!("m1 {} m2 {}", m1, m2); return m2; } // 
https://leetcode-cn.com/problems/maximum-product-subarray/submissions/ pub fn max_product(nums: Vec<i32>) -> i32 { if nums.len() == 0 { return 0; } let (mut max, mut min) = (1, 1); let mut res = std::i32::MIN; let len = nums.len(); // 由于有 if 在循环里面,所以速度慢! for n in nums { let t_max = max; let t_min = min; max = (t_max * n).max(n).max(t_min * n); min = (t_min * n).min(n).min(t_max * n); res = res.max(max); } println!("{}", res); return res; } // https://leetcode-cn.com/problems/gu-piao-de-zui-da-li-run-lcof/ // 由于只买卖一次,所以只需要记录最低价格就好了 pub fn max_profit(mut prices: Vec<i32>) -> i32 { let mut profit = 0; let mut cost = 1<<30; for i in 0..prices.len() { cost = cost.min(prices[i]); profit = (prices[i] - cost).max(profit); } return profit; } // https://leetcode-cn.com/problems/word-break/ pub fn word_break(s: String, word_dict: Vec<String>) -> bool { if word_dict.is_empty() { return false; } let len = s.len(); let mut dp: Vec<bool> = vec![false; len+1]; dp[0] = true; for i in 0..len { if !dp[i] { continue; } for w in &word_dict { let end = i + w.len(); if end <= len && !dp[end] && &s[i..end] == w.as_str() { dp[end] = true; } } } dp[len] } // https://leetcode-cn.com/problems/maximum-length-of-repeated-subarray/solution/ // 相当于填表 pub fn find_length(a: Vec<i32>, b: Vec<i32>) -> i32 { let row = a.len(); let col = b.len(); let mut dp = vec![vec![0; col]; row]; let mut res = 0; for i in 0..row { for j in 0..col { if a[i] == b[j] { let last = if ( i == 0 || j == 0 ) { 0 } else { dp[i-1][j-1] }; dp[i][j] = last + 1; res = res.max(dp[i][j]); } else { dp[i][j] = 0; } } } return res as i32; } // https://leetcode-cn.com/problems/unique-paths-ii/ pub fn unique_paths_with_obstacles(obstacle_grid: Vec<Vec<i32>>) -> i32 { let row = obstacle_grid.len(); let col = obstacle_grid[0].len(); let mut dp = vec![vec![0; col]; row]; // init first row and col for i in 0..row { for j in 0..col { if obstacle_grid[i][j] == 0 { if i == 0 && j == 0 { dp[i][j] = 1; } else if i == 0 { dp[i][j] = dp[i][j-1]; 
} else if j == 0 { dp[i][j] = dp[i-1][j]; } else {
} else { grid[i][j] += grid[i-1][j].max(grid[i][j-1]); } } } return grid[row-1][col-1]; } // https://leetcode-cn.com/problems/triangle/solution/di-gui-ji-yi-hua-dp-bi-xu-miao-dong-by-sweetiee/ pub fn minimum_total(triangle: Vec<Vec<i32>>) -> i32 { let n = triangle.len(); let mut dp = vec![0; n+1]; for i in (0..n).rev() { for j in 0..=i { println!("i, j = {}, {}", i, j); dp[j] = dp[j].min(dp[j+1]) + triangle[i][j]; } } return dp[0]; } // https://leetcode-cn.com/problems/nge-tou-zi-de-dian-shu-lcof/solution/ pub fn two_sum(n: i32) -> Vec<f64> { let mut res = vec![1./6.;6]; for i in 1..n as usize { let mut temp = vec![0.0; 5 * i + 6]; for j in 0..res.len() { for k in 0..6 { temp[j+k] += res[j] * 1.0/6.0; } } res = temp; } return res; } // https://leetcode-cn.com/problems/minimum-path-sum/submissions/ pub fn min_path_sum2(mut grid: Vec<Vec<i32>>) -> i32 { let row = grid.len(); let col = grid[0].len(); for i in 1..row { grid[i][0] += grid[i-1][0]; } for j in 1..col { grid[0][j] += grid[0][j-1]; } for i in 1..row { for j in 1..col { grid[i][j] = grid[i][j-1].min(grid[i-1][j]) + grid[i][j]; } } return grid[row-1][col-1]; } fn main() { // generate_parenthesis(4); // println!("(1,1) {}", unique_paths_iter(1, 1)); // println!("(2,2) {}", unique_paths_iter(2, 2)); // println!("(3,2) {}", unique_paths_iter(3, 2)); // println!("(2,3) {}", unique_paths_iter(2, 3)); // rob([1, 3, 1, 3, 100].to_vec()); // max_product([-2,0,-1].to_vec()); // max_product([-1,-2,-9,-6].to_vec()); // max_profit([1,2,3].to_vec()); // word_break("leetcode".to_string(), ["leet".to_string(), "code".to_string()].to_vec()); // dbg!(find_length([1,2,3,2,1].to_vec(), [3,2,1,4,7].to_vec())); // dbg!(max_profit_cool([1,2,3,0,2].to_vec())); // let tri = [ // [2].to_vec(), // [3,4].to_vec(), // [6,5,7].to_vec(), // [4,1,8,3].to_vec() // ].to_vec(); // dbg!(minimum_total(tri)); // dbg!(two_sum(5)); min_path_sum2([ [1,3,1].to_vec(), [1,5,1].to_vec(), [4,2,1].to_vec(), ].to_vec()); }
dp[i][j] = dp[i-1][j] + dp[i][j-1]; } } else { // 遇到障碍了,但一开始我们就是初始化为0的,所以这里其实可以不写 dp[i][j] = 0; } } } return dp[row-1][col-1]; } // https://leetcode-cn.com/problems/re-space-lcci/ pub fn respace(dictionary: Vec<String>, sentence: String) -> i32 { 42 } // https://leetcode-cn.com/problems/li-wu-de-zui-da-jie-zhi-lcof/ pub fn max_value(mut grid: Vec<Vec<i32>>) -> i32 { let row = grid.len(); let col = grid[0].len(); for i in 0..row { for j in 0..col { if i == 0 && j == 0 { // pass } else if i == 0 { grid[i][j] += grid[i][j-1]; } else if j == 0 { grid[i][j] += grid[i-1][j];
identifier_body
dp.rs
// 我们开始学习动态规划吧 use std::cmp::min; // https://leetcode-cn.com/problems/maximum-subarray // 最大子序各,好像看不出什么动态规则的意味,反而像滑动窗口 pub fn max_sub_array(nums: Vec<i32>) -> i32 { let mut sum = nums[0]; let mut ans = nums[0]; for i in 1..nums.len() { if sum > 0 { // add positive sum means larger sum += nums[i]; } else { // start from new one means larger sum = nums[i]; } // ans always store the largest sum ans = std::cmp::max(sum, ans); } ans } // https://leetcode-cn.com/problems/climbing-stairs/solution/ // basic dynamic programming pub fn climb_stairs(n: i32) -> i32 { if n == 0 || n == 1 { return 1; } // f(n) = f(n-1) + f(n-2) // iterative is harder than recursive let mut n_1 = 1; // f(n-1) let mut n_2 = 1; // f(n-2) let mut ans = 0; for _ in 1..n { ans = n_1 + n_2; n_1 = n_2; n_2 = ans; } ans } // https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock/solution/yi-ge-fang-fa-tuan-mie-6-dao-gu-piao-wen-ti-by-l-3/ // sell stock using state machine // this is the solution for infinite k pub fn max_profit_infinite(prices: Vec<i32>) -> i32 { let mut s_keep = std::i32::MIN; // you could not keep any stock on the very first day let mut s_empty = 0; for price in prices { s_keep = std::cmp::max(s_keep, s_empty - price); s_empty = std::cmp::max(s_empty, s_keep + price); } return s_empty; } // https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-cooldown/solution/zhuang-tai-ji-mo-xing-dp-by-acw_wangdh15/ // 用有限状态机的方式去解题 use std::i32; pub fn max_profit_cool(prices: Vec<i32>) -> i32 { let n = prices.len(); let mut dp = vec![vec![i32::MIN; 3]; n+1]; // 0 可以买入的状态,买入之后转移到状态1。可以原地保持状态,或者从冷冻态转过来 // 1 可以卖出的状态,卖出之后转移到状态2。可以原地保持状态,或者从状态0转过来 // 2 冷冻期,过了一天转入状态0。可以从状态1转过来。 // 0 明天可买入,要么今天不买,要么今天是冷冻期 // 1 明天可卖出:要么今天买,要么今天不卖 // 2 明天是冷冻,那就今天卖了吧 dp[0][0] = 0; for i in 0..n { dp[i+1][0] = dp[i][0].max(dp[i][2]); // 来自 0 和 2 的转移 dp[i+1][1] = dp[i][1].max(dp[i][0] - prices[i]); dp[i+1][2] = dp[i][1] + prices[i]; // println!("dp[i][0]: {}", dp[i][0]); // println!("dp[i][1]: {}", 
dp[i][1]); // println!("dp[i][2]: {}", dp[i][2]); } return dp[n][0].max(dp[n][2]); } pub fn max_profit_once(prices: Vec<i32>) -> i32 { // suffix 0 means no trade (buy or sell) happen // 1 means it happend // let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day let mut s_empty_0 = 0; let mut s_keep_1 = std::i32::MIN; let mut s_empty_1 = std::i32::MIN; for price in prices { s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price); s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price); } return std::cmp::max(s_empty_1, 0); } pub fn max_profit_twice(prices: Vec<i32>) -> i32 { // suffix 0 means no trade (buy or sell) happen // 1 means it happend // let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day let mut s_empty_0 = 0; let mut s_keep_1 = std::i32::MIN; let mut s_empty_1 = std::i32::MIN; let mut s_keep_2 = std::i32::MIN; let mut s_empty_2 = std::i32::MIN; for price in prices { s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price); s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price); s_keep_2 = std::cmp::max(s_keep_2, s_empty_1 - price); s_empty_2 = std::cmp::max(s_empty_2, s_keep_2 + price); } return std::cmp::max(s_empty_2, 0); } // this one works but consume too much memory pub fn max_profit_k_memory_consume(k: i32, prices: Vec<i32>) -> i32 { // from example above, we know the initial value is 0 // here, k become a variable, some we need a matrix to // store different status // how many status we have? 
// empty or keep => 2 // trade times => k // so we have 2k status let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep let mut s_times: Vec<[i32;2]> = Vec::new(); let k: usize = k as usize; for i in 0..k+1 { s_times.push(s_trade.clone()); } s_times[0][0] = 0; for price in prices { for j in 0..k { s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price); s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price); } } return std::cmp::max(0, s_times[k][0]); } // memory efficient version pub fn max_profit_k(k: i32, prices: Vec<i32>) -> i32 { // here if k in unreasonable large, switch to infinite version let k: usize = k as usize; if k > prices.len()/2 { return max_profit_infinite(prices); } let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep let mut s_times: Vec<[i32;2]> = Vec::new(); for i in 0..k+1 { s_times.push(s_trade.clone()); } s_times[0][0] = 0; for price in prices { for j in 0..k { s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price); s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price); } } return std::cmp::max(0, s_times[k][0]); } // shortest path // https://leetcode-cn.com/problems/minimum-path-sum/ // way: set grid value as the cost to get there // matrix: // 1 0 1 1 1 2 // 2 3 5 => 3 4 7 // 5 3 2 8 7 9 pub fn min_path_sum(grid: Vec<Vec<i32>>) -> i32 { let row = grid.len(); let col = grid[0].len(); let mut cost = grid.clone(); for r in 0..row { for c in 0..col { if r == 0 && c == 0 { cost[r][c] = grid[r][c]; } else if r == 0 { cost[r][c] = grid[r][c] + cost[r][c-1]; } else if c == 0 { cost[r][c] = gri
st[r-1][c]; } else { cost[r][c] = grid[r][c] + min(cost[r-1][c], cost[r][c-1]); } } } return cost[row-1][col-1]; } // https://leetcode-cn.com/problems/generate-parentheses/solution/ pub fn generate_parenthesis(n: i32) -> Vec<String> { if n == 0 { return Vec::new(); } let mut dp = vec![Vec::<String>::new(); (n+1) as usize]; dp[0] = vec![String::from("")]; for i in 1..=n { println!("Round {}", i); let mut cur = vec![]; for j in 0..i { let left = &dp[j as usize]; let right = &dp[(i-j-1) as usize]; for l in left { for r in right { let tmp = format!("({}){}", l, r); println!("new string {}", tmp); cur.push(tmp); } } } dp[i as usize] = cur; } let res = dp.pop().unwrap(); return res } // https://leetcode-cn.com/problems/unique-paths/ // 到达P[i][j]的路径数 = P[i-1][j] + P[i][j-1] pub fn unique_paths(m: i32, n: i32) -> i32 { if m == 1 || n == 1 { return 1; } else { return unique_paths(m - 1, n) + unique_paths(m, n - 1); } } pub fn unique_paths_iter(m: i32, n: i32) -> i32 { let m: usize = m as usize; let n: usize = n as usize; let mut cache = vec![vec![0; n]; m]; for i in 0..m { for j in 0..n { if i == 0 || j == 0 { cache[i][j] = 1; } else { cache[i][j] = cache[i-1][j] + cache[i][j-1]; } } } return cache[m-1][n-1] as i32; } // https://leetcode-cn.com/problems/unique-paths-ii/solution/ pub fn unique_paths_with_obstacles2(obstacle_grid: Vec<Vec<i32>>) -> i32 { let m = obstacle_grid.len(); let n = obstacle_grid[0].len(); let mut cache = vec![vec![0; n]; m]; for i in 0..m { for j in 0..n { if obstacle_grid[i][j] == 1 { cache[i][j] = 0; } else if i == 0 && j == 0 { cache[i][j] = 1; } else if i == 0 { cache[i][j] = cache[i][j-1]; } else if j == 0 { cache[i][j] = cache[i-1][j]; } else { cache[i][j] = cache[i-1][j] + cache[i][j-1]; } } } return cache[m-1][n-1]; } // https://leetcode-cn.com/problems/house-robber/submissions/ pub fn rob(nums: Vec<i32>) -> i32 { let len = nums.len(); if len == 0 { return 0; } else if len == 1 { return nums[0]; } else if len == 2 { return 
nums[0].max(nums[1]); } // else len > 2 let mut m1 = nums[0]; let mut m2 = nums[1].max(m1); for i in 2..nums.len() { println!("m1 {} m2 {}", m1, m2); m1 = (m1 + nums[i]).max(m2); let temp = m2; m2 = m1; m1 = temp; } println!("m1 {} m2 {}", m1, m2); return m2; } // https://leetcode-cn.com/problems/maximum-product-subarray/submissions/ pub fn max_product(nums: Vec<i32>) -> i32 { if nums.len() == 0 { return 0; } let (mut max, mut min) = (1, 1); let mut res = std::i32::MIN; let len = nums.len(); // 由于有 if 在循环里面,所以速度慢! for n in nums { let t_max = max; let t_min = min; max = (t_max * n).max(n).max(t_min * n); min = (t_min * n).min(n).min(t_max * n); res = res.max(max); } println!("{}", res); return res; } // https://leetcode-cn.com/problems/gu-piao-de-zui-da-li-run-lcof/ // 由于只买卖一次,所以只需要记录最低价格就好了 pub fn max_profit(mut prices: Vec<i32>) -> i32 { let mut profit = 0; let mut cost = 1<<30; for i in 0..prices.len() { cost = cost.min(prices[i]); profit = (prices[i] - cost).max(profit); } return profit; } // https://leetcode-cn.com/problems/word-break/ pub fn word_break(s: String, word_dict: Vec<String>) -> bool { if word_dict.is_empty() { return false; } let len = s.len(); let mut dp: Vec<bool> = vec![false; len+1]; dp[0] = true; for i in 0..len { if !dp[i] { continue; } for w in &word_dict { let end = i + w.len(); if end <= len && !dp[end] && &s[i..end] == w.as_str() { dp[end] = true; } } } dp[len] } // https://leetcode-cn.com/problems/maximum-length-of-repeated-subarray/solution/ // 相当于填表 pub fn find_length(a: Vec<i32>, b: Vec<i32>) -> i32 { let row = a.len(); let col = b.len(); let mut dp = vec![vec![0; col]; row]; let mut res = 0; for i in 0..row { for j in 0..col { if a[i] == b[j] { let last = if ( i == 0 || j == 0 ) { 0 } else { dp[i-1][j-1] }; dp[i][j] = last + 1; res = res.max(dp[i][j]); } else { dp[i][j] = 0; } } } return res as i32; } // https://leetcode-cn.com/problems/unique-paths-ii/ pub fn unique_paths_with_obstacles(obstacle_grid: Vec<Vec<i32>>) -> i32 { let row 
= obstacle_grid.len(); let col = obstacle_grid[0].len(); let mut dp = vec![vec![0; col]; row]; // init first row and col for i in 0..row { for j in 0..col { if obstacle_grid[i][j] == 0 { if i == 0 && j == 0 { dp[i][j] = 1; } else if i == 0 { dp[i][j] = dp[i][j-1]; } else if j == 0 { dp[i][j] = dp[i-1][j]; } else { dp[i][j] = dp[i-1][j] + dp[i][j-1]; } } else { // 遇到障碍了,但一开始我们就是初始化为0的,所以这里其实可以不写 dp[i][j] = 0; } } } return dp[row-1][col-1]; } // https://leetcode-cn.com/problems/re-space-lcci/ pub fn respace(dictionary: Vec<String>, sentence: String) -> i32 { 42 } // https://leetcode-cn.com/problems/li-wu-de-zui-da-jie-zhi-lcof/ pub fn max_value(mut grid: Vec<Vec<i32>>) -> i32 { let row = grid.len(); let col = grid[0].len(); for i in 0..row { for j in 0..col { if i == 0 && j == 0 { // pass } else if i == 0 { grid[i][j] += grid[i][j-1]; } else if j == 0 { grid[i][j] += grid[i-1][j]; } else { grid[i][j] += grid[i-1][j].max(grid[i][j-1]); } } } return grid[row-1][col-1]; } // https://leetcode-cn.com/problems/triangle/solution/di-gui-ji-yi-hua-dp-bi-xu-miao-dong-by-sweetiee/ pub fn minimum_total(triangle: Vec<Vec<i32>>) -> i32 { let n = triangle.len(); let mut dp = vec![0; n+1]; for i in (0..n).rev() { for j in 0..=i { println!("i, j = {}, {}", i, j); dp[j] = dp[j].min(dp[j+1]) + triangle[i][j]; } } return dp[0]; } // https://leetcode-cn.com/problems/nge-tou-zi-de-dian-shu-lcof/solution/ pub fn two_sum(n: i32) -> Vec<f64> { let mut res = vec![1./6.;6]; for i in 1..n as usize { let mut temp = vec![0.0; 5 * i + 6]; for j in 0..res.len() { for k in 0..6 { temp[j+k] += res[j] * 1.0/6.0; } } res = temp; } return res; } // https://leetcode-cn.com/problems/minimum-path-sum/submissions/ pub fn min_path_sum2(mut grid: Vec<Vec<i32>>) -> i32 { let row = grid.len(); let col = grid[0].len(); for i in 1..row { grid[i][0] += grid[i-1][0]; } for j in 1..col { grid[0][j] += grid[0][j-1]; } for i in 1..row { for j in 1..col { grid[i][j] = grid[i][j-1].min(grid[i-1][j]) + grid[i][j]; } } 
return grid[row-1][col-1]; } fn main() { // generate_parenthesis(4); // println!("(1,1) {}", unique_paths_iter(1, 1)); // println!("(2,2) {}", unique_paths_iter(2, 2)); // println!("(3,2) {}", unique_paths_iter(3, 2)); // println!("(2,3) {}", unique_paths_iter(2, 3)); // rob([1, 3, 1, 3, 100].to_vec()); // max_product([-2,0,-1].to_vec()); // max_product([-1,-2,-9,-6].to_vec()); // max_profit([1,2,3].to_vec()); // word_break("leetcode".to_string(), ["leet".to_string(), "code".to_string()].to_vec()); // dbg!(find_length([1,2,3,2,1].to_vec(), [3,2,1,4,7].to_vec())); // dbg!(max_profit_cool([1,2,3,0,2].to_vec())); // let tri = [ // [2].to_vec(), // [3,4].to_vec(), // [6,5,7].to_vec(), // [4,1,8,3].to_vec() // ].to_vec(); // dbg!(minimum_total(tri)); // dbg!(two_sum(5)); min_path_sum2([ [1,3,1].to_vec(), [1,5,1].to_vec(), [4,2,1].to_vec(), ].to_vec()); }
d[r][c] + co
identifier_name
dp.rs
// 我们开始学习动态规划吧 use std::cmp::min; // https://leetcode-cn.com/problems/maximum-subarray // 最大子序各,好像看不出什么动态规则的意味,反而像滑动窗口 pub fn max_sub_array(nums: Vec<i32>) -> i32 { let mut sum = nums[0]; let mut ans = nums[0]; for i in 1..nums.len() { if sum > 0 { // add positive sum means larger sum += nums[i]; } else { // start from new one means larger sum = nums[i]; } // ans always store the largest sum ans = std::cmp::max(sum, ans); } ans } // https://leetcode-cn.com/problems/climbing-stairs/solution/ // basic dynamic programming pub fn climb_stairs(n: i32) -> i32 { if n == 0 || n == 1 { return 1; } // f(n) = f(n-1) + f(n-2) // iterative is harder than recursive let mut n_1 = 1; // f(n-1) let mut n_2 = 1; // f(n-2) let mut ans = 0; for _ in 1..n { ans = n_1 + n_2; n_1 = n_2; n_2 = ans; } ans } // https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock/solution/yi-ge-fang-fa-tuan-mie-6-dao-gu-piao-wen-ti-by-l-3/ // sell stock using state machine // this is the solution for infinite k pub fn max_profit_infinite(prices: Vec<i32>) -> i32 { let mut s_keep = std::i32::MIN; // you could not keep any stock on the very first day let mut s_empty = 0; for price in prices { s_keep = std::cmp::max(s_keep, s_empty - price); s_empty = std::cmp::max(s_empty, s_keep + price); } return s_empty; } // https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-with-cooldown/solution/zhuang-tai-ji-mo-xing-dp-by-acw_wangdh15/ // 用有限状态机的方式去解题 use std::i32; pub fn max_profit_cool(prices: Vec<i32>) -> i32 { let n = prices.len(); let mut dp = vec![vec![i32::MIN; 3]; n+1]; // 0 可以买入的状态,买入之后转移到状态1。可以原地保持状态,或者从冷冻态转过来 // 1 可以卖出的状态,卖出之后转移到状态2。可以原地保持状态,或者从状态0转过来 // 2 冷冻期,过了一天转入状态0。可以从状态1转过来。 // 0 明天可买入,要么今天不买,要么今天是冷冻期 // 1 明天可卖出:要么今天买,要么今天不卖 // 2 明天是冷冻,那就今天卖了吧 dp[0][0] = 0; for i in 0..n { dp[i+1][0] = dp[i][0].max(dp[i][2]); // 来自 0 和 2 的转移 dp[i+1][1] = dp[i][1].max(dp[i][0] - prices[i]); dp[i+1][2] = dp[i][1] + prices[i]; // println!("dp[i][0]: {}", dp[i][0]); // println!("dp[i][1]: {}", 
dp[i][1]); // println!("dp[i][2]: {}", dp[i][2]); } return dp[n][0].max(dp[n][2]); } pub fn max_profit_once(prices: Vec<i32>) -> i32 { // suffix 0 means no trade (buy or sell) happen // 1 means it happend // let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day let mut s_empty_0 = 0; let mut s_keep_1 = std::i32::MIN; let mut s_empty_1 = std::i32::MIN; for price in prices { s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price); s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price); } return std::cmp::max(s_empty_1, 0); } pub fn max_profit_twice(prices: Vec<i32>) -> i32 { // suffix 0 means no trade (buy or sell) happen // 1 means it happend // let mut s_keep_0 = std::i32::MIN; // you could not keep any stock on the very first day let mut s_empty_0 = 0; let mut s_keep_1 = std::i32::MIN; let mut s_empty_1 = std::i32::MIN; let mut s_keep_2 = std::i32::MIN; let mut s_empty_2 = std::i32::MIN; for price in prices { s_keep_1 = std::cmp::max(s_keep_1, s_empty_0 - price); s_empty_1 = std::cmp::max(s_empty_1, s_keep_1 + price); s_keep_2 = std::cmp::max(s_keep_2, s_empty_1 - price); s_empty_2 = std::cmp::max(s_empty_2, s_keep_2 + price); } return std::cmp::max(s_empty_2, 0); } // this one works but consume too much memory pub fn max_profit_k_memory_consume(k: i32, prices: Vec<i32>) -> i32 { // from example above, we know the initial value is 0 // here, k become a variable, some we need a matrix to // store different status // how many status we have? 
// empty or keep => 2 // trade times => k // so we have 2k status let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep let mut s_times: Vec<[i32;2]> = Vec::new(); let k: usize = k as usize; for i in 0..k+1 { s_times.push(s_trade.clone()); } s_times[0][0] = 0; for price in prices { for j in 0..k { s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price); s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price); } } return std::cmp::max(0, s_times[k][0]); } // memory efficient version pub fn max_profit_k(k: i32, prices: Vec<i32>) -> i32 { // here if k in unreasonable large, switch to infinite version let k: usize = k as usize; if k > prices.len()/2 { return max_profit_infinite(prices); } let mut s_trade: [i32; 2] = [std::i32::MIN, std::i32::MIN]; // trade state: empty or keep let mut s_times: Vec<[i32;2]> = Vec::new(); for i in 0..k+1 { s_times.push(s_trade.clone()); } s_times[0][0] = 0; for price in prices { for j in 0..k { s_times[j+1][1] = std::cmp::max(s_times[j+1][1], s_times[j][0] - price); s_times[j+1][0] = std::cmp::max(s_times[j+1][0], s_times[j+1][1] + price); } } return std::cmp::max(0, s_times[k][0]); } // shortest path // https://leetcode-cn.com/problems/minimum-path-sum/ // way: set grid value as the cost to get there // matrix: // 1 0 1 1 1 2 // 2 3 5 => 3 4 7 // 5 3 2 8 7 9 pub fn min_path_sum(grid: Vec<Vec<i32>>) -> i32 { let row = grid.len(); let col = grid[0].len(); let mut cost = grid.clone(); for r in 0..row { for c in 0..col { if r == 0 && c == 0 { cost[r][c] = grid[r][c]; } else if r == 0 { cost[r][c] = grid[r][c] + cost[r][c-1]; } else if c == 0 { cost[r][c] = grid[r][c] + cost[r-1][c]; } else { cost[r][c] = grid[r][c] + min(cost[r-1][c], cost[r][c-1]); } } } return cost[row-1][col-1]; } // https://leetcode-cn.com/problems/generate-parentheses/solution/ pub fn generate_parenthesis(n: i32) -> Vec<String> { if n == 0 { return Vec::new(); } let mut dp = 
vec![Vec::<String>::new(); (n+1) as usize]; dp[0] = vec![String::from("")]; for i in 1..=n { println!("Round {}", i); let mut cur = vec![]; for j in 0..i { let left = &dp[j as usize]; let right = &dp[(i-j-1) as usize]; for l in left { for r in right { let tmp = format!("({}){}", l, r); println!("new string {}", tmp); cur.push(tmp); } } } dp[i as usize] = cur; } let res = dp.pop().unwrap(); return res } // https://leetcode-cn.com/problems/unique-paths/ // 到达P[i][j]的路径数 = P[i-1][j] + P[i][j-1] pub fn unique_paths(m: i32, n: i32) -> i32 { if m == 1 || n == 1 { return 1; } else { return unique_paths(m - 1, n) + unique_paths(m, n - 1); } } pub fn unique_paths_iter(m: i32, n: i32) -> i32 { let m: usize = m as usize; let n: usize = n as usize; let mut cache = vec![vec![0; n]; m]; for i in 0..m { for j in 0..n { if i == 0 || j == 0 { cache[i][j] = 1; } else { cache[i][j] = cache[i-1][j] + cache[i][j-1]; } } } return cache[m-1][n-1] as i32; } // https://leetcode-cn.com/problems/unique-paths-ii/solution/ pub fn unique_paths_with_obstacles2(obstacle_grid: Vec<Vec<i32>>) -> i32 { let m = obstacle_grid.len(); let n = obstacle_grid[0].len(); let mut cache = vec![vec![0; n]; m]; for i in 0..m { for j in 0..n { if obstacle_grid[i][j] == 1 { cache[i][j] = 0; } else if i == 0 && j == 0 { cache[i][j] = 1; } else if i == 0 { cache[i][j] = cache[i][j-1]; } else if j == 0 { cache[i][j] = cache[i-1][j]; } else { cache[i][j] = cache[i-1][j] + cache[i][j-1]; } } } return cache[m-1][n-1]; } // https://leetcode-cn.com/problems/house-robber/submissions/ pub fn rob(nums: Vec<i32>) -> i32 { let len = nums.len(); if len == 0 { return 0; } else if len == 1 { return nums[0]; } else if len == 2 { return nums[0].max(nums[1]); } // else len > 2 let mut m1 = nums[0]; let mut m2 = nums[1].max(m1); for i in 2..nums.len() { println!("m1 {} m2 {}", m1, m2); m1 = (m1 + nums[i]).max(m2); let temp = m2; m2 = m1; m1 = temp; } println!("m1 {} m2 {}", m1, m2); return m2; } // 
https://leetcode-cn.com/problems/maximum-product-subarray/submissions/ pub fn max_product(nums: Vec<i32>) -> i32 { if nums.len() == 0 { return 0; } let (mut max, mut min) = (1, 1); let mut res = std::i32::MIN; let len = nums.len(); // 由于有 if 在循环里面,所以速度慢! for n in nums { let t_max = max; let t_min = min; max = (t_max * n).max(n).max(t_min * n); min = (t_min * n).min(n).min(t_max * n); res = res.max(max); } println!("{}", res); return res; } // https://leetcode-cn.com/problems/gu-piao-de-zui-da-li-run-lcof/ // 由于只买卖一次,所以只需要记录最低价格就好了 pub fn max_profit(mut prices: Vec<i32>) -> i32 { let mut profit = 0; let mut cost = 1<<30; for i in 0..prices.len() { cost = cost.min(prices[i]); profit = (prices[i] - cost).max(profit); } return profit; } // https://leetcode-cn.com/problems/word-break/ pub fn word_break(s: String, word_dict: Vec<String>) -> bool { if word_dict.is_empty() { return false; } let len = s.len(); let mut dp: Vec<bool> = vec![false; len+1]; dp[0] = true; for i in 0..len { if !dp[i] { continue; } for w in &word_dict { let end = i + w.len(); if end <= len && !dp[end] && &s[i..end] == w.as_str() { dp[end] = true; } } } dp[len] } // https://leetcode-cn.com/problems/maximum-length-of-repeated-subarray/solution/ // 相当于填表 pub fn find_length(a: Vec<i32>, b: Vec<i32>) -> i32 { let row = a.len(); let col = b.len(); let mut dp = vec![vec![0; col]; row]; let mut res = 0; for i in 0..row { for j in 0..col { if a[i] == b[j] { let last = if ( i == 0 || j == 0 ) { 0 } else { dp[i-1][j-1] }; dp[i][j] = last + 1; res = res.max(dp[i][j]); } else { dp[i][j] = 0; } } } return res as i32; } // https://leetcode-cn.com/problems/unique-paths-ii/ pub fn unique_paths_with_obstacles(obstacle_grid: Vec<Vec<i32>>) -> i32 { let row = obstacle_grid.len(); let col = obstacle_grid[0].len(); let mut dp = vec![vec![0; col]; row]; // init first row and col for i in 0..row { for j in 0..col { if obstacle_grid[i][j] == 0 { if i == 0 && j == 0 { dp[i][j] = 1; } else if i == 0 { dp[i][j] = dp[i][j-1]; 
} else if j == 0 { dp[i][j] = dp[i-1][j]; } else { dp[i][j] = dp[i-1][j] + dp[i][j-1]; } } else { // 遇到障碍了,但一开始我们就是初始化为0的,所以这里其实可以不写 dp[i][j] = 0; } } } return dp[row-1][col-1]; } // https://leetcode-cn.com/problems/re-space-lcci/ pub fn respace(dictionary: Vec<String>, sentence: String) -> i32 { 42 } // https://leetcode-cn.com/problems/li-wu-de-zui-da-jie-zhi-lcof/ pub fn max_value(mut grid: Vec<Vec<i32>>) -> i32 { let row = grid.len(); let col = grid[0].len(); for i in 0..row { for j in 0..col { if i == 0 && j == 0 { // pass } else if i == 0 { grid[i][j] += grid[i][j-1]; } else if j == 0 { grid[i][j] += grid[i-1][j]; } else { grid[i][j] += grid[i-1][j].max(grid[i][j-1]); } } } return grid[row-1][col-1]; } // https://leetcode-cn.com/problems/triangle/solution/di-gui-ji-yi-hua-dp-bi-xu-miao-dong-by-sweetiee/ pub fn minimum_total(triangle: Vec<Vec<i32>>) -> i32 { let n = triangle.len(); let mut dp = vec![0; n+1]; for i in (0..n).rev() { for j in 0..=i { println!
+1]) + triangle[i][j]; } } return dp[0]; } // https://leetcode-cn.com/problems/nge-tou-zi-de-dian-shu-lcof/solution/ pub fn two_sum(n: i32) -> Vec<f64> { let mut res = vec![1./6.;6]; for i in 1..n as usize { let mut temp = vec![0.0; 5 * i + 6]; for j in 0..res.len() { for k in 0..6 { temp[j+k] += res[j] * 1.0/6.0; } } res = temp; } return res; } // https://leetcode-cn.com/problems/minimum-path-sum/submissions/ pub fn min_path_sum2(mut grid: Vec<Vec<i32>>) -> i32 { let row = grid.len(); let col = grid[0].len(); for i in 1..row { grid[i][0] += grid[i-1][0]; } for j in 1..col { grid[0][j] += grid[0][j-1]; } for i in 1..row { for j in 1..col { grid[i][j] = grid[i][j-1].min(grid[i-1][j]) + grid[i][j]; } } return grid[row-1][col-1]; } fn main() { // generate_parenthesis(4); // println!("(1,1) {}", unique_paths_iter(1, 1)); // println!("(2,2) {}", unique_paths_iter(2, 2)); // println!("(3,2) {}", unique_paths_iter(3, 2)); // println!("(2,3) {}", unique_paths_iter(2, 3)); // rob([1, 3, 1, 3, 100].to_vec()); // max_product([-2,0,-1].to_vec()); // max_product([-1,-2,-9,-6].to_vec()); // max_profit([1,2,3].to_vec()); // word_break("leetcode".to_string(), ["leet".to_string(), "code".to_string()].to_vec()); // dbg!(find_length([1,2,3,2,1].to_vec(), [3,2,1,4,7].to_vec())); // dbg!(max_profit_cool([1,2,3,0,2].to_vec())); // let tri = [ // [2].to_vec(), // [3,4].to_vec(), // [6,5,7].to_vec(), // [4,1,8,3].to_vec() // ].to_vec(); // dbg!(minimum_total(tri)); // dbg!(two_sum(5)); min_path_sum2([ [1,3,1].to_vec(), [1,5,1].to_vec(), [4,2,1].to_vec(), ].to_vec()); }
("i, j = {}, {}", i, j); dp[j] = dp[j].min(dp[j
conditional_block